/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"

static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
							      filter_mask)); /* IFLA_AF_SPEC */
}

static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	return 0;
}

static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

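/* Uncompressed dump: emit one IFLA_BRIDGE_VLAN_INFO attribute per VLAN,
 * without collapsing consecutive VIDs into ranges.
 */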
static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Notify listeners of a change in bridge or port information */
void br_ifinfo_notify(int event, const struct net_bridge *br,
		      const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
	struct net_device *dev;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 port_no = 0;

	if (WARN_ON(!port && !br))
		return;

	if (port) {
		dev = port->dev;
		br = port->br;
		port_no = port->port_no;
	} else {
		dev = br->dev;
	}

	net = dev_net(dev);
	br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}

static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo, bool *changed)
{
	bool curr_change;
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
					   &curr_change);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
					  &curr_change);
		}
		if (curr_change)
			*changed = true;
		break;

	case RTM_DELLINK:
		if (p) {
			if (!nbp_vlan_delete(p, vinfo->vid))
				*changed = true;

			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
			    !br_vlan_delete(p->br, vinfo->vid))
				*changed = true;
		} else if (!br_vlan_delete(br, vinfo->vid)) {
			*changed = true;
		}
		break;
	}

	return err;
}

static int br_process_vlan_info(struct net_bridge *br,
				struct net_bridge_port *p, int cmd,
				struct bridge_vlan_info *vinfo_curr,
				struct bridge_vlan_info **vinfo_last,
				bool *changed)
{
	if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
		return -EINVAL;

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		/* check if we are already processing a range */
		if (*vinfo_last)
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		/* don't allow range of pvids */
		if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
			return -EINVAL;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, err;

		if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
			return -EINVAL;

		if (vinfo_curr->vid <= (*vinfo_last)->vid)
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed);
			if (err)
				break;
		}
		*vinfo_last = NULL;

		return err;
	}

	return br_vlan_info(br, p, cmd, vinfo_curr, changed);
}

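/* Walk the IFLA_AF_SPEC nest of an RTM_SETLINK/RTM_DELLINK request and
 * apply each IFLA_BRIDGE_VLAN_INFO / IFLA_BRIDGE_VLAN_TUNNEL_INFO entry,
 * e.g. as generated by "bridge vlan add dev eth0 vid 10".
 */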
static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd, bool *changed)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last,
							  changed);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last, changed);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE] = { .type = NLA_U8 },
	[IFLA_BRPORT_COST] = { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BRPORT_MODE] = { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set or clear a port flag based on the attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			    int attrtype, unsigned long mask)
{
	unsigned long flags;
	int err;

	if (!tb[attrtype])
		return 0;

	if (nla_get_u8(tb[attrtype]))
		flags = p->flags | mask;
	else
		flags = p->flags & ~mask;

	err = br_switchdev_set_port_flag(p, flags, mask);
	if (err)
		return err;

	p->flags = flags;
	return 0;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	if (err)
		return err;

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (err)
		return err;

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
			       BR_NEIGH_SUPPRESS);
	if (err)
		return err;

	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	struct net_bridge_port *p;
	struct nlattr *protinfo;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo,
					       br_port_policy, NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
		changed = true;
	}

	if (afspec)
		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed);

	if (changed)
		br_ifinfo_notify(RTM_NEWLINK, br, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed);
	if (changed)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, br, p);

	return err;
}

static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

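/* Dump this port's attributes as IFLA_INFO_SLAVE_DATA for the bridge kind */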
static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
				 .len = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
};

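/* Apply the IFLA_BR_* attributes carried in IFLA_INFO_DATA to the bridge,
 * e.g. from "ip link set dev br0 type bridge vlan_filtering 1".
 */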
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br->group_addr_set = true;
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br->multicast_query_use_ifaddr = !!val;
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);

		br->hash_elasticity = val;
	}

	if (data[IFLA_BR_MCAST_HASH_MAX]) {
		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

		err = br_multicast_set_hash_max(br, hash_max);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br->multicast_stats_enabled = !!mcast_stats;
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br->nf_call_iptables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br->nf_call_ip6tables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br->nf_call_arptables = val ? true : false;
	}
#endif

	return 0;
}

static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	err = br_changelink(dev, tb, data, extack);
	if (err)
		unregister_netdevice(dev);
	return err;
}

static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br->multicast_query_use_ifaddr) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br->multicast_stats_enabled) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
			br->hash_elasticity) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br->nf_call_iptables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br->nf_call_ip6tables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br->nf_call_arptables ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

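/* Dump per-VLAN and multicast statistics into a LINK_XSTATS_TYPE_BRIDGE
 * nest; *prividx lets a partially filled dump resume where it stopped.
 */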
static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif
	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}