// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"

static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (netif_is_bridge_port(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}
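
/*
 * Illustrative example of the compressed VLAN encoding counted above and
 * emitted by br_fill_ifvlaninfo_compressed(): with VLANs 10-12 untagged and
 * VLAN 20 as PVID, vids 10-12 share flags and are consecutive, so they
 * collapse into one BRIDGE_VLAN_INFO_RANGE_BEGIN/RANGE_END pair (two infos),
 * while vid 20 stands alone (one info), giving num_vlans == 3.
 */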

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
		+ nla_total_size(1)	/* IFLA_BRPORT_ISOLATED */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
				 filter_mask)) /* IFLA_AF_SPEC */
		+ nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
}

static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	struct net_bridge_port *backup_p;
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	/* we might be called only with br->lock */
	rcu_read_lock();
	backup_p = rcu_dereference(p->backup_port);
	if (backup_p)
		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
			    backup_p->dev->ifindex);
	rcu_read_unlock();

	return 0;
}

static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest;

		nest = nla_nest_start(skb, IFLA_PROTINFO);
		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
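
/*
 * Rough shape of the message built above, for orientation: an ifinfomsg
 * header with ifi_family == AF_BRIDGE, the basic IFLA_* link attributes, an
 * IFLA_PROTINFO nest with the per-port IFLA_BRPORT_* attributes when a port
 * is described, and an IFLA_AF_SPEC nest holding IFLA_BRIDGE_VLAN_INFO
 * entries when the caller requested VLAN data via RTEXT_FILTER_BRVLAN or
 * RTEXT_FILTER_BRVLAN_COMPRESSED.
 */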

/* Notify listeners of a change in bridge or port information */
void br_ifinfo_notify(int event, const struct net_bridge *br,
		      const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
	struct net_device *dev;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 port_no = 0;

	if (WARN_ON(!port && !br))
		return;

	if (port) {
		dev = port->dev;
		br = port->br;
		port_no = port->port_no;
	} else {
		dev = br->dev;
	}

	net = dev_net(dev);
	br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}

static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo, bool *changed,
			struct netlink_ext_ack *extack)
{
	bool curr_change;
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
					   &curr_change, extack);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
					  &curr_change, extack);
		}
		if (curr_change)
			*changed = true;
		break;

	case RTM_DELLINK:
		if (p) {
			if (!nbp_vlan_delete(p, vinfo->vid))
				*changed = true;

			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
			    !br_vlan_delete(p->br, vinfo->vid))
				*changed = true;
		} else if (!br_vlan_delete(br, vinfo->vid)) {
			*changed = true;
		}
		break;
	}

	return err;
}
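
/*
 * Illustrative walk-through of the range handling below: an attribute pair
 * "vid 100, BRIDGE_VLAN_INFO_RANGE_BEGIN" followed by "vid 105,
 * BRIDGE_VLAN_INFO_RANGE_END" is expanded to vids 100..105 and applied one
 * by one via br_vlan_info(); contiguous runs of vids that actually changed
 * are announced with a single br_vlan_notify() (RTM_NEWVLAN/RTM_DELVLAN)
 * rather than one notification per vid.
 */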
int br_process_vlan_info(struct net_bridge *br,
			 struct net_bridge_port *p, int cmd,
			 struct bridge_vlan_info *vinfo_curr,
			 struct bridge_vlan_info **vinfo_last,
			 bool *changed,
			 struct netlink_ext_ack *extack)
{
	int err, rtm_cmd;

	if (!br_vlan_valid_id(vinfo_curr->vid, extack))
		return -EINVAL;

	/* needed for vlan-only NEWVLAN/DELVLAN notifications */
	rtm_cmd = br_afspec_cmd_to_rtm(cmd);

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, v_change_start = 0;

		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			bool curr_change = false;

			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo, &curr_change,
					   extack);
			if (err)
				break;
			if (curr_change) {
				*changed = curr_change;
				if (!v_change_start)
					v_change_start = v;
			} else {
				/* nothing to notify yet */
				if (!v_change_start)
					continue;
				br_vlan_notify(br, p, v_change_start,
					       v - 1, rtm_cmd);
				v_change_start = 0;
			}
			cond_resched();
		}
		/* v_change_start is set only if the last/whole range changed */
		if (v_change_start)
			br_vlan_notify(br, p, v_change_start,
				       v - 1, rtm_cmd);

		*vinfo_last = NULL;

		return err;
	}

	err = br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
	if (*changed)
		br_vlan_notify(br, p, vinfo_curr->vid, 0, rtm_cmd);

	return err;
}

static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd, bool *changed,
		     struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last,
							  changed);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last, changed,
						   extack);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
	[IFLA_BRPORT_ISOLATED]	= { .type = NLA_U8 },
	[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set or clear port flags based on attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			    int attrtype, unsigned long mask)
{
	unsigned long flags;
	int err;

	if (!tb[attrtype])
		return 0;

	if (nla_get_u8(tb[attrtype]))
		flags = p->flags | mask;
	else
		flags = p->flags & ~mask;

	err = br_switchdev_set_port_flag(p, flags, mask);
	if (err)
		return err;

	p->flags = flags;
	return 0;
}
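
/*
 * Illustrative example (device name is a placeholder): "bridge link set dev
 * eth0 learning off" arrives as IFLA_BRPORT_LEARNING == 0 and clears
 * BR_LEARNING through br_set_port_flag() above; the proposed flag word is
 * first passed to br_switchdev_set_port_flag(), so offloading hardware can
 * reject the change before the software flags are committed.
 */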

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	if (err)
		return err;

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (err)
		return err;

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
			       BR_NEIGH_SUPPRESS);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
	if (err)
		return err;

	if (tb[IFLA_BRPORT_BACKUP_PORT]) {
		struct net_device *backup_dev = NULL;
		u32 backup_ifindex;

		backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
		if (backup_ifindex) {
			backup_dev = __dev_get_by_index(dev_net(p->dev),
							backup_ifindex);
			if (!backup_dev)
				return -ENOENT;
		}

		err = nbp_backup_change(p, backup_dev);
		if (err)
			return err;
	}

	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	struct net_bridge_port *p;
	struct nlattr *protinfo;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
							  protinfo,
							  br_port_policy,
							  NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
		changed = true;
	}

	if (afspec)
		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);

	if (changed)
		br_ifinfo_notify(RTM_NEWLINK, br, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
	if (changed)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, br, p);

	return err;
}
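
/*
 * Typical callers, for illustration (device names are placeholders):
 * "bridge vlan add dev eth0 vid 10-20" sends RTM_SETLINK with an
 * IFLA_AF_SPEC nest and lands in br_setlink() above, while "bridge vlan del"
 * uses RTM_DELLINK and br_dellink(); both feed br_afspec() and, when
 * something actually changed, re-announce the link with RTM_NEWLINK via
 * br_ifinfo_notify().
 */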

static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
				 .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
	[IFLA_BR_MULTI_BOOLOPT] = { .type = NLA_EXACT_LEN,
				    .len = sizeof(struct br_boolopt_multi) },
};

static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
		__u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);

		err = br_vlan_set_stats_per_port(br, per_port);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		br_multicast_toggle(br, mcast_snooping);
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
		br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
			RHT_ELASTICITY);

	if (data[IFLA_BR_MCAST_HASH_MAX])
		br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
	}
#endif

	if (data[IFLA_BR_MULTI_BOOLOPT]) {
		struct br_boolopt_multi *bm;

		bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
		err = br_boolopt_multi_toggle(br, bm, extack);
		if (err)
			return err;
	}

	return 0;
}

static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	err = register_netdevice(dev);
	if (err)
		return err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = br_changelink(dev, tb, data, extack);
	if (err)
		br_dev_delete(dev, NULL);

	return err;
}
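
/*
 * Creation flow, for illustration (bridge name is a placeholder):
 * "ip link add br0 type bridge vlan_filtering 1" reaches br_dev_newlink()
 * above, which registers the netdevice first and then applies the IFLA_BR_*
 * options through br_changelink(); if any option is rejected, the
 * half-created bridge is torn down again with br_dev_delete().
 */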

static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_PER_PORT */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	struct br_boolopt_multi bm;
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	br_boolopt_multi_get(br, &bm);
	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
	    nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
		       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
		       br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
		       br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
		       br_opt_get(br, BROPT_MULTICAST_QUERIER)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}
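
/*
 * Note on *prividx handling in br_fill_linkxstats() below: if the xstats do
 * not all fit in one skb, the index of the first entry that did not fit is
 * saved in *prividx and the next dump round skips everything before it; once
 * a dump completes, the index is reset to 0.
 */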
static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif

	if (p) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_STP,
					sizeof(p->stp_xstats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;

		spin_lock_bh(&br->lock);
		memcpy(nla_data(nla), &p->stp_xstats, sizeof(p->stp_xstats));
		spin_unlock_bh(&br->lock);
	}

	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	br_vlan_rtnl_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	br_vlan_rtnl_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}