// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_cfm.h"
#include "br_private_tunnel.h"
#include "br_private_mcast_eht.h"

static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br = NULL;
	u32 num_cfm_peer_mep_infos;
	u32 num_cfm_mep_infos;
	size_t vinfo_sz = 0;
	int num_vlan_infos;

	rcu_read_lock();
	if (netif_is_bridge_port(dev)) {
		p = br_port_get_check_rcu(dev);
		if (p)
			vg = nbp_vlan_group_rcu(p);
	} else if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	if (p && vg && (filter_mask & RTEXT_FILTER_MST))
		vinfo_sz += br_mst_info_size(vg);

	if (!(filter_mask & RTEXT_FILTER_CFM_STATUS))
		return vinfo_sz;

	if (!br)
		return vinfo_sz;

	/* CFM status info must be added */
	br_cfm_mep_count(br, &num_cfm_mep_infos);
	br_cfm_peer_mep_count(br, &num_cfm_peer_mep_infos);

	vinfo_sz += nla_total_size(0);	/* IFLA_BRIDGE_CFM */
	/* For each status struct the MEP instance (u32) is added */
	/* MEP instance (u32) + br_cfm_mep_status */
	vinfo_sz += num_cfm_mep_infos *
		    /* IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE */
		    (nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN */
		    + nla_total_size(sizeof(u32)));
	/* MEP instance (u32) + br_cfm_cc_peer_status */
	vinfo_sz += num_cfm_peer_mep_infos *
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE */
		    (nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE */
		    + nla_total_size(sizeof(u8))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE */
		    + nla_total_size(sizeof(u8))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN */
		    + nla_total_size(sizeof(u32)));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
		+ nla_total_size(1)	/* IFLA_BRPORT_ISOLATED */
		+ nla_total_size(1)	/* IFLA_BRPORT_LOCKED */
		+ nla_total_size(1)	/* IFLA_BRPORT_MAB */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MRP_RING_OPEN */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MRP_IN_OPEN */
		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT */
		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_EHT_HOSTS_CNT */
		+ 0;
}
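
/* The size estimate returned by br_port_info_size() above (and by
 * br_nlmsg_size() below) must account for every attribute emitted by
 * br_port_fill_attrs() and br_fill_ifinfo() respectively; an undersized
 * estimate shows up as the -EMSGSIZE warning in br_info_notify().
 */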

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
							      filter_mask)) /* IFLA_AF_SPEC */
		+ nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
}

static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	struct net_bridge_port *backup_p;
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN, !!(p->flags &
							  BR_MRP_LOST_CONT)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN,
		       !!(p->flags & BR_MRP_LOST_IN_CONT)) ||
	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MAB, !!(p->flags & BR_PORT_MAB)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_ctx.multicast_router) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
			p->multicast_eht_hosts_limit) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
			p->multicast_eht_hosts_cnt))
		return -EMSGSIZE;
#endif

	/* we might be called only with br->lock */
	rcu_read_lock();
	backup_p = rcu_dereference(p->backup_port);
	if (backup_p)
		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
			    backup_p->dev->ifindex);
	rcu_read_unlock();

	return 0;
}

static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev,
			  bool getlink)
{
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct nlattr *af = NULL;
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest;

		nest = nla_nest_start(skb, IFLA_PROTINFO);
		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	if (filter_mask & (RTEXT_FILTER_BRVLAN |
			   RTEXT_FILTER_BRVLAN_COMPRESSED |
			   RTEXT_FILTER_MRP |
			   RTEXT_FILTER_CFM_CONFIG |
			   RTEXT_FILTER_CFM_STATUS |
			   RTEXT_FILTER_MST)) {
		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
		if (!af)
			goto nla_put_failure;
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
	}

	if (filter_mask & RTEXT_FILTER_MRP) {
		int err;

		if (!br_mrp_enabled(br) || port)
			goto done;

		rcu_read_lock();
		err = br_mrp_fill_info(skb, br);
		rcu_read_unlock();

		if (err)
			goto nla_put_failure;
	}

	if (filter_mask & (RTEXT_FILTER_CFM_CONFIG | RTEXT_FILTER_CFM_STATUS)) {
		struct nlattr *cfm_nest = NULL;
		int err;

		if (!br_cfm_created(br) || port)
			goto done;

		cfm_nest = nla_nest_start(skb, IFLA_BRIDGE_CFM);
		if (!cfm_nest)
			goto nla_put_failure;

		if (filter_mask & RTEXT_FILTER_CFM_CONFIG) {
			rcu_read_lock();
			err = br_cfm_config_fill_info(skb, br);
			rcu_read_unlock();
			if (err)
				goto nla_put_failure;
		}

		if (filter_mask & RTEXT_FILTER_CFM_STATUS) {
			rcu_read_lock();
			err = br_cfm_status_fill_info(skb, br, getlink);
			rcu_read_unlock();
			if (err)
				goto nla_put_failure;
		}

		nla_nest_end(skb, cfm_nest);
	}

	if ((filter_mask & RTEXT_FILTER_MST) &&
	    br_opt_get(br, BROPT_MST_ENABLED) && port) {
		const struct net_bridge_vlan_group *vg = nbp_vlan_group(port);
		struct nlattr *mst_nest;
		int err;

		if (!vg || !vg->num_vlans)
			goto done;

		mst_nest = nla_nest_start(skb, IFLA_BRIDGE_MST);
		if (!mst_nest)
			goto nla_put_failure;

		err = br_mst_fill_info(skb, vg);
		if (err)
			goto nla_put_failure;

		nla_nest_end(skb, mst_nest);
	}

done:
	if (af) {
		if (nlmsg_get_pos(skb) - (void *)af > nla_attr_size(0))
			nla_nest_end(skb, af);
		else
			nla_nest_cancel(skb, af);
	}

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

void br_info_notify(int event, const struct net_bridge *br,
		    const struct net_bridge_port *port, u32 filter)
{
	struct net_device *dev;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 port_no = 0;

	if (WARN_ON(!port && !br))
		return;

	if (port) {
		dev = port->dev;
		br = port->br;
		port_no = port->port_no;
	} else {
		dev = br->dev;
	}

	net = dev_net(dev);
	br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev, false);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/* Notify listeners of a change in bridge or port information */
void br_ifinfo_notify(int event, const struct net_bridge *br,
		      const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;

	return br_info_notify(event, br, port, filter);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) &&
	    !(filter_mask & RTEXT_FILTER_MRP) &&
	    !(filter_mask & RTEXT_FILTER_CFM_CONFIG) &&
	    !(filter_mask & RTEXT_FILTER_CFM_STATUS))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev, true);
}

static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo, bool *changed,
			struct netlink_ext_ack *extack)
{
	bool curr_change;
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
					   &curr_change, extack);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
					  &curr_change, extack);
		}
		if (curr_change)
			*changed = true;
		break;

	case RTM_DELLINK:
		if (p) {
			if (!nbp_vlan_delete(p, vinfo->vid))
				*changed = true;

			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
			    !br_vlan_delete(p->br, vinfo->vid))
				*changed = true;
		} else if (!br_vlan_delete(br, vinfo->vid)) {
			*changed = true;
		}
		break;
	}

	return err;
}

int br_process_vlan_info(struct net_bridge *br,
			 struct net_bridge_port *p, int cmd,
			 struct bridge_vlan_info *vinfo_curr,
			 struct bridge_vlan_info **vinfo_last,
			 bool *changed,
			 struct netlink_ext_ack *extack)
{
	int err, rtm_cmd;

	if (!br_vlan_valid_id(vinfo_curr->vid, extack))
		return -EINVAL;

	/* needed for vlan-only NEWVLAN/DELVLAN notifications */
	rtm_cmd = br_afspec_cmd_to_rtm(cmd);

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, v_change_start = 0;

		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			bool curr_change = false;

			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo, &curr_change,
					   extack);
			if (err)
				break;
			if (curr_change) {
				*changed = curr_change;
				if (!v_change_start)
					v_change_start = v;
			} else {
				/* nothing to notify yet */
				if (!v_change_start)
					continue;
				br_vlan_notify(br, p, v_change_start,
					       v - 1, rtm_cmd);
				v_change_start = 0;
			}
			cond_resched();
		}
		/* v_change_start is set only if the last/whole range changed */
		if (v_change_start)
			br_vlan_notify(br, p, v_change_start,
				       v - 1, rtm_cmd);

		*vinfo_last = NULL;

		return err;
	}

	err = br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
	if (*changed)
		br_vlan_notify(br, p, vinfo_curr->vid, 0, rtm_cmd);

	return err;
}

static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd, bool *changed,
		     struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last,
							  changed);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last, changed,
						   extack);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_MRP:
			err = br_mrp_parse(br, p, attr, cmd, extack);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_CFM:
			err = br_cfm_parse(br, p, attr, cmd, extack);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_MST:
			if (!p) {
				NL_SET_ERR_MSG(extack,
					       "MST states can only be set on bridge ports");
				return -EINVAL;
			}

			if (cmd != RTM_SETLINK) {
				NL_SET_ERR_MSG(extack,
					       "MST states can only be set through RTM_SETLINK");
				return -EINVAL;
			}

			err = br_mst_process(p, attr, extack);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
	[IFLA_BRPORT_ISOLATED]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LOCKED]	= { .type = NLA_U8 },
	[IFLA_BRPORT_MAB]	= { .type = NLA_U8 },
	[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
	[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set/clear port flags based on the attribute */
static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			     int attrtype, unsigned long mask)
{
	if (!tb[attrtype])
		return;

	if (nla_get_u8(tb[attrtype]))
		p->flags |= mask;
	else
		p->flags &= ~mask;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[],
		      struct netlink_ext_ack *extack)
{
	unsigned long old_flags, changed_mask;
	bool br_vlan_tunnel_old;
	int err;

	old_flags = p->flags;
	br_vlan_tunnel_old = (old_flags & BR_VLAN_TUNNEL) ? true : false;

	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE,
			 BR_MULTICAST_FAST_LEAVE);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST,
			 BR_MULTICAST_TO_UNICAST);
	br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS, BR_NEIGH_SUPPRESS);
	br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
	br_set_port_flag(p, tb, IFLA_BRPORT_LOCKED, BR_PORT_LOCKED);
	br_set_port_flag(p, tb, IFLA_BRPORT_MAB, BR_PORT_MAB);

	if ((p->flags & BR_PORT_MAB) &&
	    (!(p->flags & BR_PORT_LOCKED) || !(p->flags & BR_LEARNING))) {
		NL_SET_ERR_MSG(extack, "Bridge port must be locked and have learning enabled when MAB is enabled");
		p->flags = old_flags;
		return -EINVAL;
	} else if (!(p->flags & BR_PORT_MAB) && (old_flags & BR_PORT_MAB)) {
		struct net_bridge_fdb_flush_desc desc = {
			.flags = BIT(BR_FDB_LOCKED),
			.flags_mask = BIT(BR_FDB_LOCKED),
			.port_ifindex = p->dev->ifindex,
		};

		br_fdb_flush(p->br, &desc);
	}

	changed_mask = old_flags ^ p->flags;

	err = br_switchdev_set_port_flag(p, p->flags, changed_mask, extack);
	if (err) {
		p->flags = old_flags;
		return err;
	}

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	br_port_flags_change(p, changed_mask);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(&p->multicast_ctx,
						   mcast_router);
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]) {
		u32 hlimit;

		hlimit = nla_get_u32(tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]);
		err = br_multicast_eht_set_hosts_limit(p, hlimit);
		if (err)
			return err;
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	if (tb[IFLA_BRPORT_BACKUP_PORT]) {
		struct net_device *backup_dev = NULL;
		u32 backup_ifindex;

		backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
		if (backup_ifindex) {
			backup_dev = __dev_get_by_index(dev_net(p->dev),
							backup_ifindex);
			if (!backup_dev)
				return -ENOENT;
		}

		err = nbp_backup_change(p, backup_dev);
		if (err)
			return err;
	}

	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	struct net_bridge_port *p;
	struct nlattr *protinfo;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
							  protinfo,
							  br_port_policy,
							  NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb, extack);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
		changed = true;
	}

	if (afspec)
		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);

	if (changed)
		br_ifinfo_notify(RTM_NEWLINK, br, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !netif_is_bridge_master(dev))
		return -EINVAL;

	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
	if (changed)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, br, p);

	return err;
}

static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL] &&
	    !eth_type_vlan(nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])))
		return -EPROTONOSUPPORT;

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data, extack);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_STP_STATE]	= { .type = NLA_U32 },
	[IFLA_BR_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL]	= { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR]	= { .type = NLA_BINARY,
				    .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
	[IFLA_BR_MULTI_BOOLOPT] =
		NLA_POLICY_EXACT_LEN(sizeof(struct br_boolopt_multi)),
};

static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		err = br_stp_set_enabled(br, stp_enabled, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = br_vlan_filter_toggle(br, vlan_filter, extack);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
		__u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);

		err = br_vlan_set_stats_per_port(br, per_port);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH]) {
		struct net_bridge_fdb_flush_desc desc = {
			.flags_mask = BIT(BR_FDB_STATIC)
		};

		br_fdb_flush(br, &desc);
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(&br->multicast_ctx,
					      multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(&br->multicast_ctx,
					       mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
		br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
			RHT_ELASTICITY);

	if (data[IFLA_BR_MCAST_HASH_MAX])
		br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_ctx.multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_ctx.multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br_multicast_set_query_intvl(&br->multicast_ctx, val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(&br->multicast_ctx,
						    igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(&br->multicast_ctx,
						   mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
	}
#endif

	if (data[IFLA_BR_MULTI_BOOLOPT]) {
		struct br_boolopt_multi *bm;

		bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
		err = br_boolopt_multi_toggle(br, bm, extack);
		if (err)
			return err;
	}

	return 0;
}

static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	err = register_netdevice(dev);
	if (err)
		return err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = br_changelink(dev, tb, data, extack);
	if (err)
		br_dev_delete(dev, NULL);

	return err;
}

static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_PER_PORT */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
	       br_multicast_querier_state_size() + /* IFLA_BR_MCAST_QUERIER_STATE */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	struct br_boolopt_multi bm;
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	br_boolopt_multi_get(br, &bm);
	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
	    nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
		       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
		       br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER,
		       br->multicast_ctx.multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
		       br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
		       br->multicast_ctx.multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_ctx.multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_ctx.multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_ctx.multicast_igmp_version) ||
	    br_multicast_dump_querier_state(skb, &br->multicast_ctx,
					    IFLA_BR_MCAST_QUERIER_STATE))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_ctx.multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
	       (p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) +
	       nla_total_size(0);
}

static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct pcpu_sw_netstats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = u64_stats_read(&stats.rx_bytes);
			vxi.rx_packets = u64_stats_read(&stats.rx_packets);
			vxi.tx_bytes = u64_stats_read(&stats.tx_bytes);
			vxi.tx_packets = u64_stats_read(&stats.tx_packets);

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif

	if (p) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_STP,
					sizeof(p->stp_xstats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;

		spin_lock_bh(&br->lock);
		memcpy(nla_data(nla), &p->stp_xstats, sizeof(p->stp_xstats));
		spin_unlock_bh(&br->lock);
	}

	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	br_vlan_rtnl_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	br_vlan_rtnl_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}
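
/* This interface is normally exercised from userspace through iproute2;
 * a few illustrative (not exhaustive) examples and the handlers they
 * end up in, assuming eth0 is already a port of br0:
 *
 *   ip link add name br0 type bridge                 -> br_dev_newlink()
 *   ip link set dev br0 type bridge stp_state 1      -> br_changelink()
 *   bridge link set dev eth0 learning on             -> br_setlink() / br_setport()
 *   bridge vlan add dev eth0 vid 100 pvid untagged   -> br_setlink() / br_afspec()
 *   bridge vlan del dev eth0 vid 100                 -> br_dellink() / br_afspec()
 */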