1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/types.h> 6 #include <linux/netdevice.h> 7 #include <linux/etherdevice.h> 8 #include <linux/slab.h> 9 #include <linux/device.h> 10 #include <linux/skbuff.h> 11 #include <linux/if_vlan.h> 12 #include <linux/if_bridge.h> 13 #include <linux/workqueue.h> 14 #include <linux/jiffies.h> 15 #include <linux/rtnetlink.h> 16 #include <linux/netlink.h> 17 #include <net/switchdev.h> 18 19 #include "spectrum_span.h" 20 #include "spectrum_router.h" 21 #include "spectrum_switchdev.h" 22 #include "spectrum.h" 23 #include "core.h" 24 #include "reg.h" 25 26 struct mlxsw_sp_bridge_ops; 27 28 struct mlxsw_sp_bridge { 29 struct mlxsw_sp *mlxsw_sp; 30 struct { 31 struct delayed_work dw; 32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100 33 unsigned int interval; /* ms */ 34 } fdb_notify; 35 #define MLXSW_SP_MIN_AGEING_TIME 10 36 #define MLXSW_SP_MAX_AGEING_TIME 1000000 37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300 38 u32 ageing_time; 39 bool vlan_enabled_exists; 40 struct list_head bridges_list; 41 DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX); 42 const struct mlxsw_sp_bridge_ops *bridge_8021q_ops; 43 const struct mlxsw_sp_bridge_ops *bridge_8021d_ops; 44 }; 45 46 struct mlxsw_sp_bridge_device { 47 struct net_device *dev; 48 struct list_head list; 49 struct list_head ports_list; 50 struct list_head mids_list; 51 u8 vlan_enabled:1, 52 multicast_enabled:1, 53 mrouter:1; 54 const struct mlxsw_sp_bridge_ops *ops; 55 }; 56 57 struct mlxsw_sp_bridge_port { 58 struct net_device *dev; 59 struct mlxsw_sp_bridge_device *bridge_device; 60 struct list_head list; 61 struct list_head vlans_list; 62 unsigned int ref_count; 63 u8 stp_state; 64 unsigned long flags; 65 bool mrouter; 66 bool lagged; 67 union { 68 u16 lag_id; 69 u16 system_port; 70 }; 71 }; 72 73 struct mlxsw_sp_bridge_vlan { 74 struct list_head list; 75 struct list_head 
port_vlan_list; 76 u16 vid; 77 }; 78 79 struct mlxsw_sp_bridge_ops { 80 int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device, 81 struct mlxsw_sp_bridge_port *bridge_port, 82 struct mlxsw_sp_port *mlxsw_sp_port, 83 struct netlink_ext_ack *extack); 84 void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device, 85 struct mlxsw_sp_bridge_port *bridge_port, 86 struct mlxsw_sp_port *mlxsw_sp_port); 87 struct mlxsw_sp_fid * 88 (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device, 89 u16 vid); 90 }; 91 92 static int 93 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, 94 struct mlxsw_sp_bridge_port *bridge_port, 95 u16 fid_index); 96 97 static void 98 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, 99 struct mlxsw_sp_bridge_port *bridge_port); 100 101 static void 102 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, 103 struct mlxsw_sp_bridge_device 104 *bridge_device); 105 106 static void 107 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, 108 struct mlxsw_sp_bridge_port *bridge_port, 109 bool add); 110 111 static struct mlxsw_sp_bridge_device * 112 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge, 113 const struct net_device *br_dev) 114 { 115 struct mlxsw_sp_bridge_device *bridge_device; 116 117 list_for_each_entry(bridge_device, &bridge->bridges_list, list) 118 if (bridge_device->dev == br_dev) 119 return bridge_device; 120 121 return NULL; 122 } 123 124 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, 125 const struct net_device *br_dev) 126 { 127 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 128 } 129 130 static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev, 131 void *data) 132 { 133 struct mlxsw_sp *mlxsw_sp = data; 134 135 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); 136 return 0; 137 } 138 139 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp, 140 struct net_device *dev) 141 
{ 142 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); 143 netdev_walk_all_upper_dev_rcu(dev, 144 mlxsw_sp_bridge_device_upper_rif_destroy, 145 mlxsw_sp); 146 } 147 148 static struct mlxsw_sp_bridge_device * 149 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, 150 struct net_device *br_dev) 151 { 152 struct device *dev = bridge->mlxsw_sp->bus_info->dev; 153 struct mlxsw_sp_bridge_device *bridge_device; 154 bool vlan_enabled = br_vlan_enabled(br_dev); 155 156 if (vlan_enabled && bridge->vlan_enabled_exists) { 157 dev_err(dev, "Only one VLAN-aware bridge is supported\n"); 158 return ERR_PTR(-EINVAL); 159 } 160 161 bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL); 162 if (!bridge_device) 163 return ERR_PTR(-ENOMEM); 164 165 bridge_device->dev = br_dev; 166 bridge_device->vlan_enabled = vlan_enabled; 167 bridge_device->multicast_enabled = br_multicast_enabled(br_dev); 168 bridge_device->mrouter = br_multicast_router(br_dev); 169 INIT_LIST_HEAD(&bridge_device->ports_list); 170 if (vlan_enabled) { 171 bridge->vlan_enabled_exists = true; 172 bridge_device->ops = bridge->bridge_8021q_ops; 173 } else { 174 bridge_device->ops = bridge->bridge_8021d_ops; 175 } 176 INIT_LIST_HEAD(&bridge_device->mids_list); 177 list_add(&bridge_device->list, &bridge->bridges_list); 178 179 return bridge_device; 180 } 181 182 static void 183 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, 184 struct mlxsw_sp_bridge_device *bridge_device) 185 { 186 mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp, 187 bridge_device->dev); 188 list_del(&bridge_device->list); 189 if (bridge_device->vlan_enabled) 190 bridge->vlan_enabled_exists = false; 191 WARN_ON(!list_empty(&bridge_device->ports_list)); 192 WARN_ON(!list_empty(&bridge_device->mids_list)); 193 kfree(bridge_device); 194 } 195 196 static struct mlxsw_sp_bridge_device * 197 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge, 198 struct net_device *br_dev) 199 { 200 struct mlxsw_sp_bridge_device 
*bridge_device; 201 202 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev); 203 if (bridge_device) 204 return bridge_device; 205 206 return mlxsw_sp_bridge_device_create(bridge, br_dev); 207 } 208 209 static void 210 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge, 211 struct mlxsw_sp_bridge_device *bridge_device) 212 { 213 if (list_empty(&bridge_device->ports_list)) 214 mlxsw_sp_bridge_device_destroy(bridge, bridge_device); 215 } 216 217 static struct mlxsw_sp_bridge_port * 218 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device, 219 const struct net_device *brport_dev) 220 { 221 struct mlxsw_sp_bridge_port *bridge_port; 222 223 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { 224 if (bridge_port->dev == brport_dev) 225 return bridge_port; 226 } 227 228 return NULL; 229 } 230 231 struct mlxsw_sp_bridge_port * 232 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, 233 struct net_device *brport_dev) 234 { 235 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev); 236 struct mlxsw_sp_bridge_device *bridge_device; 237 238 if (!br_dev) 239 return NULL; 240 241 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev); 242 if (!bridge_device) 243 return NULL; 244 245 return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev); 246 } 247 248 static struct mlxsw_sp_bridge_port * 249 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, 250 struct net_device *brport_dev) 251 { 252 struct mlxsw_sp_bridge_port *bridge_port; 253 struct mlxsw_sp_port *mlxsw_sp_port; 254 255 bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL); 256 if (!bridge_port) 257 return NULL; 258 259 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev); 260 bridge_port->lagged = mlxsw_sp_port->lagged; 261 if (bridge_port->lagged) 262 bridge_port->lag_id = mlxsw_sp_port->lag_id; 263 else 264 bridge_port->system_port = mlxsw_sp_port->local_port; 265 bridge_port->dev = brport_dev; 266 
bridge_port->bridge_device = bridge_device; 267 bridge_port->stp_state = BR_STATE_DISABLED; 268 bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC | 269 BR_MCAST_FLOOD; 270 INIT_LIST_HEAD(&bridge_port->vlans_list); 271 list_add(&bridge_port->list, &bridge_device->ports_list); 272 bridge_port->ref_count = 1; 273 274 return bridge_port; 275 } 276 277 static void 278 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port) 279 { 280 list_del(&bridge_port->list); 281 WARN_ON(!list_empty(&bridge_port->vlans_list)); 282 kfree(bridge_port); 283 } 284 285 static bool 286 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port * 287 bridge_port) 288 { 289 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev); 290 291 /* In case ports were pulled from out of a bridged LAG, then 292 * it's possible the reference count isn't zero, yet the bridge 293 * port should be destroyed, as it's no longer an upper of ours. 294 */ 295 if (!mlxsw_sp && list_empty(&bridge_port->vlans_list)) 296 return true; 297 else if (bridge_port->ref_count == 0) 298 return true; 299 else 300 return false; 301 } 302 303 static struct mlxsw_sp_bridge_port * 304 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge, 305 struct net_device *brport_dev) 306 { 307 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev); 308 struct mlxsw_sp_bridge_device *bridge_device; 309 struct mlxsw_sp_bridge_port *bridge_port; 310 int err; 311 312 bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev); 313 if (bridge_port) { 314 bridge_port->ref_count++; 315 return bridge_port; 316 } 317 318 bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev); 319 if (IS_ERR(bridge_device)) 320 return ERR_CAST(bridge_device); 321 322 bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev); 323 if (!bridge_port) { 324 err = -ENOMEM; 325 goto err_bridge_port_create; 326 } 327 328 return bridge_port; 329 330 err_bridge_port_create: 331 
mlxsw_sp_bridge_device_put(bridge, bridge_device); 332 return ERR_PTR(err); 333 } 334 335 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge, 336 struct mlxsw_sp_bridge_port *bridge_port) 337 { 338 struct mlxsw_sp_bridge_device *bridge_device; 339 340 bridge_port->ref_count--; 341 if (!mlxsw_sp_bridge_port_should_destroy(bridge_port)) 342 return; 343 bridge_device = bridge_port->bridge_device; 344 mlxsw_sp_bridge_port_destroy(bridge_port); 345 mlxsw_sp_bridge_device_put(bridge, bridge_device); 346 } 347 348 static struct mlxsw_sp_port_vlan * 349 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port, 350 const struct mlxsw_sp_bridge_device * 351 bridge_device, 352 u16 vid) 353 { 354 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 355 356 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list, 357 list) { 358 if (!mlxsw_sp_port_vlan->bridge_port) 359 continue; 360 if (mlxsw_sp_port_vlan->bridge_port->bridge_device != 361 bridge_device) 362 continue; 363 if (bridge_device->vlan_enabled && 364 mlxsw_sp_port_vlan->vid != vid) 365 continue; 366 return mlxsw_sp_port_vlan; 367 } 368 369 return NULL; 370 } 371 372 static struct mlxsw_sp_port_vlan* 373 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port, 374 u16 fid_index) 375 { 376 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 377 378 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list, 379 list) { 380 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; 381 382 if (fid && mlxsw_sp_fid_index(fid) == fid_index) 383 return mlxsw_sp_port_vlan; 384 } 385 386 return NULL; 387 } 388 389 static struct mlxsw_sp_bridge_vlan * 390 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port, 391 u16 vid) 392 { 393 struct mlxsw_sp_bridge_vlan *bridge_vlan; 394 395 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) { 396 if (bridge_vlan->vid == vid) 397 return bridge_vlan; 398 } 399 400 return NULL; 401 } 402 403 static struct 
mlxsw_sp_bridge_vlan * 404 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid) 405 { 406 struct mlxsw_sp_bridge_vlan *bridge_vlan; 407 408 bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL); 409 if (!bridge_vlan) 410 return NULL; 411 412 INIT_LIST_HEAD(&bridge_vlan->port_vlan_list); 413 bridge_vlan->vid = vid; 414 list_add(&bridge_vlan->list, &bridge_port->vlans_list); 415 416 return bridge_vlan; 417 } 418 419 static void 420 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan) 421 { 422 list_del(&bridge_vlan->list); 423 WARN_ON(!list_empty(&bridge_vlan->port_vlan_list)); 424 kfree(bridge_vlan); 425 } 426 427 static struct mlxsw_sp_bridge_vlan * 428 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid) 429 { 430 struct mlxsw_sp_bridge_vlan *bridge_vlan; 431 432 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid); 433 if (bridge_vlan) 434 return bridge_vlan; 435 436 return mlxsw_sp_bridge_vlan_create(bridge_port, vid); 437 } 438 439 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan) 440 { 441 if (list_empty(&bridge_vlan->port_vlan_list)) 442 mlxsw_sp_bridge_vlan_destroy(bridge_vlan); 443 } 444 445 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge, 446 struct net_device *dev, 447 unsigned long *brport_flags) 448 { 449 struct mlxsw_sp_bridge_port *bridge_port; 450 451 bridge_port = mlxsw_sp_bridge_port_find(bridge, dev); 452 if (WARN_ON(!bridge_port)) 453 return; 454 455 memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags)); 456 } 457 458 static int mlxsw_sp_port_attr_get(struct net_device *dev, 459 struct switchdev_attr *attr) 460 { 461 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 462 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 463 464 switch (attr->id) { 465 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: 466 attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac); 467 memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac, 468 
attr->u.ppid.id_len); 469 break; 470 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 471 mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev, 472 &attr->u.brport_flags); 473 break; 474 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: 475 attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD | 476 BR_MCAST_FLOOD; 477 break; 478 default: 479 return -EOPNOTSUPP; 480 } 481 482 return 0; 483 } 484 485 static int 486 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 487 struct mlxsw_sp_bridge_vlan *bridge_vlan, 488 u8 state) 489 { 490 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 491 492 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list, 493 bridge_vlan_node) { 494 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port) 495 continue; 496 return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, 497 bridge_vlan->vid, state); 498 } 499 500 return 0; 501 } 502 503 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, 504 struct switchdev_trans *trans, 505 struct net_device *orig_dev, 506 u8 state) 507 { 508 struct mlxsw_sp_bridge_port *bridge_port; 509 struct mlxsw_sp_bridge_vlan *bridge_vlan; 510 int err; 511 512 if (switchdev_trans_ph_prepare(trans)) 513 return 0; 514 515 /* It's possible we failed to enslave the port, yet this 516 * operation is executed due to it being deferred. 
517 */ 518 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 519 orig_dev); 520 if (!bridge_port) 521 return 0; 522 523 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) { 524 err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, 525 bridge_vlan, state); 526 if (err) 527 goto err_port_bridge_vlan_stp_set; 528 } 529 530 bridge_port->stp_state = state; 531 532 return 0; 533 534 err_port_bridge_vlan_stp_set: 535 list_for_each_entry_continue_reverse(bridge_vlan, 536 &bridge_port->vlans_list, list) 537 mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan, 538 bridge_port->stp_state); 539 return err; 540 } 541 542 static int 543 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, 544 struct mlxsw_sp_bridge_vlan *bridge_vlan, 545 enum mlxsw_sp_flood_type packet_type, 546 bool member) 547 { 548 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 549 550 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list, 551 bridge_vlan_node) { 552 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port) 553 continue; 554 return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, 555 packet_type, 556 mlxsw_sp_port->local_port, 557 member); 558 } 559 560 return 0; 561 } 562 563 static int 564 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, 565 struct mlxsw_sp_bridge_port *bridge_port, 566 enum mlxsw_sp_flood_type packet_type, 567 bool member) 568 { 569 struct mlxsw_sp_bridge_vlan *bridge_vlan; 570 int err; 571 572 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) { 573 err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, 574 bridge_vlan, 575 packet_type, 576 member); 577 if (err) 578 goto err_port_bridge_vlan_flood_set; 579 } 580 581 return 0; 582 583 err_port_bridge_vlan_flood_set: 584 list_for_each_entry_continue_reverse(bridge_vlan, 585 &bridge_port->vlans_list, list) 586 mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan, 587 packet_type, !member); 588 
return err; 589 } 590 591 static int 592 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, 593 struct mlxsw_sp_bridge_vlan *bridge_vlan, 594 bool set) 595 { 596 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 597 u16 vid = bridge_vlan->vid; 598 599 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list, 600 bridge_vlan_node) { 601 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port) 602 continue; 603 return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set); 604 } 605 606 return 0; 607 } 608 609 static int 610 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, 611 struct mlxsw_sp_bridge_port *bridge_port, 612 bool set) 613 { 614 struct mlxsw_sp_bridge_vlan *bridge_vlan; 615 int err; 616 617 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) { 618 err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port, 619 bridge_vlan, set); 620 if (err) 621 goto err_port_bridge_vlan_learning_set; 622 } 623 624 return 0; 625 626 err_port_bridge_vlan_learning_set: 627 list_for_each_entry_continue_reverse(bridge_vlan, 628 &bridge_port->vlans_list, list) 629 mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port, 630 bridge_vlan, !set); 631 return err; 632 } 633 634 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, 635 struct switchdev_trans *trans, 636 struct net_device *orig_dev, 637 unsigned long brport_flags) 638 { 639 struct mlxsw_sp_bridge_port *bridge_port; 640 int err; 641 642 if (switchdev_trans_ph_prepare(trans)) 643 return 0; 644 645 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 646 orig_dev); 647 if (!bridge_port) 648 return 0; 649 650 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, 651 MLXSW_SP_FLOOD_TYPE_UC, 652 brport_flags & BR_FLOOD); 653 if (err) 654 return err; 655 656 err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port, 657 brport_flags & BR_LEARNING); 658 if (err) 659 return err; 660 661 if 
(bridge_port->bridge_device->multicast_enabled) 662 goto out; 663 664 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, 665 MLXSW_SP_FLOOD_TYPE_MC, 666 brport_flags & 667 BR_MCAST_FLOOD); 668 if (err) 669 return err; 670 671 out: 672 memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags)); 673 return 0; 674 } 675 676 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time) 677 { 678 char sfdat_pl[MLXSW_REG_SFDAT_LEN]; 679 int err; 680 681 mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time); 682 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl); 683 if (err) 684 return err; 685 mlxsw_sp->bridge->ageing_time = ageing_time; 686 return 0; 687 } 688 689 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, 690 struct switchdev_trans *trans, 691 unsigned long ageing_clock_t) 692 { 693 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 694 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); 695 u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; 696 697 if (switchdev_trans_ph_prepare(trans)) { 698 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME || 699 ageing_time > MLXSW_SP_MAX_AGEING_TIME) 700 return -ERANGE; 701 else 702 return 0; 703 } 704 705 return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); 706 } 707 708 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 709 struct switchdev_trans *trans, 710 struct net_device *orig_dev, 711 bool vlan_enabled) 712 { 713 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 714 struct mlxsw_sp_bridge_device *bridge_device; 715 716 if (!switchdev_trans_ph_prepare(trans)) 717 return 0; 718 719 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev); 720 if (WARN_ON(!bridge_device)) 721 return -EINVAL; 722 723 if (bridge_device->vlan_enabled == vlan_enabled) 724 return 0; 725 726 netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n"); 727 return -EINVAL; 728 } 
729 730 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, 731 struct switchdev_trans *trans, 732 struct net_device *orig_dev, 733 bool is_port_mrouter) 734 { 735 struct mlxsw_sp_bridge_port *bridge_port; 736 int err; 737 738 if (switchdev_trans_ph_prepare(trans)) 739 return 0; 740 741 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 742 orig_dev); 743 if (!bridge_port) 744 return 0; 745 746 if (!bridge_port->bridge_device->multicast_enabled) 747 goto out; 748 749 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, 750 MLXSW_SP_FLOOD_TYPE_MC, 751 is_port_mrouter); 752 if (err) 753 return err; 754 755 mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port, 756 is_port_mrouter); 757 out: 758 bridge_port->mrouter = is_port_mrouter; 759 return 0; 760 } 761 762 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port) 763 { 764 const struct mlxsw_sp_bridge_device *bridge_device; 765 766 bridge_device = bridge_port->bridge_device; 767 return bridge_device->multicast_enabled ? bridge_port->mrouter : 768 bridge_port->flags & BR_MCAST_FLOOD; 769 } 770 771 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, 772 struct switchdev_trans *trans, 773 struct net_device *orig_dev, 774 bool mc_disabled) 775 { 776 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 777 struct mlxsw_sp_bridge_device *bridge_device; 778 struct mlxsw_sp_bridge_port *bridge_port; 779 int err; 780 781 if (switchdev_trans_ph_prepare(trans)) 782 return 0; 783 784 /* It's possible we failed to enslave the port, yet this 785 * operation is executed due to it being deferred. 
786 */ 787 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev); 788 if (!bridge_device) 789 return 0; 790 791 if (bridge_device->multicast_enabled != !mc_disabled) { 792 bridge_device->multicast_enabled = !mc_disabled; 793 mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port, 794 bridge_device); 795 } 796 797 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { 798 enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC; 799 bool member = mlxsw_sp_mc_flood(bridge_port); 800 801 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, 802 bridge_port, 803 packet_type, member); 804 if (err) 805 return err; 806 } 807 808 bridge_device->multicast_enabled = !mc_disabled; 809 810 return 0; 811 } 812 813 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp, 814 u16 mid_idx, bool add) 815 { 816 char *smid_pl; 817 int err; 818 819 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); 820 if (!smid_pl) 821 return -ENOMEM; 822 823 mlxsw_reg_smid_pack(smid_pl, mid_idx, 824 mlxsw_sp_router_port(mlxsw_sp), add); 825 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); 826 kfree(smid_pl); 827 return err; 828 } 829 830 static void 831 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp, 832 struct mlxsw_sp_bridge_device *bridge_device, 833 bool add) 834 { 835 struct mlxsw_sp_mid *mid; 836 837 list_for_each_entry(mid, &bridge_device->mids_list, list) 838 mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add); 839 } 840 841 static int 842 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, 843 struct switchdev_trans *trans, 844 struct net_device *orig_dev, 845 bool is_mrouter) 846 { 847 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 848 struct mlxsw_sp_bridge_device *bridge_device; 849 850 if (switchdev_trans_ph_prepare(trans)) 851 return 0; 852 853 /* It's possible we failed to enslave the port, yet this 854 * operation is executed due to it being deferred. 
855 */ 856 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev); 857 if (!bridge_device) 858 return 0; 859 860 if (bridge_device->mrouter != is_mrouter) 861 mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device, 862 is_mrouter); 863 bridge_device->mrouter = is_mrouter; 864 return 0; 865 } 866 867 static int mlxsw_sp_port_attr_set(struct net_device *dev, 868 const struct switchdev_attr *attr, 869 struct switchdev_trans *trans) 870 { 871 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 872 int err; 873 874 switch (attr->id) { 875 case SWITCHDEV_ATTR_ID_PORT_STP_STATE: 876 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans, 877 attr->orig_dev, 878 attr->u.stp_state); 879 break; 880 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 881 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans, 882 attr->orig_dev, 883 attr->u.brport_flags); 884 break; 885 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: 886 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans, 887 attr->u.ageing_time); 888 break; 889 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: 890 err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans, 891 attr->orig_dev, 892 attr->u.vlan_filtering); 893 break; 894 case SWITCHDEV_ATTR_ID_PORT_MROUTER: 895 err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans, 896 attr->orig_dev, 897 attr->u.mrouter); 898 break; 899 case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: 900 err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans, 901 attr->orig_dev, 902 attr->u.mc_disabled); 903 break; 904 case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER: 905 err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans, 906 attr->orig_dev, 907 attr->u.mrouter); 908 break; 909 default: 910 err = -EOPNOTSUPP; 911 break; 912 } 913 914 if (switchdev_trans_ph_commit(trans)) 915 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); 916 917 return err; 918 } 919 920 static int 921 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, 922 struct 
mlxsw_sp_bridge_port *bridge_port) 923 { 924 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 925 struct mlxsw_sp_bridge_device *bridge_device; 926 u8 local_port = mlxsw_sp_port->local_port; 927 u16 vid = mlxsw_sp_port_vlan->vid; 928 struct mlxsw_sp_fid *fid; 929 int err; 930 931 bridge_device = bridge_port->bridge_device; 932 fid = bridge_device->ops->fid_get(bridge_device, vid); 933 if (IS_ERR(fid)) 934 return PTR_ERR(fid); 935 936 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, 937 bridge_port->flags & BR_FLOOD); 938 if (err) 939 goto err_fid_uc_flood_set; 940 941 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, 942 mlxsw_sp_mc_flood(bridge_port)); 943 if (err) 944 goto err_fid_mc_flood_set; 945 946 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, 947 true); 948 if (err) 949 goto err_fid_bc_flood_set; 950 951 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid); 952 if (err) 953 goto err_fid_port_vid_map; 954 955 mlxsw_sp_port_vlan->fid = fid; 956 957 return 0; 958 959 err_fid_port_vid_map: 960 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false); 961 err_fid_bc_flood_set: 962 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false); 963 err_fid_mc_flood_set: 964 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false); 965 err_fid_uc_flood_set: 966 mlxsw_sp_fid_put(fid); 967 return err; 968 } 969 970 static void 971 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 972 { 973 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 974 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; 975 u8 local_port = mlxsw_sp_port->local_port; 976 u16 vid = mlxsw_sp_port_vlan->vid; 977 978 mlxsw_sp_port_vlan->fid = NULL; 979 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid); 980 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false); 981 mlxsw_sp_fid_flood_set(fid, 
MLXSW_SP_FLOOD_TYPE_MC, local_port, false); 982 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false); 983 mlxsw_sp_fid_put(fid); 984 } 985 986 static u16 987 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port, 988 u16 vid, bool is_pvid) 989 { 990 if (is_pvid) 991 return vid; 992 else if (mlxsw_sp_port->pvid == vid) 993 return 0; /* Dis-allow untagged packets */ 994 else 995 return mlxsw_sp_port->pvid; 996 } 997 998 static int 999 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, 1000 struct mlxsw_sp_bridge_port *bridge_port) 1001 { 1002 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1003 struct mlxsw_sp_bridge_vlan *bridge_vlan; 1004 u16 vid = mlxsw_sp_port_vlan->vid; 1005 int err; 1006 1007 /* No need to continue if only VLAN flags were changed */ 1008 if (mlxsw_sp_port_vlan->bridge_port) { 1009 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 1010 return 0; 1011 } 1012 1013 err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port); 1014 if (err) 1015 return err; 1016 1017 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, 1018 bridge_port->flags & BR_LEARNING); 1019 if (err) 1020 goto err_port_vid_learning_set; 1021 1022 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, 1023 bridge_port->stp_state); 1024 if (err) 1025 goto err_port_vid_stp_set; 1026 1027 bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid); 1028 if (!bridge_vlan) { 1029 err = -ENOMEM; 1030 goto err_bridge_vlan_get; 1031 } 1032 1033 list_add(&mlxsw_sp_port_vlan->bridge_vlan_node, 1034 &bridge_vlan->port_vlan_list); 1035 1036 mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge, 1037 bridge_port->dev); 1038 mlxsw_sp_port_vlan->bridge_port = bridge_port; 1039 1040 return 0; 1041 1042 err_bridge_vlan_get: 1043 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED); 1044 err_port_vid_stp_set: 1045 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); 1046 
err_port_vid_learning_set: 1047 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan); 1048 return err; 1049 } 1050 1051 void 1052 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1053 { 1054 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1055 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; 1056 struct mlxsw_sp_bridge_vlan *bridge_vlan; 1057 struct mlxsw_sp_bridge_port *bridge_port; 1058 u16 vid = mlxsw_sp_port_vlan->vid; 1059 bool last_port, last_vlan; 1060 1061 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q && 1062 mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D)) 1063 return; 1064 1065 bridge_port = mlxsw_sp_port_vlan->bridge_port; 1066 last_vlan = list_is_singular(&bridge_port->vlans_list); 1067 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid); 1068 last_port = list_is_singular(&bridge_vlan->port_vlan_list); 1069 1070 list_del(&mlxsw_sp_port_vlan->bridge_vlan_node); 1071 mlxsw_sp_bridge_vlan_put(bridge_vlan); 1072 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED); 1073 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); 1074 if (last_port) 1075 mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp, 1076 bridge_port, 1077 mlxsw_sp_fid_index(fid)); 1078 if (last_vlan) 1079 mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port); 1080 1081 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan); 1082 1083 mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port); 1084 mlxsw_sp_port_vlan->bridge_port = NULL; 1085 } 1086 1087 static int 1088 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, 1089 struct mlxsw_sp_bridge_port *bridge_port, 1090 u16 vid, bool is_untagged, bool is_pvid) 1091 { 1092 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); 1093 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1094 u16 old_pvid = mlxsw_sp_port->pvid; 1095 int err; 1096 1097 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 
vid); 1098 if (IS_ERR(mlxsw_sp_port_vlan)) 1099 return PTR_ERR(mlxsw_sp_port_vlan); 1100 1101 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, 1102 is_untagged); 1103 if (err) 1104 goto err_port_vlan_set; 1105 1106 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); 1107 if (err) 1108 goto err_port_pvid_set; 1109 1110 err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port); 1111 if (err) 1112 goto err_port_vlan_bridge_join; 1113 1114 return 0; 1115 1116 err_port_vlan_bridge_join: 1117 mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid); 1118 err_port_pvid_set: 1119 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1120 err_port_vlan_set: 1121 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 1122 return err; 1123 } 1124 1125 static int 1126 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp, 1127 const struct net_device *br_dev, 1128 const struct switchdev_obj_port_vlan *vlan) 1129 { 1130 struct mlxsw_sp_rif *rif; 1131 struct mlxsw_sp_fid *fid; 1132 u16 pvid; 1133 u16 vid; 1134 1135 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev); 1136 if (!rif) 1137 return 0; 1138 fid = mlxsw_sp_rif_fid(rif); 1139 pvid = mlxsw_sp_fid_8021q_vid(fid); 1140 1141 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { 1142 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { 1143 if (vid != pvid) { 1144 netdev_err(br_dev, "Can't change PVID, it's used by router interface\n"); 1145 return -EBUSY; 1146 } 1147 } else { 1148 if (vid == pvid) { 1149 netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n"); 1150 return -EBUSY; 1151 } 1152 } 1153 } 1154 1155 return 0; 1156 } 1157 1158 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, 1159 const struct switchdev_obj_port_vlan *vlan, 1160 struct switchdev_trans *trans) 1161 { 1162 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1163 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1164 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1165 struct net_device 
*orig_dev = vlan->obj.orig_dev; 1166 struct mlxsw_sp_bridge_port *bridge_port; 1167 u16 vid; 1168 1169 if (netif_is_bridge_master(orig_dev)) { 1170 int err = 0; 1171 1172 if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) && 1173 br_vlan_enabled(orig_dev) && 1174 switchdev_trans_ph_prepare(trans)) 1175 err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp, 1176 orig_dev, vlan); 1177 if (!err) 1178 err = -EOPNOTSUPP; 1179 return err; 1180 } 1181 1182 if (switchdev_trans_ph_prepare(trans)) 1183 return 0; 1184 1185 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1186 if (WARN_ON(!bridge_port)) 1187 return -EINVAL; 1188 1189 if (!bridge_port->bridge_device->vlan_enabled) 1190 return 0; 1191 1192 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 1193 int err; 1194 1195 err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, 1196 vid, flag_untagged, 1197 flag_pvid); 1198 if (err) 1199 return err; 1200 } 1201 1202 return 0; 1203 } 1204 1205 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged) 1206 { 1207 return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID : 1208 MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID; 1209 } 1210 1211 static int 1212 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, 1213 struct mlxsw_sp_bridge_port *bridge_port, 1214 u16 fid_index) 1215 { 1216 bool lagged = bridge_port->lagged; 1217 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 1218 u16 system_port; 1219 1220 system_port = lagged ? bridge_port->lag_id : bridge_port->system_port; 1221 mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged)); 1222 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index); 1223 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port); 1224 1225 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 1226 } 1227 1228 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) 1229 { 1230 return dynamic ? 
		       MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
		       MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

/* Program (or remove) a unicast FDB record keyed by {MAC, FID} pointing at
 * a local port. 'dynamic' selects an ageable record, 'action' allows
 * redirecting matching packets (e.g. to the router).
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	/* Number of records requested; after the write the register reports
	 * how many were actually processed. A mismatch means the device did
	 * not commit the entry.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

/* Used by the router: trap packets destined to the RIF's MAC to the router
 * block instead of forwarding them. Local port 0 is the CPU port.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}

/* Same as mlxsw_sp_port_fdb_uc_op(), but the record points at a LAG.
 * 'lag_vid' disambiguates records on VLAN-unaware bridges.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* See __mlxsw_sp_port_fdb_uc_op() for the num_rec check rationale */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}

/* Handle an FDB add/del notification from the bridge for this port:
 * translate {bridge port, VID} to a FID and program the hardware entry as a
 * static (non-ageing) record.
 */
static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 fid_index, vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       fdb_info->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;

	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index,
					       adding, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
						   bridge_port->lag_id,
						   fdb_info->addr, fid_index,
						   vid, adding, false);
}

/* Program (or remove) a multicast FDB record pointing at MID 'mid_idx' */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, 1364 MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); 1365 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); 1366 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1367 if (err) 1368 goto out; 1369 1370 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) 1371 err = -EBUSY; 1372 1373 out: 1374 kfree(sfd_pl); 1375 return err; 1376 } 1377 1378 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, 1379 long *ports_bitmap, 1380 bool set_router_port) 1381 { 1382 char *smid_pl; 1383 int err, i; 1384 1385 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); 1386 if (!smid_pl) 1387 return -ENOMEM; 1388 1389 mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false); 1390 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) { 1391 if (mlxsw_sp->ports[i]) 1392 mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); 1393 } 1394 1395 mlxsw_reg_smid_port_mask_set(smid_pl, 1396 mlxsw_sp_router_port(mlxsw_sp), 1); 1397 1398 for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core)) 1399 mlxsw_reg_smid_port_set(smid_pl, i, 1); 1400 1401 mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp), 1402 set_router_port); 1403 1404 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); 1405 kfree(smid_pl); 1406 return err; 1407 } 1408 1409 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, 1410 u16 mid_idx, bool add) 1411 { 1412 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1413 char *smid_pl; 1414 int err; 1415 1416 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); 1417 if (!smid_pl) 1418 return -ENOMEM; 1419 1420 mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add); 1421 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); 1422 kfree(smid_pl); 1423 return err; 1424 } 1425 1426 static struct 1427 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device, 1428 const unsigned char *addr, 1429 u16 fid) 1430 { 1431 struct mlxsw_sp_mid 
*mid; 1432 1433 list_for_each_entry(mid, &bridge_device->mids_list, list) { 1434 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) 1435 return mid; 1436 } 1437 return NULL; 1438 } 1439 1440 static void 1441 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp, 1442 struct mlxsw_sp_bridge_port *bridge_port, 1443 unsigned long *ports_bitmap) 1444 { 1445 struct mlxsw_sp_port *mlxsw_sp_port; 1446 u64 max_lag_members, i; 1447 int lag_id; 1448 1449 if (!bridge_port->lagged) { 1450 set_bit(bridge_port->system_port, ports_bitmap); 1451 } else { 1452 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 1453 MAX_LAG_MEMBERS); 1454 lag_id = bridge_port->lag_id; 1455 for (i = 0; i < max_lag_members; i++) { 1456 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, 1457 lag_id, i); 1458 if (mlxsw_sp_port) 1459 set_bit(mlxsw_sp_port->local_port, 1460 ports_bitmap); 1461 } 1462 } 1463 } 1464 1465 static void 1466 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap, 1467 struct mlxsw_sp_bridge_device *bridge_device, 1468 struct mlxsw_sp *mlxsw_sp) 1469 { 1470 struct mlxsw_sp_bridge_port *bridge_port; 1471 1472 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { 1473 if (bridge_port->mrouter) { 1474 mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp, 1475 bridge_port, 1476 flood_bitmap); 1477 } 1478 } 1479 } 1480 1481 static bool 1482 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, 1483 struct mlxsw_sp_mid *mid, 1484 struct mlxsw_sp_bridge_device *bridge_device) 1485 { 1486 long *flood_bitmap; 1487 int num_of_ports; 1488 int alloc_size; 1489 u16 mid_idx; 1490 int err; 1491 1492 mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap, 1493 MLXSW_SP_MID_MAX); 1494 if (mid_idx == MLXSW_SP_MID_MAX) 1495 return false; 1496 1497 num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core); 1498 alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports); 1499 flood_bitmap = kzalloc(alloc_size, GFP_KERNEL); 1500 if (!flood_bitmap) 1501 return false; 1502 
	/* Flood to the group's member ports plus all mrouter ports */
	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	if (err)
		return false;

	/* NOTE(review): if this SFD write fails, the SMID entry programmed
	 * above is left in hardware and mid->mid keeps pointing at an index
	 * that is never marked used — confirm whether cleanup is needed.
	 */
	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}

/* Remove the group's FDB record from hardware and release its MID index.
 * A no-op when the entry was never written (e.g. multicast disabled).
 */
static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_mid *mid)
{
	if (!mid->in_hw)
		return 0;

	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = false;
	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
				    false);
}

/* Allocate a new MDB group for {addr, fid} and link it on the bridge. The
 * hardware entry is only written when the bridge has multicast enabled;
 * otherwise it is deferred to mlxsw_sp_bridge_mdb_mc_enable_sync().
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;
	size_t alloc_size;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));

	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}

/* Drop this port from the group; free the group entirely when it was the
 * last member.
 */
static int
mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mid *mid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err = 0;

	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
	if (bitmap_empty(mid->ports_in_mid,
			 mlxsw_core_max_ports(mlxsw_sp->core))) {
		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
		list_del(&mid->list);
		kfree(mid->ports_in_mid);
		kfree(mid);
	}
	return err;
}

/* switchdev SWITCHDEV_OBJ_ID_PORT_MDB add handler (commit phase only).
 * Creates the group on first use and adds the port to its SMID, unless the
 * port is an mrouter port (those receive all multicast anyway).
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	if (!bridge_device->multicast_enabled)
		return 0;

	/* mrouter ports are flooded all multicast via the SMID mask */
	if (bridge_port->mrouter)
		return 0;

	err =
mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true); 1641 if (err) { 1642 netdev_err(dev, "Unable to set SMID\n"); 1643 goto err_out; 1644 } 1645 1646 return 0; 1647 1648 err_out: 1649 mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); 1650 return err; 1651 } 1652 1653 static void 1654 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, 1655 struct mlxsw_sp_bridge_device 1656 *bridge_device) 1657 { 1658 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1659 struct mlxsw_sp_mid *mid; 1660 bool mc_enabled; 1661 1662 mc_enabled = bridge_device->multicast_enabled; 1663 1664 list_for_each_entry(mid, &bridge_device->mids_list, list) { 1665 if (mc_enabled) 1666 mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, 1667 bridge_device); 1668 else 1669 mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); 1670 } 1671 } 1672 1673 static void 1674 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, 1675 struct mlxsw_sp_bridge_port *bridge_port, 1676 bool add) 1677 { 1678 struct mlxsw_sp_bridge_device *bridge_device; 1679 struct mlxsw_sp_mid *mid; 1680 1681 bridge_device = bridge_port->bridge_device; 1682 1683 list_for_each_entry(mid, &bridge_device->mids_list, list) { 1684 if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) 1685 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add); 1686 } 1687 } 1688 1689 struct mlxsw_sp_span_respin_work { 1690 struct work_struct work; 1691 struct mlxsw_sp *mlxsw_sp; 1692 }; 1693 1694 static void mlxsw_sp_span_respin_work(struct work_struct *work) 1695 { 1696 struct mlxsw_sp_span_respin_work *respin_work = 1697 container_of(work, struct mlxsw_sp_span_respin_work, work); 1698 1699 rtnl_lock(); 1700 mlxsw_sp_span_respin(respin_work->mlxsw_sp); 1701 rtnl_unlock(); 1702 kfree(respin_work); 1703 } 1704 1705 static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp) 1706 { 1707 struct mlxsw_sp_span_respin_work *respin_work; 1708 1709 respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC); 1710 if 
(!respin_work) 1711 return; 1712 1713 INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work); 1714 respin_work->mlxsw_sp = mlxsw_sp; 1715 1716 mlxsw_core_schedule_work(&respin_work->work); 1717 } 1718 1719 static int mlxsw_sp_port_obj_add(struct net_device *dev, 1720 const struct switchdev_obj *obj, 1721 struct switchdev_trans *trans) 1722 { 1723 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1724 const struct switchdev_obj_port_vlan *vlan; 1725 int err = 0; 1726 1727 switch (obj->id) { 1728 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1729 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); 1730 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans); 1731 1732 if (switchdev_trans_ph_prepare(trans)) { 1733 /* The event is emitted before the changes are actually 1734 * applied to the bridge. Therefore schedule the respin 1735 * call for later, so that the respin logic sees the 1736 * updated bridge state. 1737 */ 1738 mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); 1739 } 1740 break; 1741 case SWITCHDEV_OBJ_ID_PORT_MDB: 1742 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, 1743 SWITCHDEV_OBJ_PORT_MDB(obj), 1744 trans); 1745 break; 1746 default: 1747 err = -EOPNOTSUPP; 1748 break; 1749 } 1750 1751 return err; 1752 } 1753 1754 static void 1755 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, 1756 struct mlxsw_sp_bridge_port *bridge_port, u16 vid) 1757 { 1758 u16 pvid = mlxsw_sp_port->pvid == vid ? 
0 : vid; 1759 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1760 1761 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1762 if (WARN_ON(!mlxsw_sp_port_vlan)) 1763 return; 1764 1765 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1766 mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); 1767 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1768 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 1769 } 1770 1771 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 1772 const struct switchdev_obj_port_vlan *vlan) 1773 { 1774 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1775 struct net_device *orig_dev = vlan->obj.orig_dev; 1776 struct mlxsw_sp_bridge_port *bridge_port; 1777 u16 vid; 1778 1779 if (netif_is_bridge_master(orig_dev)) 1780 return -EOPNOTSUPP; 1781 1782 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1783 if (WARN_ON(!bridge_port)) 1784 return -EINVAL; 1785 1786 if (!bridge_port->bridge_device->vlan_enabled) 1787 return 0; 1788 1789 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) 1790 mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid); 1791 1792 return 0; 1793 } 1794 1795 static int 1796 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, 1797 struct mlxsw_sp_bridge_port *bridge_port, 1798 struct mlxsw_sp_mid *mid) 1799 { 1800 struct net_device *dev = mlxsw_sp_port->dev; 1801 int err; 1802 1803 if (bridge_port->bridge_device->multicast_enabled && 1804 !bridge_port->mrouter) { 1805 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); 1806 if (err) 1807 netdev_err(dev, "Unable to remove port from SMID\n"); 1808 } 1809 1810 err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); 1811 if (err) 1812 netdev_err(dev, "Unable to remove MC SFD\n"); 1813 1814 return err; 1815 } 1816 1817 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, 1818 const struct switchdev_obj_port_mdb *mdb) 1819 { 1820 struct mlxsw_sp *mlxsw_sp = 
				    mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
}

/* Remove the port from every MDB group of the bridge. For groups the port
 * is not a member of, only undo the SMID membership it got by virtue of
 * being an mrouter port. Safe iteration: __mlxsw_sp_port_mdb_del() may free
 * the group when the port was its last member.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid, *tmp;

	bridge_device = bridge_port->bridge_device;

	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
						mid);
		} else if (bridge_device->multicast_enabled &&
			   bridge_port->mrouter) {
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		}
	}
}

/* switchdev object delete entry point for VLAN and MDB objects */
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
1881 break; 1882 case SWITCHDEV_OBJ_ID_PORT_MDB: 1883 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port, 1884 SWITCHDEV_OBJ_PORT_MDB(obj)); 1885 break; 1886 default: 1887 err = -EOPNOTSUPP; 1888 break; 1889 } 1890 1891 mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); 1892 1893 return err; 1894 } 1895 1896 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, 1897 u16 lag_id) 1898 { 1899 struct mlxsw_sp_port *mlxsw_sp_port; 1900 u64 max_lag_members; 1901 int i; 1902 1903 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 1904 MAX_LAG_MEMBERS); 1905 for (i = 0; i < max_lag_members; i++) { 1906 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); 1907 if (mlxsw_sp_port) 1908 return mlxsw_sp_port; 1909 } 1910 return NULL; 1911 } 1912 1913 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { 1914 .switchdev_port_attr_get = mlxsw_sp_port_attr_get, 1915 .switchdev_port_attr_set = mlxsw_sp_port_attr_set, 1916 .switchdev_port_obj_add = mlxsw_sp_port_obj_add, 1917 .switchdev_port_obj_del = mlxsw_sp_port_obj_del, 1918 }; 1919 1920 static int 1921 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, 1922 struct mlxsw_sp_bridge_port *bridge_port, 1923 struct mlxsw_sp_port *mlxsw_sp_port, 1924 struct netlink_ext_ack *extack) 1925 { 1926 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1927 1928 if (is_vlan_dev(bridge_port->dev)) { 1929 NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge"); 1930 return -EINVAL; 1931 } 1932 1933 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1); 1934 if (WARN_ON(!mlxsw_sp_port_vlan)) 1935 return -EINVAL; 1936 1937 /* Let VLAN-aware bridge take care of its own VLANs */ 1938 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 1939 1940 return 0; 1941 } 1942 1943 static void 1944 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device, 1945 struct mlxsw_sp_bridge_port *bridge_port, 1946 struct mlxsw_sp_port 
*mlxsw_sp_port) 1947 { 1948 mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 1949 /* Make sure untagged frames are allowed to ingress */ 1950 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 1951 } 1952 1953 static struct mlxsw_sp_fid * 1954 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device, 1955 u16 vid) 1956 { 1957 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); 1958 1959 return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid); 1960 } 1961 1962 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = { 1963 .port_join = mlxsw_sp_bridge_8021q_port_join, 1964 .port_leave = mlxsw_sp_bridge_8021q_port_leave, 1965 .fid_get = mlxsw_sp_bridge_8021q_fid_get, 1966 }; 1967 1968 static bool 1969 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port, 1970 const struct net_device *br_dev) 1971 { 1972 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1973 1974 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list, 1975 list) { 1976 if (mlxsw_sp_port_vlan->bridge_port && 1977 mlxsw_sp_port_vlan->bridge_port->bridge_device->dev == 1978 br_dev) 1979 return true; 1980 } 1981 1982 return false; 1983 } 1984 1985 static int 1986 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, 1987 struct mlxsw_sp_bridge_port *bridge_port, 1988 struct mlxsw_sp_port *mlxsw_sp_port, 1989 struct netlink_ext_ack *extack) 1990 { 1991 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1992 struct net_device *dev = bridge_port->dev; 1993 u16 vid; 1994 1995 vid = is_vlan_dev(dev) ? 
	      vlan_dev_vlan_id(dev) : 1;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
}

/* Leave a VLAN-unaware (802.1D) bridge: unbind the single {Port, VID}
 * backing the bridge port (VID of the VLAN upper, or 1 for the port itself).
 */
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}

/* 802.1D bridges share one FID per bridge, keyed by its ifindex; the VID
 * argument is ignored by design.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
}

static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
};

/* Called when 'brport_dev' (port or its VLAN upper) is enslaved to bridge
 * 'br_dev'. Takes a reference on the bridge port and dispatches to the
 * bridge-type specific join operation; the reference is dropped on failure.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	return 0;

err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}

/* Reverse of mlxsw_sp_port_bridge_join(); tolerates already-gone bridge
 * state since unlinking events can arrive after teardown.
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}

/* Notify the bridge about a hardware-learned/aged FDB entry.
 * NOTE(review): 'info' is only partially initialized here — confirm the
 * remaining switchdev_notifier_fdb_info fields are ignored by listeners of
 * these notifier types.
 */
static void
mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
			    const char *mac, u16 vid,
			    struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;

	info.addr = mac;
	info.vid = vid;
	call_switchdev_notifiers(type, dev, &info.info);
}

/* Handle a learned/aged-out FDB record for a physical port: mirror the
 * entry into hardware as a dynamic record and notify the bridge. Unknown
 * {port, FID} combinations cause the record to be silently removed.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool
	     do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges report VID 0 to the bridge layer */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	/* Stale record: remove it from hardware without notifying anyone */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

/* LAG counterpart of mlxsw_sp_fdb_notify_mac_process(): the record is keyed
 * by LAG ID and (for VLAN-unaware bridges) by lag_vid.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	/* Stale record: remove it from hardware without notifying anyone */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

/* Dispatch a single SFN record by type; unknown types are ignored */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

/* Re-arm the FDB notification polling work */
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;

	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
			       msecs_to_jiffies(bridge->fdb_notify.interval));
}

/* Periodic work: poll the SFN register for learned/aged FDB records and
 * process each one under RTNL.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for
(i = 0; i < num_rec; i++) 2277 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); 2278 2279 out: 2280 rtnl_unlock(); 2281 kfree(sfn_pl); 2282 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 2283 } 2284 2285 struct mlxsw_sp_switchdev_event_work { 2286 struct work_struct work; 2287 struct switchdev_notifier_fdb_info fdb_info; 2288 struct net_device *dev; 2289 unsigned long event; 2290 }; 2291 2292 static void mlxsw_sp_switchdev_event_work(struct work_struct *work) 2293 { 2294 struct mlxsw_sp_switchdev_event_work *switchdev_work = 2295 container_of(work, struct mlxsw_sp_switchdev_event_work, work); 2296 struct net_device *dev = switchdev_work->dev; 2297 struct switchdev_notifier_fdb_info *fdb_info; 2298 struct mlxsw_sp_port *mlxsw_sp_port; 2299 int err; 2300 2301 rtnl_lock(); 2302 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 2303 if (!mlxsw_sp_port) 2304 goto out; 2305 2306 switch (switchdev_work->event) { 2307 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2308 fdb_info = &switchdev_work->fdb_info; 2309 if (!fdb_info->added_by_user) 2310 break; 2311 err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true); 2312 if (err) 2313 break; 2314 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, 2315 fdb_info->addr, 2316 fdb_info->vid, dev); 2317 break; 2318 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2319 fdb_info = &switchdev_work->fdb_info; 2320 if (!fdb_info->added_by_user) 2321 break; 2322 mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); 2323 break; 2324 case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ 2325 case SWITCHDEV_FDB_DEL_TO_BRIDGE: 2326 /* These events are only used to potentially update an existing 2327 * SPAN mirror. 
2328 */ 2329 break; 2330 } 2331 2332 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); 2333 2334 out: 2335 rtnl_unlock(); 2336 kfree(switchdev_work->fdb_info.addr); 2337 kfree(switchdev_work); 2338 dev_put(dev); 2339 } 2340 2341 /* Called under rcu_read_lock() */ 2342 static int mlxsw_sp_switchdev_event(struct notifier_block *unused, 2343 unsigned long event, void *ptr) 2344 { 2345 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2346 struct mlxsw_sp_switchdev_event_work *switchdev_work; 2347 struct switchdev_notifier_fdb_info *fdb_info = ptr; 2348 2349 if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) 2350 return NOTIFY_DONE; 2351 2352 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 2353 if (!switchdev_work) 2354 return NOTIFY_BAD; 2355 2356 INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work); 2357 switchdev_work->dev = dev; 2358 switchdev_work->event = event; 2359 2360 switch (event) { 2361 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ 2362 case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */ 2363 case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ 2364 case SWITCHDEV_FDB_DEL_TO_BRIDGE: 2365 memcpy(&switchdev_work->fdb_info, ptr, 2366 sizeof(switchdev_work->fdb_info)); 2367 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 2368 if (!switchdev_work->fdb_info.addr) 2369 goto err_addr_alloc; 2370 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, 2371 fdb_info->addr); 2372 /* Take a reference on the device. 
This can be either 2373 * upper device containig mlxsw_sp_port or just a 2374 * mlxsw_sp_port 2375 */ 2376 dev_hold(dev); 2377 break; 2378 default: 2379 kfree(switchdev_work); 2380 return NOTIFY_DONE; 2381 } 2382 2383 mlxsw_core_schedule_work(&switchdev_work->work); 2384 2385 return NOTIFY_DONE; 2386 2387 err_addr_alloc: 2388 kfree(switchdev_work); 2389 return NOTIFY_BAD; 2390 } 2391 2392 static struct notifier_block mlxsw_sp_switchdev_notifier = { 2393 .notifier_call = mlxsw_sp_switchdev_event, 2394 }; 2395 2396 u8 2397 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port) 2398 { 2399 return bridge_port->stp_state; 2400 } 2401 2402 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) 2403 { 2404 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; 2405 int err; 2406 2407 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME); 2408 if (err) { 2409 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); 2410 return err; 2411 } 2412 2413 err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier); 2414 if (err) { 2415 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n"); 2416 return err; 2417 } 2418 2419 INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work); 2420 bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; 2421 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 2422 return 0; 2423 } 2424 2425 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) 2426 { 2427 cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw); 2428 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); 2429 2430 } 2431 2432 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 2433 { 2434 struct mlxsw_sp_bridge *bridge; 2435 2436 bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL); 2437 if (!bridge) 2438 return -ENOMEM; 2439 mlxsw_sp->bridge = bridge; 2440 bridge->mlxsw_sp = mlxsw_sp; 2441 2442 INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list); 2443 2444 bridge->bridge_8021q_ops 
= &mlxsw_sp_bridge_8021q_ops; 2445 bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; 2446 2447 return mlxsw_sp_fdb_init(mlxsw_sp); 2448 } 2449 2450 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 2451 { 2452 mlxsw_sp_fdb_fini(mlxsw_sp); 2453 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); 2454 kfree(mlxsw_sp->bridge); 2455 } 2456 2457 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) 2458 { 2459 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; 2460 } 2461 2462 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port) 2463 { 2464 } 2465