// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/vxlan.h>

#include "spectrum_span.h"
#include "spectrum_switchdev.h"
#include "spectrum.h"
#include "core.h"
#include "reg.h"

struct mlxsw_sp_bridge_ops;

/* Per-ASIC bridge state: FDB notification polling work, FDB ageing time and
 * the list of Linux bridge devices currently offloaded to the device.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* At most one VLAN-aware bridge may be offloaded at a time. */
	bool vlan_enabled_exists;
	struct list_head bridges_list;
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};

/* One offloaded Linux bridge device (VLAN-aware 802.1Q or VLAN-unaware
 * 802.1D), tracking its enslaved ports and MDB entries.
 */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	struct list_head list;		/* member of bridge->bridges_list */
	struct list_head ports_list;	/* bridge ports under this bridge */
	struct list_head mids_list;	/* multicast (MDB) entries */
	u8 vlan_enabled:1,
	   multicast_enabled:1,
	   mrouter:1;
	const struct mlxsw_sp_bridge_ops *ops; /* 802.1Q or 802.1D behavior */
};

/* One bridge port: either a front-panel port netdev or a LAG netdev that is
 * enslaved to an offloaded bridge. Reference counted per {port, VLAN} use.
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* member of bridge_device->ports_list */
	struct list_head vlans_list;	/* VLANs configured on this port */
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags;		/* BR_* bridge port flags */
	bool mrouter;
	bool lagged;			/* selects which union member is valid */
	union {
		u16 lag_id;
		u16 system_port;
	};
};

/* One VLAN on a bridge port, linking all {port, VLAN} entries that use it. */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* member of bridge_port->vlans_list */
	struct list_head
port_vlan_list;	/* {port, VLAN} entries using this VLAN */
	u16 vid;
};

/* Operations that differ between VLAN-aware (802.1Q) and VLAN-unaware
 * (802.1D) bridges: port membership and FID (filtering ID) management.
 */
struct mlxsw_sp_bridge_ops {
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};

static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);

static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);

/* Look up the offloaded bridge device corresponding to @br_dev, or NULL. */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
			    const struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
		if (bridge_device->dev == br_dev)
			return bridge_device;

	return NULL;
}

/* Report whether the Linux bridge @br_dev is currently offloaded. */
bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
					 const struct net_device *br_dev)
{
	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
}

/* netdev_walk_all_upper_dev_rcu() callback: destroy the RIF of one upper. */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    void *data)
{
	struct mlxsw_sp *mlxsw_sp = data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}

/* Destroy the router interface of @dev and of every netdev stacked above. */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      mlxsw_sp);
}

static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);

	/* Hardware limitation: only a single VLAN-aware bridge. */
	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		bridge->vlan_enabled_exists = true;
		bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mids_list);
	list_add(&bridge_device->list, &bridge->bridges_list);

	return bridge_device;
}

static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	/* All ports and MDB entries must be gone before destruction. */
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}

/* Return the existing bridge device for @br_dev or create a new one. */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (bridge_device)
		return bridge_device;

	return mlxsw_sp_bridge_device_create(bridge, br_dev);
}

/* Destroy the bridge device once its last port has been removed. */
static void
mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
			   struct mlxsw_sp_bridge_device *bridge_device)
{
	if (list_empty(&bridge_device->ports_list))
		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
}

static struct mlxsw_sp_bridge_port *
__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
			    const struct net_device *brport_dev)
{
	struct mlxsw_sp_bridge_port *bridge_port;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		if (bridge_port->dev == brport_dev)
			return bridge_port;
	}

	return NULL;
}

/* Resolve @brport_dev to its bridge port via its master bridge device. */
struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
			  struct net_device *brport_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;

	if (!br_dev)
		return NULL;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		return NULL;

	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
}

static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
			    struct net_device *brport_dev)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
	if (!bridge_port)
		return NULL;

	/* NOTE(review): no NULL check on the lower-dev lookup here — the
	 * caller presumably guarantees @brport_dev has a mlxsw lower device.
	 */
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
	bridge_port->lagged = mlxsw_sp_port->lagged;
	if (bridge_port->lagged)
		bridge_port->lag_id = mlxsw_sp_port->lag_id;
	else
		bridge_port->system_port = mlxsw_sp_port->local_port;
	bridge_port->dev = brport_dev;
	bridge_port->bridge_device = bridge_device;
	bridge_port->stp_state = BR_STATE_DISABLED;
	/* Mirror the bridge layer's default port flags. */
	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
			     BR_MCAST_FLOOD;
	INIT_LIST_HEAD(&bridge_port->vlans_list);
	list_add(&bridge_port->list, &bridge_device->ports_list);
	bridge_port->ref_count = 1;

	return bridge_port;
}

static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}

/* Take a reference on the bridge port of @brport_dev, creating it (and its
 * bridge device, if needed) on first use.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
	if (!bridge_port) {
		err = -ENOMEM;
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}

/* Drop a reference; on the last put destroy the bridge port and release the
 * bridge device reference it held.
 */
static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
				     struct mlxsw_sp_bridge_port
				     *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	if (--bridge_port->ref_count != 0)
		return;
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_bridge_port_destroy(bridge_port);
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
}

/* Find the {port, VLAN} on @mlxsw_sp_port that is bound to @bridge_device.
 * For a VLAN-aware bridge the VID must also match; for a VLAN-unaware bridge
 * any VID bound to the bridge matches.
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct mlxsw_sp_bridge_device *
				  bridge_device,
				  u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		if (!mlxsw_sp_port_vlan->bridge_port)
			continue;
		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
		    bridge_device)
			continue;
		if (bridge_device->vlan_enabled &&
		    mlxsw_sp_port_vlan->vid != vid)
			continue;
		return mlxsw_sp_port_vlan;
	}

	return NULL;
}

/* Find the {port, VLAN} on @mlxsw_sp_port whose FID has index @fid_index. */
static struct mlxsw_sp_port_vlan*
mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
			       u16 fid_index)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
			return mlxsw_sp_port_vlan;
	}

	return NULL;
}

static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
			  u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		if (bridge_vlan->vid == vid)
			return bridge_vlan;
	}

	return NULL;
}

static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
	if (!bridge_vlan)
		return NULL;

	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
	bridge_vlan->vid = vid;
	list_add(&bridge_vlan->list, &bridge_port->vlans_list);

	return bridge_vlan;
}

static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	list_del(&bridge_vlan->list);
	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
	kfree(bridge_vlan);
}

/* Return the existing bridge VLAN for @vid or create a new one. */
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	if (bridge_vlan)
		return bridge_vlan;

	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
}

/* Destroy the bridge VLAN once no {port, VLAN} entry uses it anymore. */
static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	if (list_empty(&bridge_vlan->port_vlan_list))
		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
}

/* Apply STP @state to @mlxsw_sp_port's {port, VLAN} using this bridge VLAN;
 * a port has at most one entry per bridge VLAN, hence the early return.
 */
static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
				  u8 state)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
						 bridge_vlan->vid, state);
	}

	return 0;
}

/* SWITCHDEV_ATTR_ID_PORT_STP_STATE handler: set STP state on all VLANs of
 * the bridge port, rolling back already-updated VLANs on failure.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Restore the previous state on the VLANs already updated. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}

/* Set flood membership of @packet_type for @mlxsw_sp_port's {port, VLAN}
 * using this bridge VLAN.
 */
static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
				    enum mlxsw_sp_flood_type packet_type,
				    bool member)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
					      packet_type,
					      mlxsw_sp_port->local_port,
					      member);
	}

	return 0;
}

/* Set flood membership of @packet_type on every VLAN of the bridge port,
 * rolling back on failure.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}

/* Enable/disable learning for @mlxsw_sp_port's {port, VLAN} using this
 * bridge VLAN.
 */
static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
				       bool set)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid = bridge_vlan->vid;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
	}

	return 0;
}

/* Enable/disable learning on every VLAN of the bridge port, rolling back on
 * failure.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}

/* Reject bridge port flags the device cannot offload. */
static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
					       *mlxsw_sp_port,
					       struct switchdev_trans *trans,
					       unsigned long brport_flags)
{
	if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
		return -EINVAL;

	return 0;
}

/* SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS handler: program unicast flood,
 * learning and (when multicast snooping is disabled) multicast flood.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   struct net_device *orig_dev,
					   unsigned long brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	err =
mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
					   MLXSW_SP_FLOOD_TYPE_UC,
					   brport_flags & BR_FLOOD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
						brport_flags & BR_LEARNING);
	if (err)
		return err;

	/* With multicast snooping enabled, MC flood membership is driven by
	 * mrouter state instead of the BR_MCAST_FLOOD flag.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   brport_flags &
						   BR_MCAST_FLOOD);
	if (err)
		return err;

out:
	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
	return 0;
}

/* Program the FDB ageing time (in seconds) via the SFDAT register and cache
 * it in the bridge state.
 */
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->bridge->ageing_time = ageing_time;
	return 0;
}

/* SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME handler: range-check in the prepare
 * phase, program the device in the commit phase.
 */
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

/* SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING handler: toggling VLAN filtering
 * on an already-offloaded bridge is not supported.
 */
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	if (!switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	if (bridge_device->vlan_enabled == vlan_enabled)
		return 0;

	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
	return -EINVAL;
}

/* SWITCHDEV_ATTR_ID_PORT_MROUTER handler: when multicast snooping is
 * enabled, an mrouter port floods all multicast and joins all MDB entries.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	/* Remember the state so it can be applied if snooping is enabled. */
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}

/* Decide whether the port should be in the MC flood table: mrouter state
 * when snooping is on, the BR_MCAST_FLOOD flag otherwise.
 */
static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
{
	const struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = bridge_port->bridge_device;
	return bridge_device->multicast_enabled ? bridge_port->mrouter :
					bridge_port->flags & BR_MCAST_FLOOD;
}

/* SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED handler: sync MDB offload state and
 * recompute MC flood membership for every port of the bridge.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->multicast_enabled != !mc_disabled) {
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}

/* Add/remove the router port to/from the MDB entry @mid_idx via SMID. */
static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
					 u16 mid_idx, bool add)
{
	char *smid_pl;
	int err;

	/* SMID payload is large, so it is heap-allocated. */
	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx,
			    mlxsw_sp_router_port(mlxsw_sp), add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

/* Add/remove the router port to/from every MDB entry of the bridge. */
static void
mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool add)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &bridge_device->mids_list, list)
		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
}

/* SWITCHDEV_ATTR_ID_BRIDGE_MROUTER handler: track whether the bridge itself
 * acts as a multicast router and update the MDB entries accordingly.
 */
static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_trans *trans,
				  struct net_device *orig_dev,
				  bool is_mrouter)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->mrouter != is_mrouter)
		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
						   is_mrouter);
	bridge_device->mrouter = is_mrouter;
	return 0;
}

/* switchdev attr_set entry point: dispatch per attribute ID. */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
							  trans,
							  attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}

/* Bind the {port, VLAN} to the bridge's FID: set UC/MC/BC flood membership
 * and map the {port, VID} to the FID. Unwinds fully on failure.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}

/* Reverse of mlxsw_sp_port_vlan_fid_join(). */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}

/* Compute the PVID the port should use after @vid is added/changed. */
static u16
mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
			     u16 vid, bool is_pvid)
{
	if (is_pvid)
		return vid;
	else if (mlxsw_sp_port->pvid == vid)
		return 0;	/* Dis-allow untagged packets */
	else
		return mlxsw_sp_port->pvid;
}

/* Join the {port, VLAN} to the bridge port: bind to the FID, program
 * learning and STP state, and link into the bridge VLAN.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold a bridge port reference for the lifetime of this binding. */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}

/* Undo mlxsw_sp_port_vlan_bridge_join(): unlink from the bridge VLAN,
 * disable learning/STP, flush FDB/MDB records when this was the last user,
 * and release the FID and bridge port references.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}

/* Add (or update the flags of) VLAN @vid on a bridge port: create the
 * {port, VLAN} if needed, program tagging and PVID, then join the bridge.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}

/* Refuse VLAN changes on the bridge itself that would change or remove the
 * PVID while a router interface is using it.
 */
static int
mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
				const struct net_device *br_dev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	u16 pvid;
	u16 vid;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
	if (!rif)
		return 0;
	fid = mlxsw_sp_rif_fid(rif);
	pvid = mlxsw_sp_fid_8021q_vid(fid);

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
			if (vid != pvid) {
				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
				return -EBUSY;
			}
		} else {
			if (vid == pvid) {
				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
				return -EBUSY;
			}
		}
	}

	return 0;
}

/* SWITCHDEV_OBJ_ID_PORT_VLAN add handler. */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans,
				   struct netlink_ext_ack *extack)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		/* Bridge-master VLAN entries are only validated against the
		 * router interface PVID; they are otherwise unsupported.
		 */
		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	if (switchdev_trans_ph_commit(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid, extack);
		if (err)
			return err;
	}

	return 0;
}

/* Select the SFDF flush scope matching the bridge port type. */
static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
{
	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
}

/* Flush all FDB records of @fid_index learned on this bridge port. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	bool lagged = bridge_port->lagged;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];
	u16 system_port;

	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ?
MLXSW_REG_SFD_OP_WRITE_EDIT : 1218 MLXSW_REG_SFD_OP_WRITE_REMOVE; 1219 } 1220 1221 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp, 1222 const char *mac, u16 fid, 1223 enum mlxsw_sp_l3proto proto, 1224 const union mlxsw_sp_l3addr *addr, 1225 bool adding, bool dynamic) 1226 { 1227 enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto; 1228 char *sfd_pl; 1229 u8 num_rec; 1230 u32 uip; 1231 int err; 1232 1233 switch (proto) { 1234 case MLXSW_SP_L3_PROTO_IPV4: 1235 uip = be32_to_cpu(addr->addr4); 1236 sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4; 1237 break; 1238 case MLXSW_SP_L3_PROTO_IPV6: /* fall through */ 1239 default: 1240 WARN_ON(1); 1241 return -EOPNOTSUPP; 1242 } 1243 1244 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 1245 if (!sfd_pl) 1246 return -ENOMEM; 1247 1248 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 1249 mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0, 1250 mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, 1251 MLXSW_REG_SFD_REC_ACTION_NOP, uip, 1252 sfd_proto); 1253 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); 1254 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1255 if (err) 1256 goto out; 1257 1258 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) 1259 err = -EBUSY; 1260 1261 out: 1262 kfree(sfd_pl); 1263 return err; 1264 } 1265 1266 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1267 const char *mac, u16 fid, bool adding, 1268 enum mlxsw_reg_sfd_rec_action action, 1269 enum mlxsw_reg_sfd_rec_policy policy) 1270 { 1271 char *sfd_pl; 1272 u8 num_rec; 1273 int err; 1274 1275 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 1276 if (!sfd_pl) 1277 return -ENOMEM; 1278 1279 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 1280 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port); 1281 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); 1282 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1283 if (err) 1284 goto out; 1285 1286 if (num_rec != 
mlxsw_reg_sfd_num_rec_get(sfd_pl)) 1287 err = -EBUSY; 1288 1289 out: 1290 kfree(sfd_pl); 1291 return err; 1292 } 1293 1294 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1295 const char *mac, u16 fid, bool adding, 1296 bool dynamic) 1297 { 1298 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, 1299 MLXSW_REG_SFD_REC_ACTION_NOP, 1300 mlxsw_sp_sfd_rec_policy(dynamic)); 1301 } 1302 1303 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, 1304 bool adding) 1305 { 1306 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, 1307 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, 1308 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY); 1309 } 1310 1311 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, 1312 const char *mac, u16 fid, u16 lag_vid, 1313 bool adding, bool dynamic) 1314 { 1315 char *sfd_pl; 1316 u8 num_rec; 1317 int err; 1318 1319 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 1320 if (!sfd_pl) 1321 return -ENOMEM; 1322 1323 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 1324 mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), 1325 mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, 1326 lag_vid, lag_id); 1327 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); 1328 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1329 if (err) 1330 goto out; 1331 1332 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) 1333 err = -EBUSY; 1334 1335 out: 1336 kfree(sfd_pl); 1337 return err; 1338 } 1339 1340 static int 1341 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port, 1342 struct switchdev_notifier_fdb_info *fdb_info, bool adding) 1343 { 1344 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1345 struct net_device *orig_dev = fdb_info->info.dev; 1346 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1347 struct mlxsw_sp_bridge_device *bridge_device; 1348 struct mlxsw_sp_bridge_port *bridge_port; 1349 u16 fid_index, vid; 1350 1351 bridge_port = 
mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1352 if (!bridge_port) 1353 return -EINVAL; 1354 1355 bridge_device = bridge_port->bridge_device; 1356 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1357 bridge_device, 1358 fdb_info->vid); 1359 if (!mlxsw_sp_port_vlan) 1360 return 0; 1361 1362 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1363 vid = mlxsw_sp_port_vlan->vid; 1364 1365 if (!bridge_port->lagged) 1366 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 1367 bridge_port->system_port, 1368 fdb_info->addr, fid_index, 1369 adding, false); 1370 else 1371 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, 1372 bridge_port->lag_id, 1373 fdb_info->addr, fid_index, 1374 vid, adding, false); 1375 } 1376 1377 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, 1378 u16 fid, u16 mid_idx, bool adding) 1379 { 1380 char *sfd_pl; 1381 u8 num_rec; 1382 int err; 1383 1384 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 1385 if (!sfd_pl) 1386 return -ENOMEM; 1387 1388 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 1389 mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, 1390 MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); 1391 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); 1392 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1393 if (err) 1394 goto out; 1395 1396 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) 1397 err = -EBUSY; 1398 1399 out: 1400 kfree(sfd_pl); 1401 return err; 1402 } 1403 1404 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, 1405 long *ports_bitmap, 1406 bool set_router_port) 1407 { 1408 char *smid_pl; 1409 int err, i; 1410 1411 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); 1412 if (!smid_pl) 1413 return -ENOMEM; 1414 1415 mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false); 1416 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) { 1417 if (mlxsw_sp->ports[i]) 1418 mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); 1419 } 1420 1421 
mlxsw_reg_smid_port_mask_set(smid_pl, 1422 mlxsw_sp_router_port(mlxsw_sp), 1); 1423 1424 for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core)) 1425 mlxsw_reg_smid_port_set(smid_pl, i, 1); 1426 1427 mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp), 1428 set_router_port); 1429 1430 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); 1431 kfree(smid_pl); 1432 return err; 1433 } 1434 1435 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, 1436 u16 mid_idx, bool add) 1437 { 1438 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1439 char *smid_pl; 1440 int err; 1441 1442 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); 1443 if (!smid_pl) 1444 return -ENOMEM; 1445 1446 mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add); 1447 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); 1448 kfree(smid_pl); 1449 return err; 1450 } 1451 1452 static struct 1453 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device, 1454 const unsigned char *addr, 1455 u16 fid) 1456 { 1457 struct mlxsw_sp_mid *mid; 1458 1459 list_for_each_entry(mid, &bridge_device->mids_list, list) { 1460 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) 1461 return mid; 1462 } 1463 return NULL; 1464 } 1465 1466 static void 1467 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp, 1468 struct mlxsw_sp_bridge_port *bridge_port, 1469 unsigned long *ports_bitmap) 1470 { 1471 struct mlxsw_sp_port *mlxsw_sp_port; 1472 u64 max_lag_members, i; 1473 int lag_id; 1474 1475 if (!bridge_port->lagged) { 1476 set_bit(bridge_port->system_port, ports_bitmap); 1477 } else { 1478 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 1479 MAX_LAG_MEMBERS); 1480 lag_id = bridge_port->lag_id; 1481 for (i = 0; i < max_lag_members; i++) { 1482 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, 1483 lag_id, i); 1484 if (mlxsw_sp_port) 1485 set_bit(mlxsw_sp_port->local_port, 1486 ports_bitmap); 1487 
} 1488 } 1489 } 1490 1491 static void 1492 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap, 1493 struct mlxsw_sp_bridge_device *bridge_device, 1494 struct mlxsw_sp *mlxsw_sp) 1495 { 1496 struct mlxsw_sp_bridge_port *bridge_port; 1497 1498 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { 1499 if (bridge_port->mrouter) { 1500 mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp, 1501 bridge_port, 1502 flood_bitmap); 1503 } 1504 } 1505 } 1506 1507 static bool 1508 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, 1509 struct mlxsw_sp_mid *mid, 1510 struct mlxsw_sp_bridge_device *bridge_device) 1511 { 1512 long *flood_bitmap; 1513 int num_of_ports; 1514 int alloc_size; 1515 u16 mid_idx; 1516 int err; 1517 1518 mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap, 1519 MLXSW_SP_MID_MAX); 1520 if (mid_idx == MLXSW_SP_MID_MAX) 1521 return false; 1522 1523 num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core); 1524 alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports); 1525 flood_bitmap = kzalloc(alloc_size, GFP_KERNEL); 1526 if (!flood_bitmap) 1527 return false; 1528 1529 bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports); 1530 mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp); 1531 1532 mid->mid = mid_idx; 1533 err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap, 1534 bridge_device->mrouter); 1535 kfree(flood_bitmap); 1536 if (err) 1537 return false; 1538 1539 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx, 1540 true); 1541 if (err) 1542 return false; 1543 1544 set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap); 1545 mid->in_hw = true; 1546 return true; 1547 } 1548 1549 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp, 1550 struct mlxsw_sp_mid *mid) 1551 { 1552 if (!mid->in_hw) 1553 return 0; 1554 1555 clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); 1556 mid->in_hw = false; 1557 return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid, 1558 
false); 1559 } 1560 1561 static struct 1562 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, 1563 struct mlxsw_sp_bridge_device *bridge_device, 1564 const unsigned char *addr, 1565 u16 fid) 1566 { 1567 struct mlxsw_sp_mid *mid; 1568 size_t alloc_size; 1569 1570 mid = kzalloc(sizeof(*mid), GFP_KERNEL); 1571 if (!mid) 1572 return NULL; 1573 1574 alloc_size = sizeof(unsigned long) * 1575 BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core)); 1576 1577 mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL); 1578 if (!mid->ports_in_mid) 1579 goto err_ports_in_mid_alloc; 1580 1581 ether_addr_copy(mid->addr, addr); 1582 mid->fid = fid; 1583 mid->in_hw = false; 1584 1585 if (!bridge_device->multicast_enabled) 1586 goto out; 1587 1588 if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device)) 1589 goto err_write_mdb_entry; 1590 1591 out: 1592 list_add_tail(&mid->list, &bridge_device->mids_list); 1593 return mid; 1594 1595 err_write_mdb_entry: 1596 kfree(mid->ports_in_mid); 1597 err_ports_in_mid_alloc: 1598 kfree(mid); 1599 return NULL; 1600 } 1601 1602 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port, 1603 struct mlxsw_sp_mid *mid) 1604 { 1605 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1606 int err = 0; 1607 1608 clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); 1609 if (bitmap_empty(mid->ports_in_mid, 1610 mlxsw_core_max_ports(mlxsw_sp->core))) { 1611 err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); 1612 list_del(&mid->list); 1613 kfree(mid->ports_in_mid); 1614 kfree(mid); 1615 } 1616 return err; 1617 } 1618 1619 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, 1620 const struct switchdev_obj_port_mdb *mdb, 1621 struct switchdev_trans *trans) 1622 { 1623 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1624 struct net_device *orig_dev = mdb->obj.orig_dev; 1625 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1626 struct net_device *dev = mlxsw_sp_port->dev; 1627 struct mlxsw_sp_bridge_device 
*bridge_device; 1628 struct mlxsw_sp_bridge_port *bridge_port; 1629 struct mlxsw_sp_mid *mid; 1630 u16 fid_index; 1631 int err = 0; 1632 1633 if (switchdev_trans_ph_commit(trans)) 1634 return 0; 1635 1636 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1637 if (!bridge_port) 1638 return 0; 1639 1640 bridge_device = bridge_port->bridge_device; 1641 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1642 bridge_device, 1643 mdb->vid); 1644 if (!mlxsw_sp_port_vlan) 1645 return 0; 1646 1647 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1648 1649 mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index); 1650 if (!mid) { 1651 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr, 1652 fid_index); 1653 if (!mid) { 1654 netdev_err(dev, "Unable to allocate MC group\n"); 1655 return -ENOMEM; 1656 } 1657 } 1658 set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); 1659 1660 if (!bridge_device->multicast_enabled) 1661 return 0; 1662 1663 if (bridge_port->mrouter) 1664 return 0; 1665 1666 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true); 1667 if (err) { 1668 netdev_err(dev, "Unable to set SMID\n"); 1669 goto err_out; 1670 } 1671 1672 return 0; 1673 1674 err_out: 1675 mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); 1676 return err; 1677 } 1678 1679 static void 1680 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, 1681 struct mlxsw_sp_bridge_device 1682 *bridge_device) 1683 { 1684 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1685 struct mlxsw_sp_mid *mid; 1686 bool mc_enabled; 1687 1688 mc_enabled = bridge_device->multicast_enabled; 1689 1690 list_for_each_entry(mid, &bridge_device->mids_list, list) { 1691 if (mc_enabled) 1692 mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, 1693 bridge_device); 1694 else 1695 mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); 1696 } 1697 } 1698 1699 static void 1700 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, 1701 struct 
mlxsw_sp_bridge_port *bridge_port, 1702 bool add) 1703 { 1704 struct mlxsw_sp_bridge_device *bridge_device; 1705 struct mlxsw_sp_mid *mid; 1706 1707 bridge_device = bridge_port->bridge_device; 1708 1709 list_for_each_entry(mid, &bridge_device->mids_list, list) { 1710 if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) 1711 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add); 1712 } 1713 } 1714 1715 struct mlxsw_sp_span_respin_work { 1716 struct work_struct work; 1717 struct mlxsw_sp *mlxsw_sp; 1718 }; 1719 1720 static void mlxsw_sp_span_respin_work(struct work_struct *work) 1721 { 1722 struct mlxsw_sp_span_respin_work *respin_work = 1723 container_of(work, struct mlxsw_sp_span_respin_work, work); 1724 1725 rtnl_lock(); 1726 mlxsw_sp_span_respin(respin_work->mlxsw_sp); 1727 rtnl_unlock(); 1728 kfree(respin_work); 1729 } 1730 1731 static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp) 1732 { 1733 struct mlxsw_sp_span_respin_work *respin_work; 1734 1735 respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC); 1736 if (!respin_work) 1737 return; 1738 1739 INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work); 1740 respin_work->mlxsw_sp = mlxsw_sp; 1741 1742 mlxsw_core_schedule_work(&respin_work->work); 1743 } 1744 1745 static int mlxsw_sp_port_obj_add(struct net_device *dev, 1746 const struct switchdev_obj *obj, 1747 struct switchdev_trans *trans, 1748 struct netlink_ext_ack *extack) 1749 { 1750 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1751 const struct switchdev_obj_port_vlan *vlan; 1752 int err = 0; 1753 1754 switch (obj->id) { 1755 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1756 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); 1757 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans, 1758 extack); 1759 1760 if (switchdev_trans_ph_prepare(trans)) { 1761 /* The event is emitted before the changes are actually 1762 * applied to the bridge. 
Therefore schedule the respin 1763 * call for later, so that the respin logic sees the 1764 * updated bridge state. 1765 */ 1766 mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); 1767 } 1768 break; 1769 case SWITCHDEV_OBJ_ID_PORT_MDB: 1770 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, 1771 SWITCHDEV_OBJ_PORT_MDB(obj), 1772 trans); 1773 break; 1774 default: 1775 err = -EOPNOTSUPP; 1776 break; 1777 } 1778 1779 return err; 1780 } 1781 1782 static void 1783 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, 1784 struct mlxsw_sp_bridge_port *bridge_port, u16 vid) 1785 { 1786 u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid; 1787 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1788 1789 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1790 if (WARN_ON(!mlxsw_sp_port_vlan)) 1791 return; 1792 1793 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1794 mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); 1795 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1796 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1797 } 1798 1799 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 1800 const struct switchdev_obj_port_vlan *vlan) 1801 { 1802 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1803 struct net_device *orig_dev = vlan->obj.orig_dev; 1804 struct mlxsw_sp_bridge_port *bridge_port; 1805 u16 vid; 1806 1807 if (netif_is_bridge_master(orig_dev)) 1808 return -EOPNOTSUPP; 1809 1810 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1811 if (WARN_ON(!bridge_port)) 1812 return -EINVAL; 1813 1814 if (!bridge_port->bridge_device->vlan_enabled) 1815 return 0; 1816 1817 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) 1818 mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid); 1819 1820 return 0; 1821 } 1822 1823 static int 1824 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, 1825 struct mlxsw_sp_bridge_port *bridge_port, 1826 struct mlxsw_sp_mid 
*mid) 1827 { 1828 struct net_device *dev = mlxsw_sp_port->dev; 1829 int err; 1830 1831 if (bridge_port->bridge_device->multicast_enabled && 1832 !bridge_port->mrouter) { 1833 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); 1834 if (err) 1835 netdev_err(dev, "Unable to remove port from SMID\n"); 1836 } 1837 1838 err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); 1839 if (err) 1840 netdev_err(dev, "Unable to remove MC SFD\n"); 1841 1842 return err; 1843 } 1844 1845 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, 1846 const struct switchdev_obj_port_mdb *mdb) 1847 { 1848 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1849 struct net_device *orig_dev = mdb->obj.orig_dev; 1850 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1851 struct mlxsw_sp_bridge_device *bridge_device; 1852 struct net_device *dev = mlxsw_sp_port->dev; 1853 struct mlxsw_sp_bridge_port *bridge_port; 1854 struct mlxsw_sp_mid *mid; 1855 u16 fid_index; 1856 1857 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1858 if (!bridge_port) 1859 return 0; 1860 1861 bridge_device = bridge_port->bridge_device; 1862 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1863 bridge_device, 1864 mdb->vid); 1865 if (!mlxsw_sp_port_vlan) 1866 return 0; 1867 1868 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1869 1870 mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index); 1871 if (!mid) { 1872 netdev_err(dev, "Unable to remove port from MC DB\n"); 1873 return -EINVAL; 1874 } 1875 1876 return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid); 1877 } 1878 1879 static void 1880 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1881 struct mlxsw_sp_bridge_port *bridge_port) 1882 { 1883 struct mlxsw_sp_bridge_device *bridge_device; 1884 struct mlxsw_sp_mid *mid, *tmp; 1885 1886 bridge_device = bridge_port->bridge_device; 1887 1888 list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) { 
1889 if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) { 1890 __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, 1891 mid); 1892 } else if (bridge_device->multicast_enabled && 1893 bridge_port->mrouter) { 1894 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); 1895 } 1896 } 1897 } 1898 1899 static int mlxsw_sp_port_obj_del(struct net_device *dev, 1900 const struct switchdev_obj *obj) 1901 { 1902 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1903 int err = 0; 1904 1905 switch (obj->id) { 1906 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1907 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port, 1908 SWITCHDEV_OBJ_PORT_VLAN(obj)); 1909 break; 1910 case SWITCHDEV_OBJ_ID_PORT_MDB: 1911 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port, 1912 SWITCHDEV_OBJ_PORT_MDB(obj)); 1913 break; 1914 default: 1915 err = -EOPNOTSUPP; 1916 break; 1917 } 1918 1919 mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); 1920 1921 return err; 1922 } 1923 1924 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, 1925 u16 lag_id) 1926 { 1927 struct mlxsw_sp_port *mlxsw_sp_port; 1928 u64 max_lag_members; 1929 int i; 1930 1931 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 1932 MAX_LAG_MEMBERS); 1933 for (i = 0; i < max_lag_members; i++) { 1934 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); 1935 if (mlxsw_sp_port) 1936 return mlxsw_sp_port; 1937 } 1938 return NULL; 1939 } 1940 1941 static int 1942 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, 1943 struct mlxsw_sp_bridge_port *bridge_port, 1944 struct mlxsw_sp_port *mlxsw_sp_port, 1945 struct netlink_ext_ack *extack) 1946 { 1947 if (is_vlan_dev(bridge_port->dev)) { 1948 NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge"); 1949 return -EINVAL; 1950 } 1951 1952 /* Port is no longer usable as a router interface */ 1953 if (mlxsw_sp_port->default_vlan->fid) 1954 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 1955 1956 return 
0; 1957 } 1958 1959 static void 1960 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device, 1961 struct mlxsw_sp_bridge_port *bridge_port, 1962 struct mlxsw_sp_port *mlxsw_sp_port) 1963 { 1964 /* Make sure untagged frames are allowed to ingress */ 1965 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 1966 } 1967 1968 static int 1969 mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, 1970 const struct net_device *vxlan_dev, u16 vid, 1971 struct netlink_ext_ack *extack) 1972 { 1973 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); 1974 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); 1975 struct mlxsw_sp_nve_params params = { 1976 .type = MLXSW_SP_NVE_TYPE_VXLAN, 1977 .vni = vxlan->cfg.vni, 1978 .dev = vxlan_dev, 1979 }; 1980 struct mlxsw_sp_fid *fid; 1981 int err; 1982 1983 /* If the VLAN is 0, we need to find the VLAN that is configured as 1984 * PVID and egress untagged on the bridge port of the VxLAN device. 1985 * It is possible no such VLAN exists 1986 */ 1987 if (!vid) { 1988 err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid); 1989 if (err || !vid) 1990 return err; 1991 } 1992 1993 /* If no other port is member in the VLAN, then the FID does not exist. 1994 * NVE will be enabled on the FID once a port joins the VLAN 1995 */ 1996 fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid); 1997 if (!fid) 1998 return 0; 1999 2000 if (mlxsw_sp_fid_vni_is_set(fid)) { 2001 NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID"); 2002 err = -EINVAL; 2003 goto err_vni_exists; 2004 } 2005 2006 err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, ¶ms, extack); 2007 if (err) 2008 goto err_nve_fid_enable; 2009 2010 /* The tunnel port does not hold a reference on the FID. 
Only 2011 * local ports and the router port 2012 */ 2013 mlxsw_sp_fid_put(fid); 2014 2015 return 0; 2016 2017 err_nve_fid_enable: 2018 err_vni_exists: 2019 mlxsw_sp_fid_put(fid); 2020 return err; 2021 } 2022 2023 static struct net_device * 2024 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid) 2025 { 2026 struct net_device *dev; 2027 struct list_head *iter; 2028 2029 netdev_for_each_lower_dev(br_dev, dev, iter) { 2030 u16 pvid; 2031 int err; 2032 2033 if (!netif_is_vxlan(dev)) 2034 continue; 2035 2036 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 2037 if (err || pvid != vid) 2038 continue; 2039 2040 return dev; 2041 } 2042 2043 return NULL; 2044 } 2045 2046 static struct mlxsw_sp_fid * 2047 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device, 2048 u16 vid, struct netlink_ext_ack *extack) 2049 { 2050 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); 2051 struct net_device *vxlan_dev; 2052 struct mlxsw_sp_fid *fid; 2053 int err; 2054 2055 fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid); 2056 if (IS_ERR(fid)) 2057 return fid; 2058 2059 if (mlxsw_sp_fid_vni_is_set(fid)) 2060 return fid; 2061 2062 /* Find the VxLAN device that has the specified VLAN configured as 2063 * PVID and egress untagged. 
There can be at most one such device 2064 */ 2065 vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, 2066 vid); 2067 if (!vxlan_dev) 2068 return fid; 2069 2070 if (!netif_running(vxlan_dev)) 2071 return fid; 2072 2073 err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid, 2074 extack); 2075 if (err) 2076 goto err_vxlan_join; 2077 2078 return fid; 2079 2080 err_vxlan_join: 2081 mlxsw_sp_fid_put(fid); 2082 return ERR_PTR(err); 2083 } 2084 2085 static struct mlxsw_sp_fid * 2086 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device, 2087 u16 vid) 2088 { 2089 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); 2090 2091 return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid); 2092 } 2093 2094 static u16 2095 mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device, 2096 const struct mlxsw_sp_fid *fid) 2097 { 2098 return mlxsw_sp_fid_8021q_vid(fid); 2099 } 2100 2101 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = { 2102 .port_join = mlxsw_sp_bridge_8021q_port_join, 2103 .port_leave = mlxsw_sp_bridge_8021q_port_leave, 2104 .vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join, 2105 .fid_get = mlxsw_sp_bridge_8021q_fid_get, 2106 .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup, 2107 .fid_vid = mlxsw_sp_bridge_8021q_fid_vid, 2108 }; 2109 2110 static bool 2111 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port, 2112 const struct net_device *br_dev) 2113 { 2114 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 2115 2116 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list, 2117 list) { 2118 if (mlxsw_sp_port_vlan->bridge_port && 2119 mlxsw_sp_port_vlan->bridge_port->bridge_device->dev == 2120 br_dev) 2121 return true; 2122 } 2123 2124 return false; 2125 } 2126 2127 static int 2128 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, 2129 struct mlxsw_sp_bridge_port *bridge_port, 2130 struct mlxsw_sp_port *mlxsw_sp_port, 2131 
struct netlink_ext_ack *extack) 2132 { 2133 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 2134 struct net_device *dev = bridge_port->dev; 2135 u16 vid; 2136 2137 vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID; 2138 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 2139 if (WARN_ON(!mlxsw_sp_port_vlan)) 2140 return -EINVAL; 2141 2142 if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) { 2143 NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port"); 2144 return -EINVAL; 2145 } 2146 2147 /* Port is no longer usable as a router interface */ 2148 if (mlxsw_sp_port_vlan->fid) 2149 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 2150 2151 return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port, 2152 extack); 2153 } 2154 2155 static void 2156 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device, 2157 struct mlxsw_sp_bridge_port *bridge_port, 2158 struct mlxsw_sp_port *mlxsw_sp_port) 2159 { 2160 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 2161 struct net_device *dev = bridge_port->dev; 2162 u16 vid; 2163 2164 vid = is_vlan_dev(dev) ? 
vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID; 2165 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 2166 if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port) 2167 return; 2168 2169 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 2170 } 2171 2172 static int 2173 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, 2174 const struct net_device *vxlan_dev, u16 vid, 2175 struct netlink_ext_ack *extack) 2176 { 2177 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); 2178 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); 2179 struct mlxsw_sp_nve_params params = { 2180 .type = MLXSW_SP_NVE_TYPE_VXLAN, 2181 .vni = vxlan->cfg.vni, 2182 .dev = vxlan_dev, 2183 }; 2184 struct mlxsw_sp_fid *fid; 2185 int err; 2186 2187 fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex); 2188 if (!fid) { 2189 NL_SET_ERR_MSG_MOD(extack, "Did not find a corresponding FID"); 2190 return -EINVAL; 2191 } 2192 2193 if (mlxsw_sp_fid_vni_is_set(fid)) { 2194 NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID"); 2195 err = -EINVAL; 2196 goto err_vni_exists; 2197 } 2198 2199 err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, ¶ms, extack); 2200 if (err) 2201 goto err_nve_fid_enable; 2202 2203 /* The tunnel port does not hold a reference on the FID. 
Only 2204 * local ports and the router port 2205 */ 2206 mlxsw_sp_fid_put(fid); 2207 2208 return 0; 2209 2210 err_nve_fid_enable: 2211 err_vni_exists: 2212 mlxsw_sp_fid_put(fid); 2213 return err; 2214 } 2215 2216 static struct mlxsw_sp_fid * 2217 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device, 2218 u16 vid, struct netlink_ext_ack *extack) 2219 { 2220 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); 2221 struct net_device *vxlan_dev; 2222 struct mlxsw_sp_fid *fid; 2223 int err; 2224 2225 fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex); 2226 if (IS_ERR(fid)) 2227 return fid; 2228 2229 if (mlxsw_sp_fid_vni_is_set(fid)) 2230 return fid; 2231 2232 vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev); 2233 if (!vxlan_dev) 2234 return fid; 2235 2236 if (!netif_running(vxlan_dev)) 2237 return fid; 2238 2239 err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0, 2240 extack); 2241 if (err) 2242 goto err_vxlan_join; 2243 2244 return fid; 2245 2246 err_vxlan_join: 2247 mlxsw_sp_fid_put(fid); 2248 return ERR_PTR(err); 2249 } 2250 2251 static struct mlxsw_sp_fid * 2252 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device, 2253 u16 vid) 2254 { 2255 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); 2256 2257 /* The only valid VLAN for a VLAN-unaware bridge is 0 */ 2258 if (vid) 2259 return NULL; 2260 2261 return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex); 2262 } 2263 2264 static u16 2265 mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device, 2266 const struct mlxsw_sp_fid *fid) 2267 { 2268 return 0; 2269 } 2270 2271 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = { 2272 .port_join = mlxsw_sp_bridge_8021d_port_join, 2273 .port_leave = mlxsw_sp_bridge_8021d_port_leave, 2274 .vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join, 2275 .fid_get = mlxsw_sp_bridge_8021d_fid_get, 2276 .fid_lookup = 
mlxsw_sp_bridge_8021d_fid_lookup, 2277 .fid_vid = mlxsw_sp_bridge_8021d_fid_vid, 2278 }; 2279 2280 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, 2281 struct net_device *brport_dev, 2282 struct net_device *br_dev, 2283 struct netlink_ext_ack *extack) 2284 { 2285 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2286 struct mlxsw_sp_bridge_device *bridge_device; 2287 struct mlxsw_sp_bridge_port *bridge_port; 2288 int err; 2289 2290 bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev); 2291 if (IS_ERR(bridge_port)) 2292 return PTR_ERR(bridge_port); 2293 bridge_device = bridge_port->bridge_device; 2294 2295 err = bridge_device->ops->port_join(bridge_device, bridge_port, 2296 mlxsw_sp_port, extack); 2297 if (err) 2298 goto err_port_join; 2299 2300 return 0; 2301 2302 err_port_join: 2303 mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port); 2304 return err; 2305 } 2306 2307 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 2308 struct net_device *brport_dev, 2309 struct net_device *br_dev) 2310 { 2311 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2312 struct mlxsw_sp_bridge_device *bridge_device; 2313 struct mlxsw_sp_bridge_port *bridge_port; 2314 2315 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 2316 if (!bridge_device) 2317 return; 2318 bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev); 2319 if (!bridge_port) 2320 return; 2321 2322 bridge_device->ops->port_leave(bridge_device, bridge_port, 2323 mlxsw_sp_port); 2324 mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port); 2325 } 2326 2327 int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, 2328 const struct net_device *br_dev, 2329 const struct net_device *vxlan_dev, u16 vid, 2330 struct netlink_ext_ack *extack) 2331 { 2332 struct mlxsw_sp_bridge_device *bridge_device; 2333 2334 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 2335 if (WARN_ON(!bridge_device)) 2336 return -EINVAL; 
2337 2338 return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, 2339 extack); 2340 } 2341 2342 void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, 2343 const struct net_device *vxlan_dev) 2344 { 2345 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); 2346 struct mlxsw_sp_fid *fid; 2347 2348 /* If the VxLAN device is down, then the FID does not have a VNI */ 2349 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni); 2350 if (!fid) 2351 return; 2352 2353 mlxsw_sp_nve_fid_disable(mlxsw_sp, fid); 2354 mlxsw_sp_fid_put(fid); 2355 } 2356 2357 struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, 2358 const struct net_device *br_dev, 2359 u16 vid, 2360 struct netlink_ext_ack *extack) 2361 { 2362 struct mlxsw_sp_bridge_device *bridge_device; 2363 2364 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 2365 if (WARN_ON(!bridge_device)) 2366 return ERR_PTR(-EINVAL); 2367 2368 return bridge_device->ops->fid_get(bridge_device, vid, extack); 2369 } 2370 2371 static void 2372 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr, 2373 enum mlxsw_sp_l3proto *proto, 2374 union mlxsw_sp_l3addr *addr) 2375 { 2376 if (vxlan_addr->sa.sa_family == AF_INET) { 2377 addr->addr4 = vxlan_addr->sin.sin_addr.s_addr; 2378 *proto = MLXSW_SP_L3_PROTO_IPV4; 2379 } else { 2380 addr->addr6 = vxlan_addr->sin6.sin6_addr; 2381 *proto = MLXSW_SP_L3_PROTO_IPV6; 2382 } 2383 } 2384 2385 static void 2386 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto, 2387 const union mlxsw_sp_l3addr *addr, 2388 union vxlan_addr *vxlan_addr) 2389 { 2390 switch (proto) { 2391 case MLXSW_SP_L3_PROTO_IPV4: 2392 vxlan_addr->sa.sa_family = AF_INET; 2393 vxlan_addr->sin.sin_addr.s_addr = addr->addr4; 2394 break; 2395 case MLXSW_SP_L3_PROTO_IPV6: 2396 vxlan_addr->sa.sa_family = AF_INET6; 2397 vxlan_addr->sin6.sin6_addr = addr->addr6; 2398 break; 2399 } 2400 } 2401 2402 static void mlxsw_sp_fdb_vxlan_call_notifiers(struct 
net_device *dev, 2403 const char *mac, 2404 enum mlxsw_sp_l3proto proto, 2405 union mlxsw_sp_l3addr *addr, 2406 __be32 vni, bool adding) 2407 { 2408 struct switchdev_notifier_vxlan_fdb_info info; 2409 struct vxlan_dev *vxlan = netdev_priv(dev); 2410 enum switchdev_notifier_type type; 2411 2412 type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE : 2413 SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE; 2414 mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip); 2415 info.remote_port = vxlan->cfg.dst_port; 2416 info.remote_vni = vni; 2417 info.remote_ifindex = 0; 2418 ether_addr_copy(info.eth_addr, mac); 2419 info.vni = vni; 2420 info.offloaded = adding; 2421 call_switchdev_notifiers(type, dev, &info.info, NULL); 2422 } 2423 2424 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev, 2425 const char *mac, 2426 enum mlxsw_sp_l3proto proto, 2427 union mlxsw_sp_l3addr *addr, 2428 __be32 vni, 2429 bool adding) 2430 { 2431 if (netif_is_vxlan(dev)) 2432 mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni, 2433 adding); 2434 } 2435 2436 static void 2437 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type, 2438 const char *mac, u16 vid, 2439 struct net_device *dev, bool offloaded) 2440 { 2441 struct switchdev_notifier_fdb_info info; 2442 2443 info.addr = mac; 2444 info.vid = vid; 2445 info.offloaded = offloaded; 2446 call_switchdev_notifiers(type, dev, &info.info, NULL); 2447 } 2448 2449 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, 2450 char *sfn_pl, int rec_index, 2451 bool adding) 2452 { 2453 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 2454 struct mlxsw_sp_bridge_device *bridge_device; 2455 struct mlxsw_sp_bridge_port *bridge_port; 2456 struct mlxsw_sp_port *mlxsw_sp_port; 2457 enum switchdev_notifier_type type; 2458 char mac[ETH_ALEN]; 2459 u8 local_port; 2460 u16 vid, fid; 2461 bool do_notification = true; 2462 int err; 2463 2464 mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port); 2465 
mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2466 if (!mlxsw_sp_port) { 2467 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n"); 2468 goto just_remove; 2469 } 2470 2471 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid); 2472 if (!mlxsw_sp_port_vlan) { 2473 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n"); 2474 goto just_remove; 2475 } 2476 2477 bridge_port = mlxsw_sp_port_vlan->bridge_port; 2478 if (!bridge_port) { 2479 netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n"); 2480 goto just_remove; 2481 } 2482 2483 bridge_device = bridge_port->bridge_device; 2484 vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0; 2485 2486 do_fdb_op: 2487 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, 2488 adding, true); 2489 if (err) { 2490 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n"); 2491 return; 2492 } 2493 2494 if (!do_notification) 2495 return; 2496 type = adding ? 
SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE; 2497 mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding); 2498 2499 return; 2500 2501 just_remove: 2502 adding = false; 2503 do_notification = false; 2504 goto do_fdb_op; 2505 } 2506 2507 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, 2508 char *sfn_pl, int rec_index, 2509 bool adding) 2510 { 2511 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 2512 struct mlxsw_sp_bridge_device *bridge_device; 2513 struct mlxsw_sp_bridge_port *bridge_port; 2514 struct mlxsw_sp_port *mlxsw_sp_port; 2515 enum switchdev_notifier_type type; 2516 char mac[ETH_ALEN]; 2517 u16 lag_vid = 0; 2518 u16 lag_id; 2519 u16 vid, fid; 2520 bool do_notification = true; 2521 int err; 2522 2523 mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id); 2524 mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id); 2525 if (!mlxsw_sp_port) { 2526 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n"); 2527 goto just_remove; 2528 } 2529 2530 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid); 2531 if (!mlxsw_sp_port_vlan) { 2532 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n"); 2533 goto just_remove; 2534 } 2535 2536 bridge_port = mlxsw_sp_port_vlan->bridge_port; 2537 if (!bridge_port) { 2538 netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n"); 2539 goto just_remove; 2540 } 2541 2542 bridge_device = bridge_port->bridge_device; 2543 vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0; 2544 lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ? 
2545 mlxsw_sp_port_vlan->vid : 0; 2546 2547 do_fdb_op: 2548 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, 2549 adding, true); 2550 if (err) { 2551 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n"); 2552 return; 2553 } 2554 2555 if (!do_notification) 2556 return; 2557 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE; 2558 mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding); 2559 2560 return; 2561 2562 just_remove: 2563 adding = false; 2564 do_notification = false; 2565 goto do_fdb_op; 2566 } 2567 2568 static int 2569 __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp, 2570 const struct mlxsw_sp_fid *fid, 2571 bool adding, 2572 struct net_device **nve_dev, 2573 u16 *p_vid, __be32 *p_vni) 2574 { 2575 struct mlxsw_sp_bridge_device *bridge_device; 2576 struct net_device *br_dev, *dev; 2577 int nve_ifindex; 2578 int err; 2579 2580 err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex); 2581 if (err) 2582 return err; 2583 2584 err = mlxsw_sp_fid_vni(fid, p_vni); 2585 if (err) 2586 return err; 2587 2588 dev = __dev_get_by_index(&init_net, nve_ifindex); 2589 if (!dev) 2590 return -EINVAL; 2591 *nve_dev = dev; 2592 2593 if (!netif_running(dev)) 2594 return -EINVAL; 2595 2596 if (adding && !br_port_flag_is_set(dev, BR_LEARNING)) 2597 return -EINVAL; 2598 2599 if (adding && netif_is_vxlan(dev)) { 2600 struct vxlan_dev *vxlan = netdev_priv(dev); 2601 2602 if (!(vxlan->cfg.flags & VXLAN_F_LEARN)) 2603 return -EINVAL; 2604 } 2605 2606 br_dev = netdev_master_upper_dev_get(dev); 2607 if (!br_dev) 2608 return -EINVAL; 2609 2610 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 2611 if (!bridge_device) 2612 return -EINVAL; 2613 2614 *p_vid = bridge_device->ops->fid_vid(bridge_device, fid); 2615 2616 return 0; 2617 } 2618 2619 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp, 2620 char *sfn_pl, 2621 int rec_index, 2622 bool 
adding) 2623 { 2624 enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto; 2625 enum switchdev_notifier_type type; 2626 struct net_device *nve_dev; 2627 union mlxsw_sp_l3addr addr; 2628 struct mlxsw_sp_fid *fid; 2629 char mac[ETH_ALEN]; 2630 u16 fid_index, vid; 2631 __be32 vni; 2632 u32 uip; 2633 int err; 2634 2635 mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index, 2636 &uip, &sfn_proto); 2637 2638 fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index); 2639 if (!fid) 2640 goto err_fid_lookup; 2641 2642 err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip, 2643 (enum mlxsw_sp_l3proto) sfn_proto, 2644 &addr); 2645 if (err) 2646 goto err_ip_resolve; 2647 2648 err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding, 2649 &nve_dev, &vid, &vni); 2650 if (err) 2651 goto err_fdb_process; 2652 2653 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index, 2654 (enum mlxsw_sp_l3proto) sfn_proto, 2655 &addr, adding, true); 2656 if (err) 2657 goto err_fdb_op; 2658 2659 mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac, 2660 (enum mlxsw_sp_l3proto) sfn_proto, 2661 &addr, vni, adding); 2662 2663 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : 2664 SWITCHDEV_FDB_DEL_TO_BRIDGE; 2665 mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding); 2666 2667 mlxsw_sp_fid_put(fid); 2668 2669 return; 2670 2671 err_fdb_op: 2672 err_fdb_process: 2673 err_ip_resolve: 2674 mlxsw_sp_fid_put(fid); 2675 err_fid_lookup: 2676 /* Remove an FDB entry in case we cannot process it. Otherwise the 2677 * device will keep sending the same notification over and over again. 
2678 */ 2679 mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index, 2680 (enum mlxsw_sp_l3proto) sfn_proto, &addr, 2681 false, true); 2682 } 2683 2684 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, 2685 char *sfn_pl, int rec_index) 2686 { 2687 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) { 2688 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC: 2689 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, 2690 rec_index, true); 2691 break; 2692 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC: 2693 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, 2694 rec_index, false); 2695 break; 2696 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG: 2697 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl, 2698 rec_index, true); 2699 break; 2700 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG: 2701 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl, 2702 rec_index, false); 2703 break; 2704 case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL: 2705 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl, 2706 rec_index, true); 2707 break; 2708 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL: 2709 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl, 2710 rec_index, false); 2711 break; 2712 } 2713 } 2714 2715 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp) 2716 { 2717 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; 2718 2719 mlxsw_core_schedule_dw(&bridge->fdb_notify.dw, 2720 msecs_to_jiffies(bridge->fdb_notify.interval)); 2721 } 2722 2723 static void mlxsw_sp_fdb_notify_work(struct work_struct *work) 2724 { 2725 struct mlxsw_sp_bridge *bridge; 2726 struct mlxsw_sp *mlxsw_sp; 2727 char *sfn_pl; 2728 u8 num_rec; 2729 int i; 2730 int err; 2731 2732 sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL); 2733 if (!sfn_pl) 2734 return; 2735 2736 bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work); 2737 mlxsw_sp = bridge->mlxsw_sp; 2738 2739 rtnl_lock(); 2740 mlxsw_reg_sfn_pack(sfn_pl); 2741 err = 
mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); 2742 if (err) { 2743 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); 2744 goto out; 2745 } 2746 num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); 2747 for (i = 0; i < num_rec; i++) 2748 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); 2749 2750 out: 2751 rtnl_unlock(); 2752 kfree(sfn_pl); 2753 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 2754 } 2755 2756 struct mlxsw_sp_switchdev_event_work { 2757 struct work_struct work; 2758 union { 2759 struct switchdev_notifier_fdb_info fdb_info; 2760 struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info; 2761 }; 2762 struct net_device *dev; 2763 unsigned long event; 2764 }; 2765 2766 static void 2767 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp, 2768 struct mlxsw_sp_switchdev_event_work * 2769 switchdev_work, 2770 struct mlxsw_sp_fid *fid, __be32 vni) 2771 { 2772 struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info; 2773 struct switchdev_notifier_fdb_info *fdb_info; 2774 struct net_device *dev = switchdev_work->dev; 2775 enum mlxsw_sp_l3proto proto; 2776 union mlxsw_sp_l3addr addr; 2777 int err; 2778 2779 fdb_info = &switchdev_work->fdb_info; 2780 err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info); 2781 if (err) 2782 return; 2783 2784 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip, 2785 &proto, &addr); 2786 2787 switch (switchdev_work->event) { 2788 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2789 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, 2790 vxlan_fdb_info.eth_addr, 2791 mlxsw_sp_fid_index(fid), 2792 proto, &addr, true, false); 2793 if (err) 2794 return; 2795 vxlan_fdb_info.offloaded = true; 2796 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, 2797 &vxlan_fdb_info.info, NULL); 2798 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, 2799 vxlan_fdb_info.eth_addr, 2800 fdb_info->vid, dev, true); 2801 break; 2802 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2803 err = 
mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, 2804 vxlan_fdb_info.eth_addr, 2805 mlxsw_sp_fid_index(fid), 2806 proto, &addr, false, 2807 false); 2808 vxlan_fdb_info.offloaded = false; 2809 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, 2810 &vxlan_fdb_info.info, NULL); 2811 break; 2812 } 2813 } 2814 2815 static void 2816 mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work * 2817 switchdev_work) 2818 { 2819 struct mlxsw_sp_bridge_device *bridge_device; 2820 struct net_device *dev = switchdev_work->dev; 2821 struct net_device *br_dev; 2822 struct mlxsw_sp *mlxsw_sp; 2823 struct mlxsw_sp_fid *fid; 2824 __be32 vni; 2825 int err; 2826 2827 if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE && 2828 switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE) 2829 return; 2830 2831 if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE && 2832 !switchdev_work->fdb_info.added_by_user) 2833 return; 2834 2835 if (!netif_running(dev)) 2836 return; 2837 br_dev = netdev_master_upper_dev_get(dev); 2838 if (!br_dev) 2839 return; 2840 if (!netif_is_bridge_master(br_dev)) 2841 return; 2842 mlxsw_sp = mlxsw_sp_lower_get(br_dev); 2843 if (!mlxsw_sp) 2844 return; 2845 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 2846 if (!bridge_device) 2847 return; 2848 2849 fid = bridge_device->ops->fid_lookup(bridge_device, 2850 switchdev_work->fdb_info.vid); 2851 if (!fid) 2852 return; 2853 2854 err = mlxsw_sp_fid_vni(fid, &vni); 2855 if (err) 2856 goto out; 2857 2858 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid, 2859 vni); 2860 2861 out: 2862 mlxsw_sp_fid_put(fid); 2863 } 2864 2865 static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) 2866 { 2867 struct mlxsw_sp_switchdev_event_work *switchdev_work = 2868 container_of(work, struct mlxsw_sp_switchdev_event_work, work); 2869 struct net_device *dev = switchdev_work->dev; 2870 struct switchdev_notifier_fdb_info *fdb_info; 2871 struct 
mlxsw_sp_port *mlxsw_sp_port; 2872 int err; 2873 2874 rtnl_lock(); 2875 if (netif_is_vxlan(dev)) { 2876 mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work); 2877 goto out; 2878 } 2879 2880 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 2881 if (!mlxsw_sp_port) 2882 goto out; 2883 2884 switch (switchdev_work->event) { 2885 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2886 fdb_info = &switchdev_work->fdb_info; 2887 if (!fdb_info->added_by_user) 2888 break; 2889 err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true); 2890 if (err) 2891 break; 2892 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, 2893 fdb_info->addr, 2894 fdb_info->vid, dev, true); 2895 break; 2896 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2897 fdb_info = &switchdev_work->fdb_info; 2898 mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); 2899 break; 2900 case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ 2901 case SWITCHDEV_FDB_DEL_TO_BRIDGE: 2902 /* These events are only used to potentially update an existing 2903 * SPAN mirror. 
2904 */ 2905 break; 2906 } 2907 2908 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); 2909 2910 out: 2911 rtnl_unlock(); 2912 kfree(switchdev_work->fdb_info.addr); 2913 kfree(switchdev_work); 2914 dev_put(dev); 2915 } 2916 2917 static void 2918 mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp, 2919 struct mlxsw_sp_switchdev_event_work * 2920 switchdev_work) 2921 { 2922 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info; 2923 struct mlxsw_sp_bridge_device *bridge_device; 2924 struct net_device *dev = switchdev_work->dev; 2925 u8 all_zeros_mac[ETH_ALEN] = { 0 }; 2926 enum mlxsw_sp_l3proto proto; 2927 union mlxsw_sp_l3addr addr; 2928 struct net_device *br_dev; 2929 struct mlxsw_sp_fid *fid; 2930 u16 vid; 2931 int err; 2932 2933 vxlan_fdb_info = &switchdev_work->vxlan_fdb_info; 2934 br_dev = netdev_master_upper_dev_get(dev); 2935 2936 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 2937 if (!bridge_device) 2938 return; 2939 2940 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni); 2941 if (!fid) 2942 return; 2943 2944 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip, 2945 &proto, &addr); 2946 2947 if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) { 2948 err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr); 2949 if (err) { 2950 mlxsw_sp_fid_put(fid); 2951 return; 2952 } 2953 vxlan_fdb_info->offloaded = true; 2954 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, 2955 &vxlan_fdb_info->info, NULL); 2956 mlxsw_sp_fid_put(fid); 2957 return; 2958 } 2959 2960 /* The device has a single FDB table, whereas Linux has two - one 2961 * in the bridge driver and another in the VxLAN driver. 
We only 2962 * program an entry to the device if the MAC points to the VxLAN 2963 * device in the bridge's FDB table 2964 */ 2965 vid = bridge_device->ops->fid_vid(bridge_device, fid); 2966 if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev) 2967 goto err_br_fdb_find; 2968 2969 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr, 2970 mlxsw_sp_fid_index(fid), proto, 2971 &addr, true, false); 2972 if (err) 2973 goto err_fdb_tunnel_uc_op; 2974 vxlan_fdb_info->offloaded = true; 2975 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, 2976 &vxlan_fdb_info->info, NULL); 2977 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, 2978 vxlan_fdb_info->eth_addr, vid, dev, true); 2979 2980 mlxsw_sp_fid_put(fid); 2981 2982 return; 2983 2984 err_fdb_tunnel_uc_op: 2985 err_br_fdb_find: 2986 mlxsw_sp_fid_put(fid); 2987 } 2988 2989 static void 2990 mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp, 2991 struct mlxsw_sp_switchdev_event_work * 2992 switchdev_work) 2993 { 2994 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info; 2995 struct mlxsw_sp_bridge_device *bridge_device; 2996 struct net_device *dev = switchdev_work->dev; 2997 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 2998 u8 all_zeros_mac[ETH_ALEN] = { 0 }; 2999 enum mlxsw_sp_l3proto proto; 3000 union mlxsw_sp_l3addr addr; 3001 struct mlxsw_sp_fid *fid; 3002 u16 vid; 3003 3004 vxlan_fdb_info = &switchdev_work->vxlan_fdb_info; 3005 3006 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 3007 if (!bridge_device) 3008 return; 3009 3010 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni); 3011 if (!fid) 3012 return; 3013 3014 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip, 3015 &proto, &addr); 3016 3017 if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) { 3018 mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr); 3019 mlxsw_sp_fid_put(fid); 3020 return; 3021 } 3022 3023 
mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr, 3024 mlxsw_sp_fid_index(fid), proto, &addr, 3025 false, false); 3026 vid = bridge_device->ops->fid_vid(bridge_device, fid); 3027 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, 3028 vxlan_fdb_info->eth_addr, vid, dev, false); 3029 3030 mlxsw_sp_fid_put(fid); 3031 } 3032 3033 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work) 3034 { 3035 struct mlxsw_sp_switchdev_event_work *switchdev_work = 3036 container_of(work, struct mlxsw_sp_switchdev_event_work, work); 3037 struct net_device *dev = switchdev_work->dev; 3038 struct mlxsw_sp *mlxsw_sp; 3039 struct net_device *br_dev; 3040 3041 rtnl_lock(); 3042 3043 if (!netif_running(dev)) 3044 goto out; 3045 br_dev = netdev_master_upper_dev_get(dev); 3046 if (!br_dev) 3047 goto out; 3048 if (!netif_is_bridge_master(br_dev)) 3049 goto out; 3050 mlxsw_sp = mlxsw_sp_lower_get(br_dev); 3051 if (!mlxsw_sp) 3052 goto out; 3053 3054 switch (switchdev_work->event) { 3055 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: 3056 mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work); 3057 break; 3058 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE: 3059 mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work); 3060 break; 3061 } 3062 3063 out: 3064 rtnl_unlock(); 3065 kfree(switchdev_work); 3066 dev_put(dev); 3067 } 3068 3069 static int 3070 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work * 3071 switchdev_work, 3072 struct switchdev_notifier_info *info) 3073 { 3074 struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev); 3075 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info; 3076 struct vxlan_config *cfg = &vxlan->cfg; 3077 struct netlink_ext_ack *extack; 3078 3079 extack = switchdev_notifier_info_to_extack(info); 3080 vxlan_fdb_info = container_of(info, 3081 struct switchdev_notifier_vxlan_fdb_info, 3082 info); 3083 3084 if (vxlan_fdb_info->remote_port != cfg->dst_port) { 3085 NL_SET_ERR_MSG_MOD(extack, "VxLAN: 
FDB: Non-default remote port is not supported"); 3086 return -EOPNOTSUPP; 3087 } 3088 if (vxlan_fdb_info->remote_vni != cfg->vni || 3089 vxlan_fdb_info->vni != cfg->vni) { 3090 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported"); 3091 return -EOPNOTSUPP; 3092 } 3093 if (vxlan_fdb_info->remote_ifindex) { 3094 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported"); 3095 return -EOPNOTSUPP; 3096 } 3097 if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) { 3098 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported"); 3099 return -EOPNOTSUPP; 3100 } 3101 if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) { 3102 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported"); 3103 return -EOPNOTSUPP; 3104 } 3105 3106 switchdev_work->vxlan_fdb_info = *vxlan_fdb_info; 3107 3108 return 0; 3109 } 3110 3111 /* Called under rcu_read_lock() */ 3112 static int mlxsw_sp_switchdev_event(struct notifier_block *unused, 3113 unsigned long event, void *ptr) 3114 { 3115 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 3116 struct mlxsw_sp_switchdev_event_work *switchdev_work; 3117 struct switchdev_notifier_fdb_info *fdb_info; 3118 struct switchdev_notifier_info *info = ptr; 3119 struct net_device *br_dev; 3120 int err; 3121 3122 if (event == SWITCHDEV_PORT_ATTR_SET) { 3123 err = switchdev_handle_port_attr_set(dev, ptr, 3124 mlxsw_sp_port_dev_check, 3125 mlxsw_sp_port_attr_set); 3126 return notifier_from_errno(err); 3127 } 3128 3129 /* Tunnel devices are not our uppers, so check their master instead */ 3130 br_dev = netdev_master_upper_dev_get_rcu(dev); 3131 if (!br_dev) 3132 return NOTIFY_DONE; 3133 if (!netif_is_bridge_master(br_dev)) 3134 return NOTIFY_DONE; 3135 if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev)) 3136 return NOTIFY_DONE; 3137 3138 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 3139 if (!switchdev_work) 3140 return NOTIFY_BAD; 3141 3142 
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* Duplicate the MAC address: the notifier's fdb_info is not
		 * guaranteed to outlive this atomic call, while the copy is
		 * consumed later from the deferred work item.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containing mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		dev_hold(dev);
		break;
	default:
		/* Event not handled by this driver; drop the work item. */
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

/* Atomic (non-blocking) switchdev notifier; FDB events are deferred to
 * process context via the work items initialized above.
 */
struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};

/* Reconcile a VLAN being added on a VxLAN device enslaved to an 802.1Q
 * bridge with the FID mapped (or to be mapped) to the device's VNI. The
 * numbered cases below enumerate the possible FID / PVID / egress-untagged
 * combinations.
 */
static int
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  bool flag_untagged, bool flag_pvid,
				  struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;
	u16 old_vid;
	int err;

	/* We cannot have the same VLAN as PVID and egress untagged on multiple
	 * VxLAN devices. Note that we get this notification before the VLAN is
	 * actually added to the bridge's database, so it is not possible for
	 * the lookup function to return 'vxlan_dev'
	 */
	if (flag_untagged && flag_pvid &&
	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
		return -EINVAL;
	}

	if (!netif_running(vxlan_dev))
		return 0;

	/* First case: FID is not associated with this VNI, but the new VLAN
	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
	 * it exists
	 */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid) {
		if (!flag_untagged || !flag_pvid)
			return 0;
		return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
							vxlan_dev, vid, extack);
	}

	/* Second case: FID is associated with the VNI and the VLAN associated
	 * with the FID is the same as the notified VLAN. This means the flags
	 * (PVID / egress untagged) were toggled and that NVE should be
	 * disabled on the FID
	 */
	old_vid = mlxsw_sp_fid_8021q_vid(fid);
	if (vid == old_vid) {
		/* Both flags set would have been caught by the first case. */
		if (WARN_ON(flag_untagged && flag_pvid)) {
			mlxsw_sp_fid_put(fid);
			return -EINVAL;
		}
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Third case: A new VLAN was configured on the VxLAN device, but this
	 * VLAN is not PVID, so there is nothing to do.
	 */
	if (!flag_pvid) {
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
	 * mapped to the VNI should be unmapped
	 */
	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
	mlxsw_sp_fid_put(fid);

	/* Fifth case: The new VLAN is also egress untagged, which means the
	 * VLAN needs to be mapped to the VNI
	 */
	if (!flag_untagged)
		return 0;

	err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
					       extack);
	if (err)
		goto err_vxlan_join;

	return 0;

err_vxlan_join:
	/* Best-effort rollback: re-map the previous VLAN to the VNI. */
	mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
					 NULL);
	return err;
}

/* Undo the VNI mapping when the VLAN currently mapped to the VxLAN device's
 * VNI is deleted from the bridge port. Deleting any other VLAN is a no-op.
 */
static void
mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;

	if (!netif_running(vxlan_dev))
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid)
		return;

	/* A different VLAN than the one mapped to the VNI is deleted */
	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
		goto out;

	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);

out:
	/* Drop the reference taken by the lookup above. */
	mlxsw_sp_fid_put(fid);
}

/* Handle a SWITCHDEV_OBJ_ID_PORT_VLAN addition on a VxLAN device enslaved
 * to an offloaded VLAN-aware bridge; iterates the notified VLAN range.
 */
static int
mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
				   struct switchdev_notifier_port_obj_info *
				   port_obj_info)
{
	struct switchdev_obj_port_vlan *vlan =
		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct switchdev_trans *trans = port_obj_info->trans;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct netlink_ext_ack *extack;
	struct
mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;
	u16 vid;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
	br_dev = netdev_master_upper_dev_get(vxlan_dev);
	if (!br_dev)
		return 0;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;

	port_obj_info->handled = true;

	/* The VLANs are processed during the prepare phase; nothing left to
	 * do at commit time.
	 */
	if (switchdev_trans_ph_commit(trans))
		return 0;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	if (!bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
							vxlan_dev, vid,
							flag_untagged,
							flag_pvid, extack);
		if (err)
			return err;
	}

	return 0;
}

/* Handle a SWITCHDEV_OBJ_ID_PORT_VLAN deletion on a VxLAN device enslaved
 * to an offloaded VLAN-aware bridge; iterates the notified VLAN range.
 */
static void
mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
				   struct switchdev_notifier_port_obj_info *
				   port_obj_info)
{
	struct switchdev_obj_port_vlan *vlan =
		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;
	u16 vid;

	br_dev = netdev_master_upper_dev_get(vxlan_dev);
	if (!br_dev)
		return;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;

	port_obj_info->handled = true;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	if (!bridge_device->vlan_enabled)
		return;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
						  vxlan_dev, vid);
}

/* Dispatch a port object addition on a VxLAN device by object type. */
static int
mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
					struct switchdev_notifier_port_obj_info *
					port_obj_info)
{
	int err = 0;

	switch (port_obj_info->obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
							 port_obj_info);
		break;
	default:
		break;
	}

	return err;
}

/* Dispatch a port object deletion on a VxLAN device by object type. */
static void
mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
					struct switchdev_notifier_port_obj_info *
					port_obj_info)
{
	switch (port_obj_info->obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
		break;
	default:
		break;
	}
}

/* Blocking (process-context) switchdev notifier: routes port object and
 * attribute operations either to the VxLAN handlers above or to the generic
 * switchdev helpers that walk down to mlxsw_sp ports.
 */
static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err = 0;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		if (netif_is_vxlan(dev))
			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
		else
			err = switchdev_handle_port_obj_add(dev, ptr,
							mlxsw_sp_port_dev_check,
							mlxsw_sp_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		/* The VxLAN deletion handler is void; err stays 0 there. */
		if (netif_is_vxlan(dev))
			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
		else
			err = switchdev_handle_port_obj_del(dev, ptr,
							mlxsw_sp_port_dev_check,
							mlxsw_sp_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};

/* Accessor for the cached STP state of a bridge port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp
*mlxsw_sp) 3473 { 3474 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; 3475 struct notifier_block *nb; 3476 int err; 3477 3478 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME); 3479 if (err) { 3480 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); 3481 return err; 3482 } 3483 3484 err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier); 3485 if (err) { 3486 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n"); 3487 return err; 3488 } 3489 3490 nb = &mlxsw_sp_switchdev_blocking_notifier; 3491 err = register_switchdev_blocking_notifier(nb); 3492 if (err) { 3493 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n"); 3494 goto err_register_switchdev_blocking_notifier; 3495 } 3496 3497 INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work); 3498 bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; 3499 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 3500 return 0; 3501 3502 err_register_switchdev_blocking_notifier: 3503 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); 3504 return err; 3505 } 3506 3507 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) 3508 { 3509 struct notifier_block *nb; 3510 3511 cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw); 3512 3513 nb = &mlxsw_sp_switchdev_blocking_notifier; 3514 unregister_switchdev_blocking_notifier(nb); 3515 3516 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); 3517 } 3518 3519 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 3520 { 3521 struct mlxsw_sp_bridge *bridge; 3522 3523 bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL); 3524 if (!bridge) 3525 return -ENOMEM; 3526 mlxsw_sp->bridge = bridge; 3527 bridge->mlxsw_sp = mlxsw_sp; 3528 3529 INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list); 3530 3531 bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops; 3532 bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; 3533 3534 return 
mlxsw_sp_fdb_init(mlxsw_sp); 3535 } 3536 3537 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 3538 { 3539 mlxsw_sp_fdb_fini(mlxsw_sp); 3540 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); 3541 kfree(mlxsw_sp->bridge); 3542 } 3543 3544