/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	u16 fid = vid;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
	}

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
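/* Note: the device tracks STP state per {port, VID} via the SPMS register,
 * so a bridge port state change is applied to every active VLAN on the
 * port, whereas a vPort only updates the single VID it represents.
 */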
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
		else
			local_port = MLXSW_PORT_CPU_PORT;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	}

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}
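/* Toggle unicast flooding on all of the port's active VLANs. If one of
 * the hardware writes fails, the VLANs updated so far are rolled back to
 * the previous setting before the error is returned.
 */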
static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc)
{
	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 only_uc);
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}
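/* The bridge passes the ageing time in clock_t units; it is converted to
 * seconds before being programmed into the device via the SFDAT register.
 */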
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
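/* With vPorts hanging off of it, the port operates in Virtual mode and
 * uses per-{Port, VID} to FID mappings; otherwise the global VID to FID
 * mapping is used.
 */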
static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!list_empty(&mlxsw_sp_port->vports_list))
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
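/* Add a range of VLANs to a bridged port: make sure a FID exists and is
 * mapped for each VID, enable flooding, set VLAN membership and PVID, and
 * finally update the software bitmaps and STP state. Failures unwind the
 * already-completed steps in reverse order.
 */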
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
		mlxsw_sp_port->pvid = vid_begin;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}
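/* Learned entries are written as dynamic records, so the device can age
 * them out; entries added by the user are written as static records.
 */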
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}
	return NULL;
}
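/* MID indexes are allocated from a global bitmap and each multicast group
 * is reference counted per {MAC, VID}, so the SFD record is only written
 * on first use and removed when the last port leaves the group.
 */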
static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}
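/* Removal mirrors __mlxsw_sp_port_vlans_add(): VLAN membership, PVID,
 * flooding and FID mappings are torn down before the software bitmaps
 * are cleared. During init only membership and PVID are touched.
 */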
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
		/* Default VLAN is always 1 */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
		mlxsw_sp_port->pvid = 1;
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}
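/* Dump the device's FDB via repeated SFD queries, translating FIDs back
 * to VIDs: entries belonging to a vPort's vFID are reported with VID 0,
 * other vFID entries are skipped.
 */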
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	u16 vport_fid = 0;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 tmp;

		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		vport_fid = mlxsw_sp_vfid_to_fid(tmp);
	}

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};
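/* Sync a learned / aged-out entry to the bridge's FDB via the switchdev
 * notifier chain, but only when learning sync was requested through the
 * BR_LEARNING_SYNC bridge port flag.
 */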
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
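/* Same as mlxsw_sp_fdb_notify_mac_process(), but for records pointing at
 * a LAG; the entry is installed against the LAG ID and, for vPorts, the
 * VID is carried in lag_vid.
 */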
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);
	rtnl_unlock();

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}