/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

static bool mlxsw_sp_port_dev_check(const struct net_device *dev);

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_resources *resources;
	int i;

	resources = mlxsw_core_resources_get(mlxsw_sp->core);
	if (!resources->max_span_valid)
		return -EIO;

	mlxsw_sp->span.entries_count = resources->max_span;
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}
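
/* SPAN entry lifecycle: each entry describes one mirror target ("analyzer")
 * port and is reference counted. mlxsw_sp_span_entry_get() returns an
 * existing entry for the target port or creates one in a free slot, and
 * mlxsw_sp_span_entry_put() destroys the entry once the last inspected
 * port has been unbound from it.
 */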

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	/* The caller holds the first reference; starting at zero would let
	 * mlxsw_sp_span_entry_put() underflow and never destroy the entry.
	 */
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}
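
/* Note on sizing: mlxsw_sp_span_mtu_to_buffsize() reserves roughly 2.5x the
 * port MTU, converted to buffer cells, plus one spare cell, for the internal
 * buffer that holds the mirrored copy of egress packets. The exact factor is
 * a device-specific headroom choice rather than something derived from a
 * formula in this file.
 */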

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	/* Derive the port MAC from the switch base MAC by adding the local
	 * port number to the last byte.
	 */
	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	/* The hardware MTU accounts for the Tx header and Ethernet header,
	 * which the netdev MTU excludes.
	 */
	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
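
/* Every VLAN on the switch is backed by a Filtering Identifier (FID), the
 * domain used for the L2 forwarding lookup. The SVFA register programmed
 * below installs either a global VID to FID mapping (VLAN mode) or a
 * per-port {Port, VID} to FID mapping (Virtual Port mode), depending on the
 * mapping table 'mt' selected by the caller.
 */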

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
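
/* Headroom sizing used above: each configured priority group gets a buffer
 * of two MTUs, in cells. Lossless buffers are packed with size
 * pg_size + delay and threshold pg_size, so the delay allowance absorbs the
 * packets still in flight after PAUSE/PFC is asserted; lossy buffers get
 * just pg_size and simply drop on overflow.
 */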

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
				unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;
	int err;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -ENOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
	mall_tc_entry->mirror.to_local_port = to_port->local_port;
	mall_tc_entry->mirror.ingress = ingress;
	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);

	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
	if (err)
		goto err_mirror_add;
	return 0;

err_mirror_add:
	list_del(&mall_tc_entry->list);
	kfree(mall_tc_entry);
	return err;
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	const struct tc_action *a;
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -ENOTSUPP;
	}

	tc_for_each_action(a, cls->exts) {
		if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
			return -ENOTSUPP;

		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
							    a, ingress);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
							cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
		span_type = mall_tc_entry->mirror.ingress ?
				MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;

		mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
		break;
	default:
		WARN_ON(1);
	}

	list_del(&mall_tc_entry->list);
	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	if (tc->type == TC_SETUP_MATCHALL) {
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EINVAL;
		}
	}

	return -ENOTSUPP;
}
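
/* For reference, the matchall offload above is exercised from user space
 * with iproute2 along these lines (port names are illustrative):
 *
 *   tc qdisc add dev sw1p1 handle ffff: ingress
 *   tc filter add dev sw1p1 parent ffff: matchall skip_sw \
 *      action mirred egress mirror dev sw1p2
 *
 * which mirrors all packets received on sw1p1 to sw1p2 through a SPAN agent.
 */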

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_neigh_construct	= mlxsw_sp_router_neigh_construct,
	.ndo_neigh_destroy	= mlxsw_sp_router_neigh_destroy,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl)
{
	u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);

	return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
}

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
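
/* The total ethtool stats count below works out to the 19 IEEE 802.3
 * counters plus (8 per-priority + 2 per-TC) counters replicated for each of
 * the 8 priorities/traffic classes: 19 + (8 + 2) * 8 = 99 strings and
 * values.
 */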

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)

static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}
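
/* Statistics are gathered below by querying the PPCNT register once per
 * counter group (and per priority/TC where applicable) and then pulling the
 * individual fields out of the returned payload with the getters registered
 * above. On a query error the affected slots are zero-filled rather than
 * left uninitialized.
 */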

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}
	return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < len; i++)
		data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
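
/* The ethtool get/set_settings handlers below translate between the PTYS
 * register's Ethernet protocol bitmask and the legacy ethtool SUPPORTED_
 * and ADVERTISED_ link-mode bits, using the mlxsw_sp_port_link_mode table
 * above. Modes without an ethtool equivalent at the time this table was
 * written (e.g. 25G/50G/100G) carry only a speed.
 */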

static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto\n");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause |
			 SUPPORTED_Autoneg;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
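
/* mlxsw_sp_to_ptys_upper_speed() serves the width-based speed setup further
 * down: given the maximum speed a port width allows (MLXSW_SP_PORT_BASE_SPEED
 * per lane), it selects every link mode at or below that speed. For example,
 * a two-lane port would get all modes up to twice the base speed enabled.
 */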
1878 mlxsw_sp_to_ptys_advert_link(cmd->advertising) : 1879 mlxsw_sp_to_ptys_speed(speed); 1880 1881 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 1882 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1883 if (err) { 1884 netdev_err(dev, "Failed to get proto\n"); 1885 return err; 1886 } 1887 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL); 1888 1889 eth_proto_new = eth_proto_new & eth_proto_cap; 1890 if (!eth_proto_new) { 1891 netdev_err(dev, "Not supported proto admin requested\n"); 1892 return -EINVAL; 1893 } 1894 if (eth_proto_new == eth_proto_admin) 1895 return 0; 1896 1897 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new); 1898 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1899 if (err) { 1900 netdev_err(dev, "Failed to set proto admin\n"); 1901 return err; 1902 } 1903 1904 if (!netif_running(dev)) 1905 return 0; 1906 1907 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1908 if (err) { 1909 netdev_err(dev, "Failed to set admin status\n"); 1910 return err; 1911 } 1912 1913 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1914 if (err) { 1915 netdev_err(dev, "Failed to set admin status\n"); 1916 return err; 1917 } 1918 1919 return 0; 1920 } 1921 1922 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 1923 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 1924 .get_link = ethtool_op_get_link, 1925 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 1926 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 1927 .get_strings = mlxsw_sp_port_get_strings, 1928 .set_phys_id = mlxsw_sp_port_set_phys_id, 1929 .get_ethtool_stats = mlxsw_sp_port_get_stats, 1930 .get_sset_count = mlxsw_sp_port_get_sset_count, 1931 .get_settings = mlxsw_sp_port_get_settings, 1932 .set_settings = mlxsw_sp_port_set_settings, 1933 }; 1934 1935 static int 1936 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 1937 { 1938 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1939 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 1940 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1941 u32 eth_proto_admin; 1942 1943 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 1944 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 1945 eth_proto_admin); 1946 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1947 } 1948 1949 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 1950 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 1951 bool dwrr, u8 dwrr_weight) 1952 { 1953 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1954 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1955 1956 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1957 next_index); 1958 mlxsw_reg_qeec_de_set(qeec_pl, true); 1959 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 1960 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 1961 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1962 } 1963 1964 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 1965 enum mlxsw_reg_qeec_hr hr, u8 index, 1966 u8 next_index, u32 maxrate) 1967 { 1968 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1969 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1970 1971 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1972 next_index); 1973 mlxsw_reg_qeec_mase_set(qeec_pl, true); 1974 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 1975 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1976 } 1977 1978 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1979 u8 switch_prio, u8 tclass) 1980 { 1981 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1982 char qtct_pl[MLXSW_REG_QTCT_LEN]; 1983 1984 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 1985 tclass); 1986 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 1987 } 1988 1989 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 1990 { 1991 int err, i; 1992 1993 /* Set up the elements hierarchy, so that each TC is linked to 1994 * one subgroup, and all subgroups are members of the same group. 1995 */ 1996 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1997 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 1998 0); 1999 if (err) 2000 return err; 2001 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2002 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2003 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 2004 0, false, 0); 2005 if (err) 2006 return err; 2007 } 2008 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2009 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2010 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 2011 false, 0); 2012 if (err) 2013 return err; 2014 } 2015 2016 /* Make sure the max shaper is disabled in all hierarchies that 2017 * support it. 2018 */ 2019 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2020 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 2021 MLXSW_REG_QEEC_MAS_DIS); 2022 if (err) 2023 return err; 2024 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2025 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2026 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 2027 i, 0, 2028 MLXSW_REG_QEEC_MAS_DIS); 2029 if (err) 2030 return err; 2031 } 2032 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2033 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2034 MLXSW_REG_QEEC_HIERARCY_TC, 2035 i, i, 2036 MLXSW_REG_QEEC_MAS_DIS); 2037 if (err) 2038 return err; 2039 } 2040 2041 /* Map all priorities to traffic class 0.
*/ 2042 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2043 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 2044 if (err) 2045 return err; 2046 } 2047 2048 return 0; 2049 } 2050 2051 static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port) 2052 { 2053 mlxsw_sp_port->pvid = 1; 2054 2055 return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1); 2056 } 2057 2058 static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port) 2059 { 2060 return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); 2061 } 2062 2063 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2064 bool split, u8 module, u8 width, u8 lane) 2065 { 2066 struct mlxsw_sp_port *mlxsw_sp_port; 2067 struct net_device *dev; 2068 size_t bytes; 2069 int err; 2070 2071 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 2072 if (!dev) 2073 return -ENOMEM; 2074 mlxsw_sp_port = netdev_priv(dev); 2075 mlxsw_sp_port->dev = dev; 2076 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 2077 mlxsw_sp_port->local_port = local_port; 2078 mlxsw_sp_port->split = split; 2079 mlxsw_sp_port->mapping.module = module; 2080 mlxsw_sp_port->mapping.width = width; 2081 mlxsw_sp_port->mapping.lane = lane; 2082 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); 2083 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); 2084 if (!mlxsw_sp_port->active_vlans) { 2085 err = -ENOMEM; 2086 goto err_port_active_vlans_alloc; 2087 } 2088 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL); 2089 if (!mlxsw_sp_port->untagged_vlans) { 2090 err = -ENOMEM; 2091 goto err_port_untagged_vlans_alloc; 2092 } 2093 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); 2094 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 2095 2096 mlxsw_sp_port->pcpu_stats = 2097 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 2098 if (!mlxsw_sp_port->pcpu_stats) { 2099 err = -ENOMEM; 2100 goto err_alloc_stats; 2101 } 2102 2103 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 2104 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 2105 2106 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 2107 if (err) { 2108 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 2109 mlxsw_sp_port->local_port); 2110 goto err_dev_addr_init; 2111 } 2112 2113 netif_carrier_off(dev); 2114 2115 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 2116 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 2117 dev->hw_features |= NETIF_F_HW_TC; 2118 2119 /* Each packet needs to have a Tx header (metadata) on top of all other 2120 * headers.
2121 */ 2122 dev->hard_header_len += MLXSW_TXHDR_LEN; 2123 2124 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 2125 if (err) { 2126 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 2127 mlxsw_sp_port->local_port); 2128 goto err_port_system_port_mapping_set; 2129 } 2130 2131 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 2132 if (err) { 2133 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 2134 mlxsw_sp_port->local_port); 2135 goto err_port_swid_set; 2136 } 2137 2138 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 2139 if (err) { 2140 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 2141 mlxsw_sp_port->local_port); 2142 goto err_port_speed_by_width_set; 2143 } 2144 2145 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 2146 if (err) { 2147 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 2148 mlxsw_sp_port->local_port); 2149 goto err_port_mtu_set; 2150 } 2151 2152 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2153 if (err) 2154 goto err_port_admin_status_set; 2155 2156 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 2157 if (err) { 2158 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 2159 mlxsw_sp_port->local_port); 2160 goto err_port_buffers_init; 2161 } 2162 2163 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 2164 if (err) { 2165 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 2166 mlxsw_sp_port->local_port); 2167 goto err_port_ets_init; 2168 } 2169 2170 /* ETS and buffers must be initialized before DCB. */ 2171 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 2172 if (err) { 2173 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 2174 mlxsw_sp_port->local_port); 2175 goto err_port_dcb_init; 2176 } 2177 2178 err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port); 2179 if (err) { 2180 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n", 2181 mlxsw_sp_port->local_port); 2182 goto err_port_pvid_vport_create; 2183 } 2184 2185 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 2186 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 2187 err = register_netdev(dev); 2188 if (err) { 2189 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 2190 mlxsw_sp_port->local_port); 2191 goto err_register_netdev; 2192 } 2193 2194 err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port, 2195 mlxsw_sp_port->local_port, dev, 2196 mlxsw_sp_port->split, module); 2197 if (err) { 2198 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 2199 mlxsw_sp_port->local_port); 2200 goto err_core_port_init; 2201 } 2202 2203 return 0; 2204 2205 err_core_port_init: 2206 unregister_netdev(dev); 2207 err_register_netdev: 2208 mlxsw_sp->ports[local_port] = NULL; 2209 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); 2210 err_port_pvid_vport_create: 2211 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 2212 err_port_dcb_init: 2213 err_port_ets_init: 2214 err_port_buffers_init: 2215 err_port_admin_status_set: 2216 err_port_mtu_set: 2217 err_port_speed_by_width_set: 2218 err_port_swid_set: 2219 err_port_system_port_mapping_set: 2220 err_dev_addr_init: 2221 free_percpu(mlxsw_sp_port->pcpu_stats); 2222 err_alloc_stats: 2223 kfree(mlxsw_sp_port->untagged_vlans); 2224 err_port_untagged_vlans_alloc: 2225 kfree(mlxsw_sp_port->active_vlans); 2226 err_port_active_vlans_alloc: 2227 free_netdev(dev); 2228 return err; 2229 } 2230 2231 static void mlxsw_sp_port_remove(struct mlxsw_sp 
*mlxsw_sp, u8 local_port) 2232 { 2233 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2234 2235 if (!mlxsw_sp_port) 2236 return; 2237 mlxsw_core_port_fini(&mlxsw_sp_port->core_port); 2238 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 2239 mlxsw_sp->ports[local_port] = NULL; 2240 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); 2241 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 2242 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 2243 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 2244 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); 2245 free_percpu(mlxsw_sp_port->pcpu_stats); 2246 kfree(mlxsw_sp_port->untagged_vlans); 2247 kfree(mlxsw_sp_port->active_vlans); 2248 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list)); 2249 free_netdev(mlxsw_sp_port->dev); 2250 } 2251 2252 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 2253 { 2254 int i; 2255 2256 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) 2257 mlxsw_sp_port_remove(mlxsw_sp, i); 2258 kfree(mlxsw_sp->ports); 2259 } 2260 2261 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 2262 { 2263 u8 module, width, lane; 2264 size_t alloc_size; 2265 int i; 2266 int err; 2267 2268 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS; 2269 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 2270 if (!mlxsw_sp->ports) 2271 return -ENOMEM; 2272 2273 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { 2274 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 2275 &width, &lane); 2276 if (err) 2277 goto err_port_module_info_get; 2278 if (!width) 2279 continue; 2280 mlxsw_sp->port_to_module[i] = module; 2281 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width, 2282 lane); 2283 if (err) 2284 goto err_port_create; 2285 } 2286 return 0; 2287 2288 err_port_create: 2289 err_port_module_info_get: 2290 for (i--; i >= 1; i--) 2291 mlxsw_sp_port_remove(mlxsw_sp, i); 2292 kfree(mlxsw_sp->ports); 2293 return err; 2294 } 2295 2296 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 2297 { 2298 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 2299 2300 return local_port - offset; 2301 } 2302 2303 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 2304 u8 module, unsigned int count) 2305 { 2306 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 2307 int err, i; 2308 2309 for (i = 0; i < count; i++) { 2310 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module, 2311 width, i * width); 2312 if (err) 2313 goto err_port_module_map; 2314 } 2315 2316 for (i = 0; i < count; i++) { 2317 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0); 2318 if (err) 2319 goto err_port_swid_set; 2320 } 2321 2322 for (i = 0; i < count; i++) { 2323 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 2324 module, width, i * width); 2325 if (err) 2326 goto err_port_create; 2327 } 2328 2329 return 0; 2330 2331 err_port_create: 2332 for (i--; i >= 0; i--) 2333 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2334 i = count; 2335 err_port_swid_set: 2336 for (i--; i >= 0; i--) 2337 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 2338 MLXSW_PORT_SWID_DISABLED_PORT); 2339 i = count; 2340 err_port_module_map: 2341 for (i--; i >= 0; i--) 2342 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i); 2343 return err; 2344 } 2345 2346 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 2347 u8 base_port, unsigned int count) 2348 { 2349 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 2350 int i; 2351 2352 /* Split 
by four means we need to re-create two ports, otherwise 2353 * only one. 2354 */ 2355 count = count / 2; 2356 2357 for (i = 0; i < count; i++) { 2358 local_port = base_port + i * 2; 2359 module = mlxsw_sp->port_to_module[local_port]; 2360 2361 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, 2362 0); 2363 } 2364 2365 for (i = 0; i < count; i++) 2366 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0); 2367 2368 for (i = 0; i < count; i++) { 2369 local_port = base_port + i * 2; 2370 module = mlxsw_sp->port_to_module[local_port]; 2371 2372 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 2373 width, 0); 2374 } 2375 } 2376 2377 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 2378 unsigned int count) 2379 { 2380 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2381 struct mlxsw_sp_port *mlxsw_sp_port; 2382 u8 module, cur_width, base_port; 2383 int i; 2384 int err; 2385 2386 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2387 if (!mlxsw_sp_port) { 2388 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2389 local_port); 2390 return -EINVAL; 2391 } 2392 2393 module = mlxsw_sp_port->mapping.module; 2394 cur_width = mlxsw_sp_port->mapping.width; 2395 2396 if (count != 2 && count != 4) { 2397 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 2398 return -EINVAL; 2399 } 2400 2401 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 2402 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 2403 return -EINVAL; 2404 } 2405 2406 /* Make sure we have enough slave (even) ports for the split. */ 2407 if (count == 2) { 2408 base_port = local_port; 2409 if (mlxsw_sp->ports[base_port + 1]) { 2410 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2411 return -EINVAL; 2412 } 2413 } else { 2414 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2415 if (mlxsw_sp->ports[base_port + 1] || 2416 mlxsw_sp->ports[base_port + 3]) { 2417 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2418 return -EINVAL; 2419 } 2420 } 2421 2422 for (i = 0; i < count; i++) 2423 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2424 2425 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 2426 if (err) { 2427 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2428 goto err_port_split_create; 2429 } 2430 2431 return 0; 2432 2433 err_port_split_create: 2434 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2435 return err; 2436 } 2437 2438 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 2439 { 2440 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2441 struct mlxsw_sp_port *mlxsw_sp_port; 2442 u8 cur_width, base_port; 2443 unsigned int count; 2444 int i; 2445 2446 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2447 if (!mlxsw_sp_port) { 2448 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2449 local_port); 2450 return -EINVAL; 2451 } 2452 2453 if (!mlxsw_sp_port->split) { 2454 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 2455 return -EINVAL; 2456 } 2457 2458 cur_width = mlxsw_sp_port->mapping.width; 2459 count = cur_width == 1 ? 4 : 2; 2460 2461 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2462 2463 /* Determine which ports to remove. 
*/ 2464 if (count == 2 && local_port >= base_port + 2) 2465 base_port = base_port + 2; 2466 2467 for (i = 0; i < count; i++) 2468 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2469 2470 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2471 2472 return 0; 2473 } 2474 2475 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2476 char *pude_pl, void *priv) 2477 { 2478 struct mlxsw_sp *mlxsw_sp = priv; 2479 struct mlxsw_sp_port *mlxsw_sp_port; 2480 enum mlxsw_reg_pude_oper_status status; 2481 u8 local_port; 2482 2483 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2484 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2485 if (!mlxsw_sp_port) 2486 return; 2487 2488 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2489 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2490 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2491 netif_carrier_on(mlxsw_sp_port->dev); 2492 } else { 2493 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2494 netif_carrier_off(mlxsw_sp_port->dev); 2495 } 2496 } 2497 2498 static struct mlxsw_event_listener mlxsw_sp_pude_event = { 2499 .func = mlxsw_sp_pude_event_func, 2500 .trap_id = MLXSW_TRAP_ID_PUDE, 2501 }; 2502 2503 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp, 2504 enum mlxsw_event_trap_id trap_id) 2505 { 2506 struct mlxsw_event_listener *el; 2507 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2508 int err; 2509 2510 switch (trap_id) { 2511 case MLXSW_TRAP_ID_PUDE: 2512 el = &mlxsw_sp_pude_event; 2513 break; 2514 } 2515 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp); 2516 if (err) 2517 return err; 2518 2519 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); 2520 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2521 if (err) 2522 goto err_event_trap_set; 2523 2524 return 0; 2525 2526 err_event_trap_set: 2527 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); 2528 return err; 2529 } 2530 2531 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp, 2532 enum mlxsw_event_trap_id trap_id) 2533 { 2534 struct mlxsw_event_listener *el; 2535 2536 switch (trap_id) { 2537 case MLXSW_TRAP_ID_PUDE: 2538 el = &mlxsw_sp_pude_event; 2539 break; 2540 } 2541 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); 2542 } 2543 2544 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port, 2545 void *priv) 2546 { 2547 struct mlxsw_sp *mlxsw_sp = priv; 2548 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2549 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2550 2551 if (unlikely(!mlxsw_sp_port)) { 2552 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2553 local_port); 2554 return; 2555 } 2556 2557 skb->dev = mlxsw_sp_port->dev; 2558 2559 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2560 u64_stats_update_begin(&pcpu_stats->syncp); 2561 pcpu_stats->rx_packets++; 2562 pcpu_stats->rx_bytes += skb->len; 2563 u64_stats_update_end(&pcpu_stats->syncp); 2564 2565 skb->protocol = eth_type_trans(skb, skb->dev); 2566 netif_receive_skb(skb); 2567 } 2568 2569 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { 2570 { 2571 .func = mlxsw_sp_rx_listener_func, 2572 .local_port = MLXSW_PORT_DONT_CARE, 2573 .trap_id = MLXSW_TRAP_ID_FDB_MC, 2574 }, 2575 /* Traps for specific L2 packet types, not trapped as FDB MC */ 2576 { 2577 .func = mlxsw_sp_rx_listener_func, 2578 .local_port = MLXSW_PORT_DONT_CARE, 2579 .trap_id = MLXSW_TRAP_ID_STP, 2580 }, 2581 { 2582 .func = mlxsw_sp_rx_listener_func, 
2583 .local_port = MLXSW_PORT_DONT_CARE, 2584 .trap_id = MLXSW_TRAP_ID_LACP, 2585 }, 2586 { 2587 .func = mlxsw_sp_rx_listener_func, 2588 .local_port = MLXSW_PORT_DONT_CARE, 2589 .trap_id = MLXSW_TRAP_ID_EAPOL, 2590 }, 2591 { 2592 .func = mlxsw_sp_rx_listener_func, 2593 .local_port = MLXSW_PORT_DONT_CARE, 2594 .trap_id = MLXSW_TRAP_ID_LLDP, 2595 }, 2596 { 2597 .func = mlxsw_sp_rx_listener_func, 2598 .local_port = MLXSW_PORT_DONT_CARE, 2599 .trap_id = MLXSW_TRAP_ID_MMRP, 2600 }, 2601 { 2602 .func = mlxsw_sp_rx_listener_func, 2603 .local_port = MLXSW_PORT_DONT_CARE, 2604 .trap_id = MLXSW_TRAP_ID_MVRP, 2605 }, 2606 { 2607 .func = mlxsw_sp_rx_listener_func, 2608 .local_port = MLXSW_PORT_DONT_CARE, 2609 .trap_id = MLXSW_TRAP_ID_RPVST, 2610 }, 2611 { 2612 .func = mlxsw_sp_rx_listener_func, 2613 .local_port = MLXSW_PORT_DONT_CARE, 2614 .trap_id = MLXSW_TRAP_ID_DHCP, 2615 }, 2616 { 2617 .func = mlxsw_sp_rx_listener_func, 2618 .local_port = MLXSW_PORT_DONT_CARE, 2619 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, 2620 }, 2621 { 2622 .func = mlxsw_sp_rx_listener_func, 2623 .local_port = MLXSW_PORT_DONT_CARE, 2624 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, 2625 }, 2626 { 2627 .func = mlxsw_sp_rx_listener_func, 2628 .local_port = MLXSW_PORT_DONT_CARE, 2629 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, 2630 }, 2631 { 2632 .func = mlxsw_sp_rx_listener_func, 2633 .local_port = MLXSW_PORT_DONT_CARE, 2634 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, 2635 }, 2636 { 2637 .func = mlxsw_sp_rx_listener_func, 2638 .local_port = MLXSW_PORT_DONT_CARE, 2639 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, 2640 }, 2641 { 2642 .func = mlxsw_sp_rx_listener_func, 2643 .local_port = MLXSW_PORT_DONT_CARE, 2644 .trap_id = MLXSW_TRAP_ID_ARPBC, 2645 }, 2646 { 2647 .func = mlxsw_sp_rx_listener_func, 2648 .local_port = MLXSW_PORT_DONT_CARE, 2649 .trap_id = MLXSW_TRAP_ID_ARPUC, 2650 }, 2651 { 2652 .func = mlxsw_sp_rx_listener_func, 2653 .local_port = MLXSW_PORT_DONT_CARE, 2654 .trap_id = MLXSW_TRAP_ID_MTUERROR, 2655 }, 2656 { 2657 .func = mlxsw_sp_rx_listener_func, 2658 .local_port = MLXSW_PORT_DONT_CARE, 2659 .trap_id = MLXSW_TRAP_ID_TTLERROR, 2660 }, 2661 { 2662 .func = mlxsw_sp_rx_listener_func, 2663 .local_port = MLXSW_PORT_DONT_CARE, 2664 .trap_id = MLXSW_TRAP_ID_OSPF, 2665 }, 2666 { 2667 .func = mlxsw_sp_rx_listener_func, 2668 .local_port = MLXSW_PORT_DONT_CARE, 2669 .trap_id = MLXSW_TRAP_ID_IP2ME, 2670 }, 2671 { 2672 .func = mlxsw_sp_rx_listener_func, 2673 .local_port = MLXSW_PORT_DONT_CARE, 2674 .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0, 2675 }, 2676 { 2677 .func = mlxsw_sp_rx_listener_func, 2678 .local_port = MLXSW_PORT_DONT_CARE, 2679 .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4, 2680 }, 2681 }; 2682 2683 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2684 { 2685 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2686 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2687 int i; 2688 int err; 2689 2690 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX); 2691 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); 2692 if (err) 2693 return err; 2694 2695 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); 2696 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); 2697 if (err) 2698 return err; 2699 2700 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { 2701 err = mlxsw_core_rx_listener_register(mlxsw_sp->core, 2702 &mlxsw_sp_rx_listener[i], 2703 mlxsw_sp); 2704 if (err) 2705 goto err_rx_listener_register; 2706 2707 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, 2708 mlxsw_sp_rx_listener[i].trap_id); 2709 err = 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2710 if (err) 2711 goto err_rx_trap_set; 2712 } 2713 return 0; 2714 2715 err_rx_trap_set: 2716 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 2717 &mlxsw_sp_rx_listener[i], 2718 mlxsw_sp); 2719 err_rx_listener_register: 2720 for (i--; i >= 0; i--) { 2721 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, 2722 mlxsw_sp_rx_listener[i].trap_id); 2723 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2724 2725 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 2726 &mlxsw_sp_rx_listener[i], 2727 mlxsw_sp); 2728 } 2729 return err; 2730 } 2731 2732 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2733 { 2734 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2735 int i; 2736 2737 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { 2738 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, 2739 mlxsw_sp_rx_listener[i].trap_id); 2740 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2741 2742 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 2743 &mlxsw_sp_rx_listener[i], 2744 mlxsw_sp); 2745 } 2746 } 2747 2748 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, 2749 enum mlxsw_reg_sfgc_type type, 2750 enum mlxsw_reg_sfgc_bridge_type bridge_type) 2751 { 2752 enum mlxsw_flood_table_type table_type; 2753 enum mlxsw_sp_flood_table flood_table; 2754 char sfgc_pl[MLXSW_REG_SFGC_LEN]; 2755 2756 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) 2757 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; 2758 else 2759 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; 2760 2761 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) 2762 flood_table = MLXSW_SP_FLOOD_TABLE_UC; 2763 else 2764 flood_table = MLXSW_SP_FLOOD_TABLE_BM; 2765 2766 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, 2767 flood_table); 2768 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl); 2769 } 2770 2771 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) 2772 { 2773 int type, err; 2774 2775 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { 2776 if (type == MLXSW_REG_SFGC_TYPE_RESERVED) 2777 continue; 2778 2779 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 2780 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); 2781 if (err) 2782 return err; 2783 2784 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 2785 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); 2786 if (err) 2787 return err; 2788 } 2789 2790 return 0; 2791 } 2792 2793 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2794 { 2795 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2796 2797 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2798 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2799 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2800 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2801 MLXSW_REG_SLCR_LAG_HASH_SIP | 2802 MLXSW_REG_SLCR_LAG_HASH_DIP | 2803 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2804 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2805 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 2806 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2807 } 2808 2809 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2810 const struct mlxsw_bus_info *mlxsw_bus_info) 2811 { 2812 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2813 int err; 2814 2815 mlxsw_sp->core = mlxsw_core; 2816 mlxsw_sp->bus_info = mlxsw_bus_info; 2817 INIT_LIST_HEAD(&mlxsw_sp->fids); 2818 INIT_LIST_HEAD(&mlxsw_sp->vfids.list); 2819 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); 2820 2821 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2822 if (err) { 2823 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 2824 return err; 2825 } 2826 2827 
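/* The remainder of this function follows the driver's goto-unwind
 * idiom: each subsystem that initializes successfully gets a matching
 * label in the error path at the bottom, and cleanup runs in exact
 * reverse order of initialization. A minimal standalone sketch of the
 * idiom, with hypothetical resource names (illustrative only, not
 * driver code):
 *
 *	static int example_init(void)
 *	{
 *		int err;
 *
 *		err = resource_a_init();
 *		if (err)
 *			return err;
 *
 *		err = resource_b_init();
 *		if (err)
 *			goto err_resource_b_init;
 *
 *		return 0;
 *
 *	err_resource_b_init:
 *		resource_a_fini();
 *		return err;
 *	}
 */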
err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2828 if (err) { 2829 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); 2830 return err; 2831 } 2832 2833 err = mlxsw_sp_traps_init(mlxsw_sp); 2834 if (err) { 2835 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n"); 2836 goto err_rx_listener_register; 2837 } 2838 2839 err = mlxsw_sp_flood_init(mlxsw_sp); 2840 if (err) { 2841 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n"); 2842 goto err_flood_init; 2843 } 2844 2845 err = mlxsw_sp_buffers_init(mlxsw_sp); 2846 if (err) { 2847 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 2848 goto err_buffers_init; 2849 } 2850 2851 err = mlxsw_sp_lag_init(mlxsw_sp); 2852 if (err) { 2853 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 2854 goto err_lag_init; 2855 } 2856 2857 err = mlxsw_sp_switchdev_init(mlxsw_sp); 2858 if (err) { 2859 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 2860 goto err_switchdev_init; 2861 } 2862 2863 err = mlxsw_sp_router_init(mlxsw_sp); 2864 if (err) { 2865 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 2866 goto err_router_init; 2867 } 2868 2869 err = mlxsw_sp_span_init(mlxsw_sp); 2870 if (err) { 2871 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 2872 goto err_span_init; 2873 } 2874 2875 err = mlxsw_sp_ports_create(mlxsw_sp); 2876 if (err) { 2877 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 2878 goto err_ports_create; 2879 } 2880 2881 return 0; 2882 2883 err_ports_create: 2884 mlxsw_sp_span_fini(mlxsw_sp); 2885 err_span_init: 2886 mlxsw_sp_router_fini(mlxsw_sp); 2887 err_router_init: 2888 mlxsw_sp_switchdev_fini(mlxsw_sp); 2889 err_switchdev_init: 2890 err_lag_init: 2891 mlxsw_sp_buffers_fini(mlxsw_sp); 2892 err_buffers_init: 2893 err_flood_init: 2894 mlxsw_sp_traps_fini(mlxsw_sp); 2895 err_rx_listener_register: 2896 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2897 return err; 2898 } 2899 2900 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 2901 { 2902 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2903 int i; 2904 2905 mlxsw_sp_ports_remove(mlxsw_sp); 2906 mlxsw_sp_span_fini(mlxsw_sp); 2907 mlxsw_sp_router_fini(mlxsw_sp); 2908 mlxsw_sp_switchdev_fini(mlxsw_sp); 2909 mlxsw_sp_buffers_fini(mlxsw_sp); 2910 mlxsw_sp_traps_fini(mlxsw_sp); 2911 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2912 WARN_ON(!list_empty(&mlxsw_sp->vfids.list)); 2913 WARN_ON(!list_empty(&mlxsw_sp->fids)); 2914 for (i = 0; i < MLXSW_SP_RIF_MAX; i++) 2915 WARN_ON_ONCE(mlxsw_sp->rifs[i]); 2916 } 2917 2918 static struct mlxsw_config_profile mlxsw_sp_config_profile = { 2919 .used_max_vepa_channels = 1, 2920 .max_vepa_channels = 0, 2921 .used_max_lag = 1, 2922 .max_lag = MLXSW_SP_LAG_MAX, 2923 .used_max_port_per_lag = 1, 2924 .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX, 2925 .used_max_mid = 1, 2926 .max_mid = MLXSW_SP_MID_MAX, 2927 .used_max_pgt = 1, 2928 .max_pgt = 0, 2929 .used_max_system_port = 1, 2930 .max_system_port = 64, 2931 .used_max_vlan_groups = 1, 2932 .max_vlan_groups = 127, 2933 .used_max_regions = 1, 2934 .max_regions = 400, 2935 .used_flood_tables = 1, 2936 .used_flood_mode = 1, 2937 .flood_mode = 3, 2938 .max_fid_offset_flood_tables = 2, 2939 .fid_offset_flood_table_size = VLAN_N_VID - 1, 2940 .max_fid_flood_tables = 2, 2941 .fid_flood_table_size = MLXSW_SP_VFID_MAX, 2942 .used_max_ib_mc = 1, 2943 .max_ib_mc = 0, 2944 .used_max_pkey = 1, 2945 .max_pkey = 0, 
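/* A convention worth noting in this profile: each used_* flag tells
 * the firmware that the field(s) it guards carry meaningful values;
 * entries whose used_* flag stays zero are ignored and the firmware
 * default applies. For example, .max_lag above only takes effect
 * because .used_max_lag is set, and the kvd_* sizes below are only
 * consumed because .used_kvd_sizes is set.
 */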
2946 .used_kvd_sizes = 1, 2947 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 2948 .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE, 2949 .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE, 2950 .swid_config = { 2951 { 2952 .used_type = 1, 2953 .type = MLXSW_PORT_SWID_TYPE_ETH, 2954 } 2955 }, 2956 .resource_query_enable = 1, 2957 }; 2958 2959 static struct mlxsw_driver mlxsw_sp_driver = { 2960 .kind = MLXSW_DEVICE_KIND_SPECTRUM, 2961 .owner = THIS_MODULE, 2962 .priv_size = sizeof(struct mlxsw_sp), 2963 .init = mlxsw_sp_init, 2964 .fini = mlxsw_sp_fini, 2965 .port_split = mlxsw_sp_port_split, 2966 .port_unsplit = mlxsw_sp_port_unsplit, 2967 .sb_pool_get = mlxsw_sp_sb_pool_get, 2968 .sb_pool_set = mlxsw_sp_sb_pool_set, 2969 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 2970 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 2971 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 2972 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 2973 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 2974 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 2975 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 2976 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 2977 .txhdr_construct = mlxsw_sp_txhdr_construct, 2978 .txhdr_len = MLXSW_TXHDR_LEN, 2979 .profile = &mlxsw_sp_config_profile, 2980 }; 2981 2982 static bool mlxsw_sp_port_dev_check(const struct net_device *dev) 2983 { 2984 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 2985 } 2986 2987 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 2988 { 2989 struct net_device *lower_dev; 2990 struct list_head *iter; 2991 2992 if (mlxsw_sp_port_dev_check(dev)) 2993 return netdev_priv(dev); 2994 2995 netdev_for_each_all_lower_dev(dev, lower_dev, iter) { 2996 if (mlxsw_sp_port_dev_check(lower_dev)) 2997 return netdev_priv(lower_dev); 2998 } 2999 return NULL; 3000 } 3001 3002 static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3003 { 3004 struct mlxsw_sp_port *mlxsw_sp_port; 3005 3006 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3007 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 3008 } 3009 3010 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 3011 { 3012 struct net_device *lower_dev; 3013 struct list_head *iter; 3014 3015 if (mlxsw_sp_port_dev_check(dev)) 3016 return netdev_priv(dev); 3017 3018 netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) { 3019 if (mlxsw_sp_port_dev_check(lower_dev)) 3020 return netdev_priv(lower_dev); 3021 } 3022 return NULL; 3023 } 3024 3025 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 3026 { 3027 struct mlxsw_sp_port *mlxsw_sp_port; 3028 3029 rcu_read_lock(); 3030 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 3031 if (mlxsw_sp_port) 3032 dev_hold(mlxsw_sp_port->dev); 3033 rcu_read_unlock(); 3034 return mlxsw_sp_port; 3035 } 3036 3037 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 3038 { 3039 dev_put(mlxsw_sp_port->dev); 3040 } 3041 3042 static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r, 3043 unsigned long event) 3044 { 3045 switch (event) { 3046 case NETDEV_UP: 3047 if (!r) 3048 return true; 3049 r->ref_count++; 3050 return false; 3051 case NETDEV_DOWN: 3052 if (r && --r->ref_count == 0) 3053 return true; 3054 /* It is possible we already removed the RIF ourselves 3055 * if it was assigned to a netdev that is now a bridge 3056 * or LAG slave. 
3057 */ 3058 return false; 3059 } 3060 3061 return false; 3062 } 3063 3064 static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp) 3065 { 3066 int i; 3067 3068 for (i = 0; i < MLXSW_SP_RIF_MAX; i++) 3069 if (!mlxsw_sp->rifs[i]) 3070 return i; 3071 3072 return MLXSW_SP_RIF_MAX; 3073 } 3074 3075 static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport, 3076 bool *p_lagged, u16 *p_system_port) 3077 { 3078 u8 local_port = mlxsw_sp_vport->local_port; 3079 3080 *p_lagged = mlxsw_sp_vport->lagged; 3081 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port; 3082 } 3083 3084 static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport, 3085 struct net_device *l3_dev, u16 rif, 3086 bool create) 3087 { 3088 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3089 bool lagged = mlxsw_sp_vport->lagged; 3090 char ritr_pl[MLXSW_REG_RITR_LEN]; 3091 u16 system_port; 3092 3093 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, 3094 l3_dev->mtu, l3_dev->dev_addr); 3095 3096 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port); 3097 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port, 3098 mlxsw_sp_vport_vid_get(mlxsw_sp_vport)); 3099 3100 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 3101 } 3102 3103 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport); 3104 3105 static struct mlxsw_sp_fid * 3106 mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev) 3107 { 3108 struct mlxsw_sp_fid *f; 3109 3110 f = kzalloc(sizeof(*f), GFP_KERNEL); 3111 if (!f) 3112 return NULL; 3113 3114 f->leave = mlxsw_sp_vport_rif_sp_leave; 3115 f->ref_count = 0; 3116 f->dev = l3_dev; 3117 f->fid = fid; 3118 3119 return f; 3120 } 3121 3122 static struct mlxsw_sp_rif * 3123 mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f) 3124 { 3125 struct mlxsw_sp_rif *r; 3126 3127 r = kzalloc(sizeof(*r), GFP_KERNEL); 3128 if (!r) 3129 return NULL; 3130 3131 ether_addr_copy(r->addr, l3_dev->dev_addr); 3132 r->mtu = l3_dev->mtu; 3133 r->ref_count = 1; 3134 r->dev = l3_dev; 3135 r->rif = rif; 3136 r->f = f; 3137 3138 return r; 3139 } 3140 3141 static struct mlxsw_sp_rif * 3142 mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport, 3143 struct net_device *l3_dev) 3144 { 3145 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3146 struct mlxsw_sp_fid *f; 3147 struct mlxsw_sp_rif *r; 3148 u16 fid, rif; 3149 int err; 3150 3151 rif = mlxsw_sp_avail_rif_get(mlxsw_sp); 3152 if (rif == MLXSW_SP_RIF_MAX) 3153 return ERR_PTR(-ERANGE); 3154 3155 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true); 3156 if (err) 3157 return ERR_PTR(err); 3158 3159 fid = mlxsw_sp_rif_sp_to_fid(rif); 3160 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true); 3161 if (err) 3162 goto err_rif_fdb_op; 3163 3164 f = mlxsw_sp_rfid_alloc(fid, l3_dev); 3165 if (!f) { 3166 err = -ENOMEM; 3167 goto err_rfid_alloc; 3168 } 3169 3170 r = mlxsw_sp_rif_alloc(rif, l3_dev, f); 3171 if (!r) { 3172 err = -ENOMEM; 3173 goto err_rif_alloc; 3174 } 3175 3176 f->r = r; 3177 mlxsw_sp->rifs[rif] = r; 3178 3179 return r; 3180 3181 err_rif_alloc: 3182 kfree(f); 3183 err_rfid_alloc: 3184 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); 3185 err_rif_fdb_op: 3186 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); 3187 return ERR_PTR(err); 3188 } 3189 3190 static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, 3191 struct mlxsw_sp_rif *r) 3192 { 3193 struct mlxsw_sp 
*mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3194 struct net_device *l3_dev = r->dev; 3195 struct mlxsw_sp_fid *f = r->f; 3196 u16 fid = f->fid; 3197 u16 rif = r->rif; 3198 3199 mlxsw_sp->rifs[rif] = NULL; 3200 f->r = NULL; 3201 3202 kfree(r); 3203 3204 kfree(f); 3205 3206 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); 3207 3208 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); 3209 } 3210 3211 static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport, 3212 struct net_device *l3_dev) 3213 { 3214 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3215 struct mlxsw_sp_rif *r; 3216 3217 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); 3218 if (!r) { 3219 r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev); 3220 if (IS_ERR(r)) 3221 return PTR_ERR(r); 3222 } 3223 3224 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f); 3225 r->f->ref_count++; 3226 3227 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid); 3228 3229 return 0; 3230 } 3231 3232 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport) 3233 { 3234 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); 3235 3236 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); 3237 3238 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); 3239 if (--f->ref_count == 0) 3240 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r); 3241 } 3242 3243 static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev, 3244 struct net_device *port_dev, 3245 unsigned long event, u16 vid) 3246 { 3247 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); 3248 struct mlxsw_sp_port *mlxsw_sp_vport; 3249 3250 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 3251 if (WARN_ON(!mlxsw_sp_vport)) 3252 return -EINVAL; 3253 3254 switch (event) { 3255 case NETDEV_UP: 3256 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev); 3257 case NETDEV_DOWN: 3258 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport); 3259 break; 3260 } 3261 3262 return 0; 3263 } 3264 3265 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, 3266 unsigned long event) 3267 { 3268 if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev)) 3269 return 0; 3270 3271 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1); 3272 } 3273 3274 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, 3275 struct net_device *lag_dev, 3276 unsigned long event, u16 vid) 3277 { 3278 struct net_device *port_dev; 3279 struct list_head *iter; 3280 int err; 3281 3282 netdev_for_each_lower_dev(lag_dev, port_dev, iter) { 3283 if (mlxsw_sp_port_dev_check(port_dev)) { 3284 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev, 3285 event, vid); 3286 if (err) 3287 return err; 3288 } 3289 } 3290 3291 return 0; 3292 } 3293 3294 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, 3295 unsigned long event) 3296 { 3297 if (netif_is_bridge_port(lag_dev)) 3298 return 0; 3299 3300 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1); 3301 } 3302 3303 static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, 3304 struct net_device *l3_dev) 3305 { 3306 u16 fid; 3307 3308 if (is_vlan_dev(l3_dev)) 3309 fid = vlan_dev_vlan_id(l3_dev); 3310 else if (mlxsw_sp->master_bridge.dev == l3_dev) 3311 fid = 1; 3312 else 3313 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev); 3314 3315 return mlxsw_sp_fid_find(mlxsw_sp, fid); 3316 } 3317 3318 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) 3319 { 3320 if (mlxsw_sp_fid_is_vfid(fid)) 3321 return 
MLXSW_REG_RITR_FID_IF; 3322 else 3323 return MLXSW_REG_RITR_VLAN_IF; 3324 } 3325 3326 static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, 3327 struct net_device *l3_dev, 3328 u16 fid, u16 rif, 3329 bool create) 3330 { 3331 enum mlxsw_reg_ritr_if_type rif_type; 3332 char ritr_pl[MLXSW_REG_RITR_LEN]; 3333 3334 rif_type = mlxsw_sp_rif_type_get(fid); 3335 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu, 3336 l3_dev->dev_addr); 3337 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid); 3338 3339 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 3340 } 3341 3342 static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp, 3343 struct net_device *l3_dev, 3344 struct mlxsw_sp_fid *f) 3345 { 3346 struct mlxsw_sp_rif *r; 3347 u16 rif; 3348 int err; 3349 3350 rif = mlxsw_sp_avail_rif_get(mlxsw_sp); 3351 if (rif == MLXSW_SP_RIF_MAX) 3352 return -ERANGE; 3353 3354 err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); 3355 if (err) 3356 return err; 3357 3358 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); 3359 if (err) 3360 goto err_rif_fdb_op; 3361 3362 r = mlxsw_sp_rif_alloc(rif, l3_dev, f); 3363 if (!r) { 3364 err = -ENOMEM; 3365 goto err_rif_alloc; 3366 } 3367 3368 f->r = r; 3369 mlxsw_sp->rifs[rif] = r; 3370 3371 netdev_dbg(l3_dev, "RIF=%d created\n", rif); 3372 3373 return 0; 3374 3375 err_rif_alloc: 3376 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); 3377 err_rif_fdb_op: 3378 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); 3379 return err; 3380 } 3381 3382 void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, 3383 struct mlxsw_sp_rif *r) 3384 { 3385 struct net_device *l3_dev = r->dev; 3386 struct mlxsw_sp_fid *f = r->f; 3387 u16 rif = r->rif; 3388 3389 mlxsw_sp->rifs[rif] = NULL; 3390 f->r = NULL; 3391 3392 kfree(r); 3393 3394 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); 3395 3396 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); 3397 3398 netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); 3399 } 3400 3401 static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, 3402 struct net_device *br_dev, 3403 unsigned long event) 3404 { 3405 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); 3406 struct mlxsw_sp_fid *f; 3407 3408 /* FID can either be an actual FID if the L3 device is the 3409 * VLAN-aware bridge or a VLAN device on top. Otherwise, the 3410 * L3 device is a VLAN-unaware bridge and we get a vFID. 
3411 */ 3412 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev); 3413 if (WARN_ON(!f)) 3414 return -EINVAL; 3415 3416 switch (event) { 3417 case NETDEV_UP: 3418 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f); 3419 case NETDEV_DOWN: 3420 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); 3421 break; 3422 } 3423 3424 return 0; 3425 } 3426 3427 static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, 3428 unsigned long event) 3429 { 3430 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 3431 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 3432 u16 vid = vlan_dev_vlan_id(vlan_dev); 3433 3434 if (mlxsw_sp_port_dev_check(real_dev)) 3435 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, 3436 vid); 3437 else if (netif_is_lag_master(real_dev)) 3438 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, 3439 vid); 3440 else if (netif_is_bridge_master(real_dev) && 3441 mlxsw_sp->master_bridge.dev == real_dev) 3442 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev, 3443 event); 3444 3445 return 0; 3446 } 3447 3448 static int mlxsw_sp_inetaddr_event(struct notifier_block *unused, 3449 unsigned long event, void *ptr) 3450 { 3451 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; 3452 struct net_device *dev = ifa->ifa_dev->dev; 3453 struct mlxsw_sp *mlxsw_sp; 3454 struct mlxsw_sp_rif *r; 3455 int err = 0; 3456 3457 mlxsw_sp = mlxsw_sp_lower_get(dev); 3458 if (!mlxsw_sp) 3459 goto out; 3460 3461 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 3462 if (!mlxsw_sp_rif_should_config(r, event)) 3463 goto out; 3464 3465 if (mlxsw_sp_port_dev_check(dev)) 3466 err = mlxsw_sp_inetaddr_port_event(dev, event); 3467 else if (netif_is_lag_master(dev)) 3468 err = mlxsw_sp_inetaddr_lag_event(dev, event); 3469 else if (netif_is_bridge_master(dev)) 3470 err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event); 3471 else if (is_vlan_dev(dev)) 3472 err = mlxsw_sp_inetaddr_vlan_event(dev, event); 3473 3474 out: 3475 return notifier_from_errno(err); 3476 } 3477 3478 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif, 3479 const char *mac, int mtu) 3480 { 3481 char ritr_pl[MLXSW_REG_RITR_LEN]; 3482 int err; 3483 3484 mlxsw_reg_ritr_rif_pack(ritr_pl, rif); 3485 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 3486 if (err) 3487 return err; 3488 3489 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); 3490 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); 3491 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); 3492 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 3493 } 3494 3495 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) 3496 { 3497 struct mlxsw_sp *mlxsw_sp; 3498 struct mlxsw_sp_rif *r; 3499 int err; 3500 3501 mlxsw_sp = mlxsw_sp_lower_get(dev); 3502 if (!mlxsw_sp) 3503 return 0; 3504 3505 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 3506 if (!r) 3507 return 0; 3508 3509 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false); 3510 if (err) 3511 return err; 3512 3513 err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu); 3514 if (err) 3515 goto err_rif_edit; 3516 3517 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true); 3518 if (err) 3519 goto err_rif_fdb_op; 3520 3521 ether_addr_copy(r->addr, dev->dev_addr); 3522 r->mtu = dev->mtu; 3523 3524 netdev_dbg(dev, "Updated RIF=%d\n", r->rif); 3525 3526 return 0; 3527 3528 err_rif_fdb_op: 3529 mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu); 3530 err_rif_edit: 3531 mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true); 3532 
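/* The rollback above mirrors the update sequence in reverse: the RIF
 * is reverted to its previous MAC address and MTU and the old FDB
 * entry is re-installed, leaving the device in the state still
 * described by r->addr and r->mtu. Those cached fields are only
 * updated once every step has succeeded, which is what makes this
 * unwind possible.
 */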
return err; 3533 } 3534 3535 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, 3536 u16 fid) 3537 { 3538 if (mlxsw_sp_fid_is_vfid(fid)) 3539 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid); 3540 else 3541 return test_bit(fid, lag_port->active_vlans); 3542 } 3543 3544 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port, 3545 u16 fid) 3546 { 3547 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3548 u8 local_port = mlxsw_sp_port->local_port; 3549 u16 lag_id = mlxsw_sp_port->lag_id; 3550 int i, count = 0; 3551 3552 if (!mlxsw_sp_port->lagged) 3553 return true; 3554 3555 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { 3556 struct mlxsw_sp_port *lag_port; 3557 3558 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); 3559 if (!lag_port || lag_port->local_port == local_port) 3560 continue; 3561 if (mlxsw_sp_lag_port_fid_member(lag_port, fid)) 3562 count++; 3563 } 3564 3565 return !count; 3566 } 3567 3568 static int 3569 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, 3570 u16 fid) 3571 { 3572 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3573 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 3574 3575 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); 3576 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 3577 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, 3578 mlxsw_sp_port->local_port); 3579 3580 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n", 3581 mlxsw_sp_port->local_port, fid); 3582 3583 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 3584 } 3585 3586 static int 3587 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, 3588 u16 fid) 3589 { 3590 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3591 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 3592 3593 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID); 3594 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 3595 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); 3596 3597 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n", 3598 mlxsw_sp_port->lag_id, fid); 3599 3600 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 3601 } 3602 3603 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) 3604 { 3605 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid)) 3606 return 0; 3607 3608 if (mlxsw_sp_port->lagged) 3609 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, 3610 fid); 3611 else 3612 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid); 3613 } 3614 3615 static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp) 3616 { 3617 struct mlxsw_sp_fid *f, *tmp; 3618 3619 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list) 3620 if (--f->ref_count == 0) 3621 mlxsw_sp_fid_destroy(mlxsw_sp, f); 3622 else 3623 WARN_ON_ONCE(1); 3624 } 3625 3626 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, 3627 struct net_device *br_dev) 3628 { 3629 return !mlxsw_sp->master_bridge.dev || 3630 mlxsw_sp->master_bridge.dev == br_dev; 3631 } 3632 3633 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, 3634 struct net_device *br_dev) 3635 { 3636 mlxsw_sp->master_bridge.dev = br_dev; 3637 mlxsw_sp->master_bridge.ref_count++; 3638 } 3639 3640 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp) 3641 { 3642 if (--mlxsw_sp->master_bridge.ref_count == 0) { 3643 mlxsw_sp->master_bridge.dev = NULL; 3644 /* It's possible upper VLAN devices are still holding 3645 * 
references to underlying FIDs. Drop the reference 3646 * and release the resources if it was the last one. 3647 * If it wasn't, then something bad happened. 3648 */ 3649 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp); 3650 } 3651 } 3652 3653 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, 3654 struct net_device *br_dev) 3655 { 3656 struct net_device *dev = mlxsw_sp_port->dev; 3657 int err; 3658 3659 /* When port is not bridged untagged packets are tagged with 3660 * PVID=VID=1, thereby creating an implicit VLAN interface in 3661 * the device. Remove it and let bridge code take care of its 3662 * own VLANs. 3663 */ 3664 err = mlxsw_sp_port_kill_vid(dev, 0, 1); 3665 if (err) 3666 return err; 3667 3668 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev); 3669 3670 mlxsw_sp_port->learning = 1; 3671 mlxsw_sp_port->learning_sync = 1; 3672 mlxsw_sp_port->uc_flood = 1; 3673 mlxsw_sp_port->bridged = 1; 3674 3675 return 0; 3676 } 3677 3678 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) 3679 { 3680 struct net_device *dev = mlxsw_sp_port->dev; 3681 3682 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 3683 3684 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp); 3685 3686 mlxsw_sp_port->learning = 0; 3687 mlxsw_sp_port->learning_sync = 0; 3688 mlxsw_sp_port->uc_flood = 0; 3689 mlxsw_sp_port->bridged = 0; 3690 3691 /* Add implicit VLAN interface in the device, so that untagged 3692 * packets will be classified to the default vFID. 3693 */ 3694 mlxsw_sp_port_add_vid(dev, 0, 1); 3695 } 3696 3697 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 3698 { 3699 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3700 3701 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 3702 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 3703 } 3704 3705 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 3706 { 3707 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3708 3709 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 3710 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 3711 } 3712 3713 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 3714 u16 lag_id, u8 port_index) 3715 { 3716 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3717 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3718 3719 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 3720 lag_id, port_index); 3721 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3722 } 3723 3724 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 3725 u16 lag_id) 3726 { 3727 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3728 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3729 3730 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 3731 lag_id); 3732 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3733 } 3734 3735 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 3736 u16 lag_id) 3737 { 3738 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3739 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3740 3741 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 3742 lag_id); 3743 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3744 } 3745 3746 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 3747 u16 lag_id) 3748 { 3749 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3750 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3751 3752 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 3753 lag_id); 
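/* The return below completes the pack-then-write pattern used for
 * every register access in this file: a payload buffer is filled in
 * on the stack by a mlxsw_reg_*_pack() helper (plus optional *_set()
 * helpers) and then emitted with a single mlxsw_reg_write(). A minimal
 * sketch of the shape, with a hypothetical register "foo"
 * (illustrative only):
 *
 *	char foo_pl[MLXSW_REG_FOO_LEN];
 *	int err;
 *
 *	mlxsw_reg_foo_pack(foo_pl, local_port);
 *	mlxsw_reg_foo_enable_set(foo_pl, true);
 *	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(foo), foo_pl);
 */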
3754 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3755 } 3756 3757 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 3758 struct net_device *lag_dev, 3759 u16 *p_lag_id) 3760 { 3761 struct mlxsw_sp_upper *lag; 3762 int free_lag_id = -1; 3763 int i; 3764 3765 for (i = 0; i < MLXSW_SP_LAG_MAX; i++) { 3766 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 3767 if (lag->ref_count) { 3768 if (lag->dev == lag_dev) { 3769 *p_lag_id = i; 3770 return 0; 3771 } 3772 } else if (free_lag_id < 0) { 3773 free_lag_id = i; 3774 } 3775 } 3776 if (free_lag_id < 0) 3777 return -EBUSY; 3778 *p_lag_id = free_lag_id; 3779 return 0; 3780 } 3781 3782 static bool 3783 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 3784 struct net_device *lag_dev, 3785 struct netdev_lag_upper_info *lag_upper_info) 3786 { 3787 u16 lag_id; 3788 3789 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) 3790 return false; 3791 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) 3792 return false; 3793 return true; 3794 } 3795 3796 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 3797 u16 lag_id, u8 *p_port_index) 3798 { 3799 int i; 3800 3801 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { 3802 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 3803 *p_port_index = i; 3804 return 0; 3805 } 3806 } 3807 return -EBUSY; 3808 } 3809 3810 static void 3811 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 3812 u16 lag_id) 3813 { 3814 struct mlxsw_sp_port *mlxsw_sp_vport; 3815 struct mlxsw_sp_fid *f; 3816 3817 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1); 3818 if (WARN_ON(!mlxsw_sp_vport)) 3819 return; 3820 3821 /* If vPort is assigned a RIF, then leave it since it's no 3822 * longer valid. 3823 */ 3824 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); 3825 if (f) 3826 f->leave(mlxsw_sp_vport); 3827 3828 mlxsw_sp_vport->lag_id = lag_id; 3829 mlxsw_sp_vport->lagged = 1; 3830 } 3831 3832 static void 3833 mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port) 3834 { 3835 struct mlxsw_sp_port *mlxsw_sp_vport; 3836 struct mlxsw_sp_fid *f; 3837 3838 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1); 3839 if (WARN_ON(!mlxsw_sp_vport)) 3840 return; 3841 3842 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); 3843 if (f) 3844 f->leave(mlxsw_sp_vport); 3845 3846 mlxsw_sp_vport->lagged = 0; 3847 } 3848 3849 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 3850 struct net_device *lag_dev) 3851 { 3852 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3853 struct mlxsw_sp_upper *lag; 3854 u16 lag_id; 3855 u8 port_index; 3856 int err; 3857 3858 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 3859 if (err) 3860 return err; 3861 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 3862 if (!lag->ref_count) { 3863 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 3864 if (err) 3865 return err; 3866 lag->dev = lag_dev; 3867 } 3868 3869 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 3870 if (err) 3871 return err; 3872 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 3873 if (err) 3874 goto err_col_port_add; 3875 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); 3876 if (err) 3877 goto err_col_port_enable; 3878 3879 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 3880 mlxsw_sp_port->local_port); 3881 mlxsw_sp_port->lag_id = lag_id; 3882 mlxsw_sp_port->lagged = 1; 3883 lag->ref_count++; 3884 3885 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id); 3886 3887 return 0; 3888 
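/* The labels below unwind the join in reverse order: a port whose
 * collector could not be enabled is removed from the LAG again, and a
 * LAG that never gained a member (ref_count still zero) is destroyed.
 * A LAG that other ports already joined is left intact thanks to the
 * ref_count check.
 */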
err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}

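/* Upper device notifications arrive in two phases: NETDEV_PRECHANGEUPPER,
 * where an unsupported topology can still be vetoed by returning an error,
 * and NETDEV_CHANGEUPPER, where the agreed-upon change is committed to the
 * device. For example, enslaving a port to a bridge (e.g. with
 * 'ip link set dev <port> master br0') is validated in the first phase and
 * only reflected in hardware in the second.
 */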
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* A HW limitation forbids enslaving a port to multiple
		 * bridges.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

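/* A VLAN upper of the master bridge is backed by the FID matching its VID.
 * The FID is looked up first and only created on demand, and is reference
 * counted, as it may be shared with other users of the same VID.
 */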
static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
		}
		break;
	}

	return 0;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}

static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}

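/* Joining a vFID is a three step process: find or create the vFID backing
 * 'br_dev', enable flooding for the vPort on that vFID, and install the
 * {Port, VID} to FID mapping so traffic is classified to it. The steps are
 * unwound in reverse order on failure.
 */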
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}

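/* Like the port event handler above, but for events on a VLAN device on
 * top of a port (or of a LAG): the change is applied to the vPort that
 * corresponds to 'vid' rather than to the port itself.
 */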
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port that are members of the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

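/* The netdevice and inetaddr notifiers are registered before the driver
 * itself, so that no events are missed once devices start probing; module
 * exit tears everything down in the opposite order.
 */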
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);