/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

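/* The Tx header is a 16 byte (MLXSW_TXHDR_LEN) descriptor pushed in front of
 * every packet handed to the device and consumed by it on the way out; the
 * MLXSW_ITEM32() helpers below describe its fields.
 */
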
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

static bool mlxsw_sp_port_dev_check(const struct net_device *dev);

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

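/* SPAN (mirroring) analyzer entries are allocated from a fixed pool sized
 * according to the MAX_SPAN resource. An entry is associated with the port
 * traffic is mirrored to, and the ports being inspected are tracked on the
 * entry's bound_ports_list.
 */
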
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 0;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

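/* When a port is mirrored on egress, an internal buffer (SBIB) sized
 * according to the port's MTU must be configured on the inspected port to
 * absorb the mirrored traffic; it is released when the binding is removed.
 */
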
static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

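/* Mirroring from one port to another: take a reference on (or create) the
 * analyzer entry of the destination port and bind the source port to it as
 * an inspected port. On bind failure the reference is dropped again.
 */
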
static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
			      vid_end, learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						learn_enable);
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

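/* Headroom (PG buffer) sizing: each used priority group is given twice the
 * MTU worth of cells. Lossless PGs (PAUSE or PFC enabled) are additionally
 * sized with the configured delay and get an Xoff threshold, while lossy
 * PGs are packed as plain buffers.
 */
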
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

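/* Fold the per-CPU software counters into @stats; these back the
 * IFLA_OFFLOAD_XSTATS_CPU_HIT statistics exposed below.
 */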
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id,
					   const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

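/* Periodic work that refreshes the cached HW counters. The PPCNT query is
 * skipped while the carrier is down, and the work re-arms itself every
 * MLXSW_HW_STATS_UPDATE_TIME.
 */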
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));

	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true,
						   vid, vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

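/* A vPort represents a {Port, VID} pair. It is created when a VLAN upper
 * device is added on top of the port and shares the underlying local port,
 * while tracking its own VID, STP state and FID.
 */
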
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
				unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

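/* tc matchall offload: an egress-mirror mirred action is offloaded by
 * binding the port to a SPAN entry that points at the target port. Offloaded
 * entries are tracked by their tc cookie so they can be found on removal.
 */
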
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;
	int err;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -ENOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
	mall_tc_entry->mirror.to_local_port = to_port->local_port;
	mall_tc_entry->mirror.ingress = ingress;
	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);

	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
	if (err)
		goto err_mirror_add;
	return 0;

err_mirror_add:
	list_del(&mall_tc_entry->list);
	kfree(mall_tc_entry);
	return err;
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -ENOTSUPP;
	}

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (!is_tcf_mirred_egress_mirror(a) ||
		    protocol != htons(ETH_P_ALL))
			return -ENOTSUPP;

		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
							    a, ingress);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
							cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
		span_type = mall_tc_entry->mirror.ingress ?
				MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;

		mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
		break;
	default:
		WARN_ON(1);
	}

	list_del(&mall_tc_entry->list);
	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	if (tc->type == TC_SETUP_MATCHALL) {
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EINVAL;
		}
	}

	return -ENOTSUPP;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_neigh_construct	= mlxsw_sp_router_neigh_construct,
	.ndo_neigh_destroy	= mlxsw_sp_router_neigh_destroy,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

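/* ethtool statistics are built from three PPCNT counter groups: the IEEE
 * 802.3 counters below, plus per-priority and per-TC counters that are
 * replicated for each of the eight priorities/traffic classes.
 */
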
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
{
	u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);

	return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
}

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)

static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}
	return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++)
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

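/* Maps PTYS register protocol bits to ethtool link modes and speeds. The
 * mapping is not one-to-one: several PTYS bits may share a single ethtool
 * mode (e.g. SGMII and 1000BASE-KX), and one PTYS bit may appear under
 * several ethtool modes (e.g. 56GBASE_R4).
 */
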
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
				  struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			__set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
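/* Illustrative example (not from the original source): assuming the
 * per-lane base speed constant used by mlxsw_sp_port_speed_by_width_set()
 * below, MLXSW_SP_PORT_BASE_SPEED, is 25000 (25 Gb/s), a 4-lane port
 * yields upper_speed = 100000, and mlxsw_sp_to_ptys_upper_speed() then
 * ORs together the PTYS masks of every table entry with .speed <= 100000,
 * i.e. everything from 100BASE-T up to the 100GBASE modes.
 */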
static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}

static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
					     struct ethtool_link_ksettings *cmd)
{
	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
}

static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
				    struct ethtool_link_ksettings *cmd)
{
	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
}

static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	autoneg = mlxsw_sp_port->link.autoneg;
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
			      &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}
static int
mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap, eth_proto_new;
	bool autoneg;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);

	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
	eth_proto_new = autoneg ?
		mlxsw_sp_to_ptys_advert_link(cmd) :
		mlxsw_sp_to_ptys_speed(cmd->base.speed);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "No supported speed requested\n");
		return -EINVAL;
	}

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	if (!netif_running(dev))
		return 0;

	mlxsw_sp_port->link.autoneg = autoneg;

	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = mlxsw_sp_port_get_pauseparam,
	.set_pauseparam = mlxsw_sp_port_set_pauseparam,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
};
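/* Illustrative usage (not from the original source): with the ops above
 * wired up, link parameters can be inspected and forced from userspace,
 * e.g. (assuming a port netdev named swp1):
 *
 *	ethtool swp1				# get_link_ksettings
 *	ethtool -s swp1 autoneg off speed 40000	# set_link_ksettings
 *	ethtool -S swp1				# get_ethtool_stats
 *
 * Note that a forced speed is intersected with eth_proto_cap, so
 * requesting a speed the port cannot do fails with -EINVAL
 * ("No supported speed requested").
 */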
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
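/* Illustrative sketch (not from the original source): the QEEC calls in
 * mlxsw_sp_port_ets_init() above build the following per-port scheduling
 * tree, with DWRR disabled and all max shapers off:
 *
 *	port (HIERARCY_PORT)
 *	  `- group 0 (HIERARCY_GROUP)
 *	       |- subgroup 0 (HIERARCY_SUBGROUP) <- TC 0
 *	       |- subgroup 1                     <- TC 1
 *	       ...
 *	       `- subgroup 7                     <- TC 7
 *
 * and the final QTCT loop maps every switch priority to TC 0 until DCB
 * (initialized later in mlxsw_sp_port_create()) changes the mapping.
 */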
static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->pvid = 1;

	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}

static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->hw_stats.cache =
		kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);

	if (!mlxsw_sp_port->hw_stats.cache) {
		err = -ENOMEM;
		goto err_alloc_hw_stats;
	}
	INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_vport_create;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
				   mlxsw_sp_port->local_port, dev,
				   mlxsw_sp_port->split, module);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			mlxsw_sp_port->local_port);
		goto err_core_port_init;
	}

	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
	return 0;

err_core_port_init:
	unregister_netdev(dev);
err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
err_port_pvid_vport_create:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->hw_stats.cache);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
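/* Illustrative example (not from the original source): assuming
 * MLXSW_SP_PORTS_PER_CLUSTER_MAX is 4, splitting local port 7 resolves
 * its cluster base as follows:
 *
 *	offset = (7 - 1) % 4 = 2
 *	base_port = 7 - 2 = 5
 *
 * so ports 5..8 form the cluster, and a 4-way split in
 * mlxsw_sp_port_split() below checks that the "slave" slots
 * base_port + 1 and base_port + 3 are free.
 */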
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};

static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_func(skb, local_port, priv);
}

#define MLXSW_SP_RXL(_func, _trap_id, _action)			\
	{							\
		.func = _func,					\
		.local_port = MLXSW_PORT_DONT_CARE,		\
		.trap_id = MLXSW_TRAP_ID_##_trap_id,		\
		.action = MLXSW_REG_HPKT_ACTION_##_action,	\
	}
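/* For reference (not from the original source), a table entry such as
 * MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, STP, TRAP_TO_CPU) expands to:
 *
 *	{
 *		.func = mlxsw_sp_rx_listener_func,
 *		.local_port = MLXSW_PORT_DONT_CARE,
 *		.trap_id = MLXSW_TRAP_ID_STP,
 *		.action = MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
 *	}
 */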
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, FDB_MC, TRAP_TO_CPU),
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, STP, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LACP, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, EAPOL, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LLDP, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MMRP, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MVRP, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RPVST, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, DHCP, MIRROR_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, IGMP_QUERY, MIRROR_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V1_REPORT, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_REPORT, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_LEAVE, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V3_REPORT, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPBC, MIRROR_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPUC, MIRROR_TO_CPU),
	/* L3 traps */
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MTUERROR, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, TTLERROR, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LBERROR, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, OSPF, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IP2ME, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RTR_INGRESS0, TRAP_TO_CPU),
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, HOST_MISS_IPV4, TRAP_TO_CPU),
};

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, mlxsw_sp_rx_listener[i].action,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
}

static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_offset_flood_tables = 2,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 2,
	.fid_flood_table_size = MLXSW_SP_VFID_MAX,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_split_data = 1,
	.kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts = 2,
	.kvd_hash_double_parts = 1,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable = 1,
};
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = MLXSW_DEVICE_KIND_SPECTRUM,
	.owner = THIS_MODULE,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port);

	return port;
}

static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port);

	return port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
				       unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		if (!r)
			return true;
		r->ref_count++;
		return false;
	case NETDEV_DOWN:
		if (r && --r->ref_count == 0)
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}

static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (!mlxsw_sp->rifs[i])
			return i;

	return MLXSW_SP_INVALID_RIF;
}
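/* Illustrative walkthrough (not from the original source) of the
 * reference counting in mlxsw_sp_rif_should_config() above: the first
 * NETDEV_UP on an L3 netdev sees r == NULL and requests a RIF; further
 * addresses on the same netdev only bump r->ref_count. NETDEV_DOWN drops
 * the count, and only the last address triggers RIF removal, unless the
 * RIF was already torn down because the netdev was enslaved to a bridge
 * or LAG in the meantime.
 */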
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
					   bool *p_lagged, u16 *p_system_port)
{
	u8 local_port = mlxsw_sp_vport->local_port;

	*p_lagged = mlxsw_sp_vport->lagged;
	*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
}

static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *l3_dev, u16 rif,
				    bool create)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	bool lagged = mlxsw_sp_vport->lagged;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 system_port;

	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
			    l3_dev->mtu, l3_dev->dev_addr);

	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *
mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->leave = mlxsw_sp_vport_rif_sp_leave;
	f->ref_count = 0;
	f->dev = l3_dev;
	f->fid = fid;

	return f;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	ether_addr_copy(r->addr, l3_dev->dev_addr);
	r->mtu = l3_dev->mtu;
	r->ref_count = 1;
	r->dev = l3_dev;
	r->rif = rif;
	r->f = f;

	return r;
}

static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}

static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *r;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!r) {
		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(r))
			return PTR_ERR(r);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
	r->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);

	return 0;
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}

static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}

static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *l3_dev)
{
	u16 fid;

	if (is_vlan_dev(l3_dev))
		fid = vlan_dev_vlan_id(l3_dev);
	else if (mlxsw_sp->master_bridge.dev == l3_dev)
		fid = 1;
	else
		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);

	return mlxsw_sp_fid_find(mlxsw_sp, fid);
}

static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
{
	return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
	       MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
}

static u16 mlxsw_sp_flood_table_index_get(u16 fid)
{
	return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
}
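/* Illustrative note (not from the original source): FIDs used by the
 * VLAN-aware bridge presumably occupy the VLAN ID range, so the flood
 * table index is the FID itself, while vFIDs of VLAN-unaware bridges sit
 * in a range above it and are first translated back to a 0-based index
 * by mlxsw_sp_fid_to_vfid(). That is why the two helpers above pick both
 * a different table type and a different index for the SFTR register.
 */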
static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
					  bool set)
{
	enum mlxsw_flood_table_type table_type;
	char *sftr_pl;
	u16 index;
	int err;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	table_type = mlxsw_sp_flood_table_type_get(fid);
	index = mlxsw_sp_flood_table_index_get(fid);
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
			    1, MLXSW_PORT_ROUTER_PORT, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}

static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return MLXSW_REG_RITR_FID_IF;
	else
		return MLXSW_REG_RITR_VLAN_IF;
}

static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return -ERANGE;

	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		goto err_rif_bridge_op;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
	return err;
}

void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}

static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}

static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}

static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
					 u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
	else
		return test_bit(fid, lag_port->active_vlans);
}

static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	u64 max_lag_members;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}
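/* Illustrative example (not from the original source): consider ports
 * swp1 and swp2 that are members of the same LAG, both using FID 10.
 * When swp1 leaves the FID, mlxsw_sp_port_fdb_should_flush() finds
 * another member (swp2) still using it and returns false, so the FDB
 * entries, which point at the LAG and are shared by all members, are
 * kept. Only the last member to leave triggers the per-LAG/FID flush
 * via the SFDF register.
 */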
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When a port is not bridged, untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let the bridge code take care of
	 * its own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add an implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
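
/* The helpers below program the SLCOR register, which controls the
 * collecting (RX) side of a port's LAG membership. The distributing
 * (TX) side is handled separately via the SLDR register further below.
 */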
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}
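
/* Untagged traffic on a port is represented by the PVID vPort (VID=1).
 * Its LAG state is kept in sync with the underlying port, since FDB and
 * router interface operations use the LAG identity once the port is
 * lagged.
 */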
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
}

static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lagged = 0;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
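
/* Collection was enabled as part of joining the LAG above. Distribution
 * (TX) is only enabled when the bonding driver reports the port as
 * tx_enabled, which is relayed through the helpers below.
 */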
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* A HW limitation forbids enslaving a port to multiple
		 * bridges.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
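
/* VLAN uppers of the VLAN-aware bridge are backed by FIDs equal to
 * their VID. The FID is created on the first link below and destroyed
 * once the last reference, including a router interface, is dropped.
 */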
static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
		}
		break;
	}

	return 0;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}

static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}

static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}
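
	/* Two pieces of hardware state tie the vPort to the vFID:
	 * membership in the vFID's flood tables and a {Port, VID} to FID
	 * mapping for classifying ingress traffic.
	 */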
	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}
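
/* Handle notifications for a VLAN upper of a port, identified by its
 * VID. This is the path taken when, e.g., a VLAN device such as
 * sw1p1.100 (an illustrative name) is enslaved to a VLAN-unaware
 * bridge.
 */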
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members of the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}
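
/* The notifier blocks are registered before the core driver and
 * unregistered in reverse order after it, so events raised while ports
 * are being created or torn down are not missed.
 */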
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);