/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "../mlxfw/mlxfw.h"

#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1420
#define MLXSW_FWREV_SUBMINOR 122

static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
	.major = MLXSW_FWREV_MAJOR,
	.minor = MLXSW_FWREV_MINOR,
	.subminor = MLXSW_FWREV_SUBMINOR
};

#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

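/* Tx header field definitions. The driver prepends a MLXSW_TXHDR_LEN bytes
 * long header to every transmitted skb (see mlxsw_sp_txhdr_construct()
 * below). Each MLXSW_ITEM32() invocation generates mlxsw_tx_hdr_<field>_get()
 * and mlxsw_tx_hdr_<field>_set() accessors for one field; its last three
 * arguments are the byte offset of the containing 32-bit word, the bit shift
 * within that word and the field width in bits.
 */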
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

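/* Firmware flashing glue. The common mlxfw module drives the firmware update
 * state machine through the callbacks below, which translate each step into
 * the corresponding management register: MCQI (component query), MCC
 * (control and state transitions) and MCDA (data download).
 */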
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release
};

static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}

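/* Compare two firmware revisions lexicographically, most significant part
 * first. For example, 13.1421.0 is considered newer than the supported
 * 13.1420.122, while 13.1420.121 is not.
 */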
static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
			       const struct mlxsw_fw_rev *b)
{
	if (a->major != b->major)
		return a->major > b->major;
	if (a->minor != b->minor)
		return a->minor > b->minor;
	return a->subminor >= b->subminor;
}

static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct firmware *firmware;
	int err;

	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
		 MLXSW_SP_FW_FILENAME);

	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			MLXSW_SP_FW_FILENAME);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

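/* Construct the Tx header used for control packets: packets built by the
 * driver itself are sent on the control TClass directly to the requested
 * local port, bypassing the regular forwarding pipeline.
 */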
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

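/* SPAN (port mirroring) entry management. A SPAN entry represents an
 * analyzer port that packets are mirrored to; entries are allocated from a
 * fixed pool sized by the MAX_SPAN resource and are reference counted, so
 * several inspected ports can share the same analyzer port.
 */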
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

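/* Buffer size (in cells) that an egress mirrored port reserves in the
 * shared buffer. The MTU is scaled by 5/2 to leave headroom for the
 * mirrored copies, with one extra cell added to absorb rounding; the exact
 * factor is presumably a device tuning recommendation.
 */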
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

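/* Derive the port MAC address from the switch base MAC by adding the local
 * port number to the last byte. This assumes the base MAC was allocated
 * with enough room below the next boundary that the addition never wraps.
 */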
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

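/* Tx path: make sure the skb has MLXSW_TXHDR_LEN bytes of headroom
 * (reallocating if necessary), pad it to the minimum Ethernet frame size,
 * prepend the Tx header and hand the skb to the core for transmission
 * through the underlying bus.
 */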
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

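/* Headroom (ingress buffer) configuration. Each priority group (PG) that
 * some priority maps to is configured as either lossy or lossless: a
 * lossless PG must reserve, on top of the 2 * MTU xoff threshold, enough
 * delay buffer to absorb the traffic still in flight while PAUSE/PFC takes
 * effect, which is why the buffer is sized thres + delay below.
 */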
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

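/* VLAN membership is configured via the SPVM register, which only holds a
 * limited number of records per transaction; mlxsw_sp_port_vlan_set()
 * therefore splits large [vid_begin, vid_end] ranges into chunks of
 * MLXSW_REG_SPVM_REC_MAX_COUNT VIDs.
 */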
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return mlxsw_sp_port_vlan;

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	to_port = mlxsw_sp->ports[mirror->to_local_port];
	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	tcf_exts_to_list(f->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

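/* tc offload entry points. The direction of a clsact filter is recovered
 * from its class ID; matchall additionally rejects anything outside the
 * default chain (chain 0).
 */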
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f)
{
	bool ingress;

	if (is_classid_clsact_ingress(f->common.classid))
		ingress = true;
	else if (is_classid_clsact_egress(f->common.classid))
		ingress = false;
	else
		return -EOPNOTSUPP;

	if (f->common.chain_index)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct tc_cls_flower_offload *f)
{
	bool ingress;

	if (is_classid_clsact_ingress(f->common.classid))
		ingress = true;
	else if (is_classid_clsact_egress(f->common.classid))
		ingress = false;
	else
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
	case TC_SETUP_CLSFLOWER:
		return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open = mlxsw_sp_port_open,
	.ndo_stop = mlxsw_sp_port_stop,
	.ndo_start_xmit = mlxsw_sp_port_xmit,
	.ndo_setup_tc = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};

mlxsw_sp->bus_info->fw_rev.minor, 1795 mlxsw_sp->bus_info->fw_rev.subminor); 1796 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1797 sizeof(drvinfo->bus_info)); 1798 } 1799 1800 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1801 struct ethtool_pauseparam *pause) 1802 { 1803 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1804 1805 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1806 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1807 } 1808 1809 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1810 struct ethtool_pauseparam *pause) 1811 { 1812 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1813 1814 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1815 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1816 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1817 1818 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1819 pfcc_pl); 1820 } 1821 1822 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1823 struct ethtool_pauseparam *pause) 1824 { 1825 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1826 bool pause_en = pause->tx_pause || pause->rx_pause; 1827 int err; 1828 1829 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1830 netdev_err(dev, "PFC already enabled on port\n"); 1831 return -EINVAL; 1832 } 1833 1834 if (pause->autoneg) { 1835 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1836 return -EINVAL; 1837 } 1838 1839 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1840 if (err) { 1841 netdev_err(dev, "Failed to configure port's headroom\n"); 1842 return err; 1843 } 1844 1845 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1846 if (err) { 1847 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1848 goto err_port_pause_configure; 1849 } 1850 1851 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1852 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1853 1854 return 0; 1855 1856 err_port_pause_configure: 1857 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1858 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1859 return err; 1860 } 1861 1862 struct mlxsw_sp_port_hw_stats { 1863 char str[ETH_GSTRING_LEN]; 1864 u64 (*getter)(const char *payload); 1865 bool cells_bytes; 1866 }; 1867 1868 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1869 { 1870 .str = "a_frames_transmitted_ok", 1871 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1872 }, 1873 { 1874 .str = "a_frames_received_ok", 1875 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1876 }, 1877 { 1878 .str = "a_frame_check_sequence_errors", 1879 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1880 }, 1881 { 1882 .str = "a_alignment_errors", 1883 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1884 }, 1885 { 1886 .str = "a_octets_transmitted_ok", 1887 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1888 }, 1889 { 1890 .str = "a_octets_received_ok", 1891 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1892 }, 1893 { 1894 .str = "a_multicast_frames_xmitted_ok", 1895 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1896 }, 1897 { 1898 .str = "a_broadcast_frames_xmitted_ok", 1899 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1900 }, 1901 { 1902 .str = "a_multicast_frames_received_ok", 1903 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1904 }, 1905 { 1906 .str = "a_broadcast_frames_received_ok", 1907 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1908 }, 
1909 { 1910 .str = "a_in_range_length_errors", 1911 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1912 }, 1913 { 1914 .str = "a_out_of_range_length_field", 1915 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1916 }, 1917 { 1918 .str = "a_frame_too_long_errors", 1919 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1920 }, 1921 { 1922 .str = "a_symbol_error_during_carrier", 1923 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1924 }, 1925 { 1926 .str = "a_mac_control_frames_transmitted", 1927 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 1928 }, 1929 { 1930 .str = "a_mac_control_frames_received", 1931 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 1932 }, 1933 { 1934 .str = "a_unsupported_opcodes_received", 1935 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 1936 }, 1937 { 1938 .str = "a_pause_mac_ctrl_frames_received", 1939 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 1940 }, 1941 { 1942 .str = "a_pause_mac_ctrl_frames_xmitted", 1943 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 1944 }, 1945 }; 1946 1947 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 1948 1949 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 1950 { 1951 .str = "rx_octets_prio", 1952 .getter = mlxsw_reg_ppcnt_rx_octets_get, 1953 }, 1954 { 1955 .str = "rx_frames_prio", 1956 .getter = mlxsw_reg_ppcnt_rx_frames_get, 1957 }, 1958 { 1959 .str = "tx_octets_prio", 1960 .getter = mlxsw_reg_ppcnt_tx_octets_get, 1961 }, 1962 { 1963 .str = "tx_frames_prio", 1964 .getter = mlxsw_reg_ppcnt_tx_frames_get, 1965 }, 1966 { 1967 .str = "rx_pause_prio", 1968 .getter = mlxsw_reg_ppcnt_rx_pause_get, 1969 }, 1970 { 1971 .str = "rx_pause_duration_prio", 1972 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 1973 }, 1974 { 1975 .str = "tx_pause_prio", 1976 .getter = mlxsw_reg_ppcnt_tx_pause_get, 1977 }, 1978 { 1979 .str = "tx_pause_duration_prio", 1980 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 1981 }, 1982 }; 1983 1984 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 1985 1986 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 1987 { 1988 .str = "tc_transmit_queue_tc", 1989 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 1990 .cells_bytes = true, 1991 }, 1992 { 1993 .str = "tc_no_buffer_discard_uc_tc", 1994 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 1995 }, 1996 }; 1997 1998 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 1999 2000 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2001 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ 2002 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ 2003 IEEE_8021QAZ_MAX_TCS) 2004 2005 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2006 { 2007 int i; 2008 2009 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2010 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2011 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2012 *p += ETH_GSTRING_LEN; 2013 } 2014 } 2015 2016 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2017 { 2018 int i; 2019 2020 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2021 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2022 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2023 *p += ETH_GSTRING_LEN; 2024 } 2025 } 2026 2027 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2028 u32 stringset, u8 *data) 2029 { 2030 u8 *p = data; 2031 int i; 2032 2033 switch (stringset) { 2034 case ETH_SS_STATS: 2035 for (i = 0; i < 
MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2036 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2037 ETH_GSTRING_LEN); 2038 p += ETH_GSTRING_LEN; 2039 } 2040 2041 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2042 mlxsw_sp_port_get_prio_strings(&p, i); 2043 2044 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2045 mlxsw_sp_port_get_tc_strings(&p, i); 2046 2047 break; 2048 } 2049 } 2050 2051 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2052 enum ethtool_phys_id_state state) 2053 { 2054 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2055 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2056 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2057 bool active; 2058 2059 switch (state) { 2060 case ETHTOOL_ID_ACTIVE: 2061 active = true; 2062 break; 2063 case ETHTOOL_ID_INACTIVE: 2064 active = false; 2065 break; 2066 default: 2067 return -EOPNOTSUPP; 2068 } 2069 2070 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2071 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2072 } 2073 2074 static int 2075 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2076 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2077 { 2078 switch (grp) { 2079 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2080 *p_hw_stats = mlxsw_sp_port_hw_stats; 2081 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2082 break; 2083 case MLXSW_REG_PPCNT_PRIO_CNT: 2084 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2085 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2086 break; 2087 case MLXSW_REG_PPCNT_TC_CNT: 2088 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2089 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2090 break; 2091 default: 2092 WARN_ON(1); 2093 return -EOPNOTSUPP; 2094 } 2095 return 0; 2096 } 2097 2098 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2099 enum mlxsw_reg_ppcnt_grp grp, int prio, 2100 u64 *data, int data_index) 2101 { 2102 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2103 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2104 struct mlxsw_sp_port_hw_stats *hw_stats; 2105 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2106 int i, len; 2107 int err; 2108 2109 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2110 if (err) 2111 return; 2112 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2113 for (i = 0; i < len; i++) { 2114 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2115 if (!hw_stats[i].cells_bytes) 2116 continue; 2117 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2118 data[data_index + i]); 2119 } 2120 } 2121 2122 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2123 struct ethtool_stats *stats, u64 *data) 2124 { 2125 int i, data_index = 0; 2126 2127 /* IEEE 802.3 Counters */ 2128 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2129 data, data_index); 2130 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2131 2132 /* Per-Priority Counters */ 2133 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2134 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2135 data, data_index); 2136 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2137 } 2138 2139 /* Per-TC Counters */ 2140 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2141 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2142 data, data_index); 2143 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2144 } 2145 } 2146 2147 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2148 { 2149 switch (sset) { 2150 case ETH_SS_STATS: 2151 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2152 default: 2153 return -EOPNOTSUPP; 2154 } 2155 } 2156 2157 struct mlxsw_sp_port_link_mode { 2158 
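/* Each entry below ties one or more PTYS register protocol bits
 * ('mask') to the matching ethtool link mode bit and its speed in
 * Mb/s; e.g. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 maps to
 * ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT at SPEED_40000. The table is
 * walked in both directions: PTYS to ethtool when reporting link
 * modes, and ethtool to PTYS when building the advertised protocol
 * mask for the device.
 */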
enum ethtool_link_mode_bit_indices mask_ethtool; 2159 u32 mask; 2160 u32 speed; 2161 }; 2162 2163 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { 2164 { 2165 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2166 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2167 .speed = SPEED_100, 2168 }, 2169 { 2170 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2171 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2172 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2173 .speed = SPEED_1000, 2174 }, 2175 { 2176 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2177 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2178 .speed = SPEED_10000, 2179 }, 2180 { 2181 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2182 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2183 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2184 .speed = SPEED_10000, 2185 }, 2186 { 2187 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2188 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2189 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2190 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2191 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2192 .speed = SPEED_10000, 2193 }, 2194 { 2195 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2196 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2197 .speed = SPEED_20000, 2198 }, 2199 { 2200 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2201 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2202 .speed = SPEED_40000, 2203 }, 2204 { 2205 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2206 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2207 .speed = SPEED_40000, 2208 }, 2209 { 2210 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2211 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2212 .speed = SPEED_40000, 2213 }, 2214 { 2215 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2216 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2217 .speed = SPEED_40000, 2218 }, 2219 { 2220 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2221 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2222 .speed = SPEED_25000, 2223 }, 2224 { 2225 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2226 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2227 .speed = SPEED_25000, 2228 }, 2229 { 2230 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2231 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2232 .speed = SPEED_25000, 2233 }, 2239 { 2240 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2241 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2242 .speed = SPEED_50000, 2243 }, 2244 { 2245 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2246 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2247 .speed = SPEED_50000, 2248 }, 2249 { 2250 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2251 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2252 .speed = SPEED_50000, 2253 }, 2254 { 2255 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2256 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 2257 .speed = SPEED_56000, 2258 }, 2259 { 2260 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2261 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 2262 .speed = SPEED_56000, 2263 }, 2264 { 2265 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2266 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 2267 .speed = SPEED_56000, 2268 }, 2269 { 2270 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2271 .mask_ethtool =
ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 2272 .speed = SPEED_56000, 2273 }, 2274 { 2275 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2276 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2277 .speed = SPEED_100000, 2278 }, 2279 { 2280 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2281 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2282 .speed = SPEED_100000, 2283 }, 2284 { 2285 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2286 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2287 .speed = SPEED_100000, 2288 }, 2289 { 2290 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2291 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2292 .speed = SPEED_100000, 2293 }, 2294 }; 2295 2296 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 2297 2298 static void 2299 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, 2300 struct ethtool_link_ksettings *cmd) 2301 { 2302 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2303 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2304 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2305 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2306 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2307 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2308 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2309 2310 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2311 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2312 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2313 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2314 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2315 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2316 } 2317 2318 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode) 2319 { 2320 int i; 2321 2322 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2323 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 2324 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2325 mode); 2326 } 2327 } 2328 2329 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 2330 struct ethtool_link_ksettings *cmd) 2331 { 2332 u32 speed = SPEED_UNKNOWN; 2333 u8 duplex = DUPLEX_UNKNOWN; 2334 int i; 2335 2336 if (!carrier_ok) 2337 goto out; 2338 2339 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2340 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 2341 speed = mlxsw_sp_port_link_mode[i].speed; 2342 duplex = DUPLEX_FULL; 2343 break; 2344 } 2345 } 2346 out: 2347 cmd->base.speed = speed; 2348 cmd->base.duplex = duplex; 2349 } 2350 2351 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 2352 { 2353 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2354 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2355 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2356 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2357 return PORT_FIBRE; 2358 2359 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2360 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2361 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 2362 return PORT_DA; 2363 2364 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2365 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2366 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2367 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 2368 return PORT_NONE; 2369 2370 return PORT_OTHER; 2371 } 2372 2373 static u32 2374 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2375 { 2376 u32 ptys_proto = 0; 2377 int i; 2378 2379 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2380 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2381 cmd->link_modes.advertising)) 2382 ptys_proto |= 
mlxsw_sp_port_link_mode[i].mask; 2383 } 2384 return ptys_proto; 2385 } 2386 2387 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2388 { 2389 u32 ptys_proto = 0; 2390 int i; 2391 2392 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2393 if (speed == mlxsw_sp_port_link_mode[i].speed) 2394 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2395 } 2396 return ptys_proto; 2397 } 2398 2399 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2400 { 2401 u32 ptys_proto = 0; 2402 int i; 2403 2404 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2405 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 2406 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2407 } 2408 return ptys_proto; 2409 } 2410 2411 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, 2412 struct ethtool_link_ksettings *cmd) 2413 { 2414 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2415 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2416 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2417 2418 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); 2419 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); 2420 } 2421 2422 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, 2423 struct ethtool_link_ksettings *cmd) 2424 { 2425 if (!autoneg) 2426 return; 2427 2428 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2429 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); 2430 } 2431 2432 static void 2433 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, 2434 struct ethtool_link_ksettings *cmd) 2435 { 2436 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) 2437 return; 2438 2439 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg); 2440 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); 2441 } 2442 2443 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 2444 struct ethtool_link_ksettings *cmd) 2445 { 2446 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp; 2447 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2448 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2449 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2450 u8 autoneg_status; 2451 bool autoneg; 2452 int err; 2453 2454 autoneg = mlxsw_sp_port->link.autoneg; 2455 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 2456 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2457 if (err) 2458 return err; 2459 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, 2460 &eth_proto_oper); 2461 2462 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd); 2463 2464 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd); 2465 2466 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl); 2467 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl); 2468 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd); 2469 2470 cmd->base.autoneg = autoneg ?
AUTONEG_ENABLE : AUTONEG_DISABLE; 2471 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper); 2472 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, 2473 cmd); 2474 2475 return 0; 2476 } 2477 2478 static int 2479 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 2480 const struct ethtool_link_ksettings *cmd) 2481 { 2482 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2483 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2484 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2485 u32 eth_proto_cap, eth_proto_new; 2486 bool autoneg; 2487 int err; 2488 2489 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 2490 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2491 if (err) 2492 return err; 2493 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL); 2494 2495 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 2496 eth_proto_new = autoneg ? 2497 mlxsw_sp_to_ptys_advert_link(cmd) : 2498 mlxsw_sp_to_ptys_speed(cmd->base.speed); 2499 2500 eth_proto_new = eth_proto_new & eth_proto_cap; 2501 if (!eth_proto_new) { 2502 netdev_err(dev, "No supported speed requested\n"); 2503 return -EINVAL; 2504 } 2505 2506 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2507 eth_proto_new); 2508 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2509 if (err) 2510 return err; 2511 /* Remember the autoneg setting even when the interface is down. */ 2512 mlxsw_sp_port->link.autoneg = autoneg; 2513 2514 if (!netif_running(dev)) 2515 return 0; 2516 2517 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2518 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 2519 2520 return 0; 2521 } 2522 2523 static int mlxsw_sp_flash_device(struct net_device *dev, 2524 struct ethtool_flash *flash) 2525 { 2526 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2527 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2528 const struct firmware *firmware; 2529 int err; 2530 2531 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) 2532 return -EOPNOTSUPP; 2533 2534 dev_hold(dev); 2535 rtnl_unlock(); 2536 2537 err = request_firmware_direct(&firmware, flash->data, &dev->dev); 2538 if (err) 2539 goto out; 2540 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 2541 release_firmware(firmware); 2542 out: 2543 rtnl_lock(); 2544 dev_put(dev); 2545 return err; 2546 } 2547 2548 #define MLXSW_SP_QSFP_I2C_ADDR 0x50 2549 2550 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, 2551 u16 offset, u16 size, void *data, 2552 unsigned int *p_read_size) 2553 { 2554 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2555 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; 2556 char mcia_pl[MLXSW_REG_MCIA_LEN]; 2557 int status; 2558 int err; 2559 2560 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); 2561 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, 2562 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR); 2563 2564 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); 2565 if (err) 2566 return err; 2567 2568 status = mlxsw_reg_mcia_status_get(mcia_pl); 2569 if (status) 2570 return -EIO; 2571 2572 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); 2573 memcpy(data, eeprom_tmp, size); 2574 *p_read_size = size; 2575 2576 return 0; 2577 } 2578 2579 enum mlxsw_sp_eeprom_module_info_rev_id { 2580 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, 2581 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, 2582 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, 2583 }; 2584 2585 enum mlxsw_sp_eeprom_module_info_id { 2586 MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03, 2587
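/* These identifier values correspond to the SFF-8024 identifier byte
 * that mlxsw_sp_get_module_info() reads from offset 0 of the module
 * EEPROM: 0x03 SFP/SFP+ (above), 0x0C QSFP, 0x0D QSFP+, 0x11 QSFP28.
 */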
MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, 2588 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, 2589 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, 2590 }; 2591 2592 enum mlxsw_sp_eeprom_module_info { 2593 MLXSW_SP_EEPROM_MODULE_INFO_ID, 2594 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID, 2595 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2596 }; 2597 2598 static int mlxsw_sp_get_module_info(struct net_device *netdev, 2599 struct ethtool_modinfo *modinfo) 2600 { 2601 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2602 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE]; 2603 u8 module_rev_id, module_id; 2604 unsigned int read_size; 2605 int err; 2606 2607 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, 2608 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2609 module_info, &read_size); 2610 if (err) 2611 return err; 2612 2613 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE) 2614 return -EIO; 2615 2616 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; 2617 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; 2618 2619 switch (module_id) { 2620 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: 2621 modinfo->type = ETH_MODULE_SFF_8436; 2622 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2623 break; 2624 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: 2625 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: 2626 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || 2627 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { 2628 modinfo->type = ETH_MODULE_SFF_8636; 2629 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2630 } else { 2631 modinfo->type = ETH_MODULE_SFF_8436; 2632 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2633 } 2634 break; 2635 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: 2636 modinfo->type = ETH_MODULE_SFF_8472; 2637 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2638 break; 2639 default: 2640 return -EINVAL; 2641 } 2642 2643 return 0; 2644 } 2645 2646 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 2647 struct ethtool_eeprom *ee, 2648 u8 *data) 2649 { 2650 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2651 int offset = ee->offset; 2652 unsigned int read_size; 2653 int i = 0; 2654 int err; 2655 2656 if (!ee->len) 2657 return -EINVAL; 2658 2659 memset(data, 0, ee->len); 2660 2661 while (i < ee->len) { 2662 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset, 2663 ee->len - i, data + i, 2664 &read_size); 2665 if (err) { 2666 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n"); 2667 return err; 2668 } 2669 2670 i += read_size; 2671 offset += read_size; 2672 } 2673 2674 return 0; 2675 } 2676 2677 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 2678 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 2679 .get_link = ethtool_op_get_link, 2680 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 2681 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 2682 .get_strings = mlxsw_sp_port_get_strings, 2683 .set_phys_id = mlxsw_sp_port_set_phys_id, 2684 .get_ethtool_stats = mlxsw_sp_port_get_stats, 2685 .get_sset_count = mlxsw_sp_port_get_sset_count, 2686 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 2687 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 2688 .flash_device = mlxsw_sp_flash_device, 2689 .get_module_info = mlxsw_sp_get_module_info, 2690 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 2691 }; 2692 2693 static int 2694 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 2695 { 2696 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2697 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 2698 char 
ptys_pl[MLXSW_REG_PTYS_LEN]; 2699 u32 eth_proto_admin; 2700 2701 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 2702 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2703 eth_proto_admin); 2704 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2705 } 2706 2707 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 2708 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 2709 bool dwrr, u8 dwrr_weight) 2710 { 2711 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2712 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2713 2714 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2715 next_index); 2716 mlxsw_reg_qeec_de_set(qeec_pl, true); 2717 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 2718 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 2719 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2720 } 2721 2722 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 2723 enum mlxsw_reg_qeec_hr hr, u8 index, 2724 u8 next_index, u32 maxrate) 2725 { 2726 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2727 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2728 2729 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2730 next_index); 2731 mlxsw_reg_qeec_mase_set(qeec_pl, true); 2732 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 2733 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2734 } 2735 2736 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 2737 u8 switch_prio, u8 tclass) 2738 { 2739 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2740 char qtct_pl[MLXSW_REG_QTCT_LEN]; 2741 2742 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 2743 tclass); 2744 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 2745 } 2746 2747 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 2748 { 2749 int err, i; 2750 2751 /* Set up the elements hierarchy, so that each TC is linked to 2752 * one subgroup, and all subgroups are members of the same group. 2753 */ 2754 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2755 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 2756 0); 2757 if (err) 2758 return err; 2759 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2760 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2761 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 2762 0, false, 0); 2763 if (err) 2764 return err; 2765 } 2766 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2767 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2768 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 2769 false, 0); 2770 if (err) 2771 return err; 2772 } 2773 2774 /* Make sure the max shaper is disabled in all hierarchies that 2775 * support it. 2776 */ 2777 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2778 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 2779 MLXSW_REG_QEEC_MAS_DIS); 2780 if (err) 2781 return err; 2782 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2783 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2784 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 2785 i, 0, 2786 MLXSW_REG_QEEC_MAS_DIS); 2787 if (err) 2788 return err; 2789 } 2790 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2791 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2792 MLXSW_REG_QEEC_HIERARCY_TC, 2793 i, i, 2794 MLXSW_REG_QEEC_MAS_DIS); 2795 if (err) 2796 return err; 2797 } 2798 2799 /* Map all priorities to traffic class 0.
*/ 2800 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2801 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 2802 if (err) 2803 return err; 2804 } 2805 2806 return 0; 2807 } 2808 2809 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2810 bool split, u8 module, u8 width, u8 lane) 2811 { 2812 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 2813 struct mlxsw_sp_port *mlxsw_sp_port; 2814 struct net_device *dev; 2815 int err; 2816 2817 err = mlxsw_core_port_init(mlxsw_sp->core, local_port); 2818 if (err) { 2819 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 2820 local_port); 2821 return err; 2822 } 2823 2824 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 2825 if (!dev) { 2826 err = -ENOMEM; 2827 goto err_alloc_etherdev; 2828 } 2829 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 2830 mlxsw_sp_port = netdev_priv(dev); 2831 mlxsw_sp_port->dev = dev; 2832 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 2833 mlxsw_sp_port->local_port = local_port; 2834 mlxsw_sp_port->pvid = 1; 2835 mlxsw_sp_port->split = split; 2836 mlxsw_sp_port->mapping.module = module; 2837 mlxsw_sp_port->mapping.width = width; 2838 mlxsw_sp_port->mapping.lane = lane; 2839 mlxsw_sp_port->link.autoneg = 1; 2840 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 2841 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 2842 2843 mlxsw_sp_port->pcpu_stats = 2844 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 2845 if (!mlxsw_sp_port->pcpu_stats) { 2846 err = -ENOMEM; 2847 goto err_alloc_stats; 2848 } 2849 2850 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 2851 GFP_KERNEL); 2852 if (!mlxsw_sp_port->sample) { 2853 err = -ENOMEM; 2854 goto err_alloc_sample; 2855 } 2856 2857 mlxsw_sp_port->hw_stats.cache = 2858 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL); 2859 2860 if (!mlxsw_sp_port->hw_stats.cache) { 2861 err = -ENOMEM; 2862 goto err_alloc_hw_stats; 2863 } 2864 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw, 2865 &update_stats_cache); 2866 2867 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 2868 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 2869 2870 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane); 2871 if (err) { 2872 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 2873 mlxsw_sp_port->local_port); 2874 goto err_port_module_map; 2875 } 2876 2877 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 2878 if (err) { 2879 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 2880 mlxsw_sp_port->local_port); 2881 goto err_port_swid_set; 2882 } 2883 2884 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 2885 if (err) { 2886 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 2887 mlxsw_sp_port->local_port); 2888 goto err_dev_addr_init; 2889 } 2890 2891 netif_carrier_off(dev); 2892 2893 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 2894 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 2895 dev->hw_features |= NETIF_F_HW_TC; 2896 2897 dev->min_mtu = 0; 2898 dev->max_mtu = ETH_MAX_MTU; 2899 2900 /* Each packet needs to have a Tx header (metadata) on top of all 2901 * other headers.
2902 */ 2903 dev->needed_headroom = MLXSW_TXHDR_LEN; 2904 2905 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 2906 if (err) { 2907 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 2908 mlxsw_sp_port->local_port); 2909 goto err_port_system_port_mapping_set; 2910 } 2911 2912 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 2913 if (err) { 2914 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 2915 mlxsw_sp_port->local_port); 2916 goto err_port_speed_by_width_set; 2917 } 2918 2919 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 2920 if (err) { 2921 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 2922 mlxsw_sp_port->local_port); 2923 goto err_port_mtu_set; 2924 } 2925 2926 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2927 if (err) 2928 goto err_port_admin_status_set; 2929 2930 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 2931 if (err) { 2932 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 2933 mlxsw_sp_port->local_port); 2934 goto err_port_buffers_init; 2935 } 2936 2937 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 2938 if (err) { 2939 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 2940 mlxsw_sp_port->local_port); 2941 goto err_port_ets_init; 2942 } 2943 2944 /* ETS and buffers must be initialized before DCB. */ 2945 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 2946 if (err) { 2947 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 2948 mlxsw_sp_port->local_port); 2949 goto err_port_dcb_init; 2950 } 2951 2952 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 2953 if (err) { 2954 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 2955 mlxsw_sp_port->local_port); 2956 goto err_port_fids_init; 2957 } 2958 2959 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 2960 if (IS_ERR(mlxsw_sp_port_vlan)) { 2961 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 2962 mlxsw_sp_port->local_port); 2963 goto err_port_vlan_get; 2964 } 2965 2966 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 2967 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 2968 err = register_netdev(dev); 2969 if (err) { 2970 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 2971 mlxsw_sp_port->local_port); 2972 goto err_register_netdev; 2973 } 2974 2975 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 2976 mlxsw_sp_port, dev, mlxsw_sp_port->split, 2977 module); 2978 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0); 2979 return 0; 2980 2981 err_register_netdev: 2982 mlxsw_sp->ports[local_port] = NULL; 2983 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 2984 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 2985 err_port_vlan_get: 2986 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 2987 err_port_fids_init: 2988 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 2989 err_port_dcb_init: 2990 err_port_ets_init: 2991 err_port_buffers_init: 2992 err_port_admin_status_set: 2993 err_port_mtu_set: 2994 err_port_speed_by_width_set: 2995 err_port_system_port_mapping_set: 2996 err_dev_addr_init: 2997 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 2998 err_port_swid_set: 2999 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3000 err_port_module_map: 3001 kfree(mlxsw_sp_port->hw_stats.cache); 3002 err_alloc_hw_stats: 3003 kfree(mlxsw_sp_port->sample); 3004 err_alloc_sample: 3005 free_percpu(mlxsw_sp_port->pcpu_stats); 3006 err_alloc_stats: 3007 free_netdev(dev); 3008 
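/* mlxsw_core_port_init() was the first setup step above, so the core
 * port is the last thing undone on the error path.
 */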
err_alloc_etherdev: 3009 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3010 return err; 3011 } 3012 3013 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3014 { 3015 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3016 3017 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw); 3018 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3019 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3020 mlxsw_sp->ports[local_port] = NULL; 3021 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3022 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 3023 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3024 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3025 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3026 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3027 kfree(mlxsw_sp_port->hw_stats.cache); 3028 kfree(mlxsw_sp_port->sample); 3029 free_percpu(mlxsw_sp_port->pcpu_stats); 3030 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3031 free_netdev(mlxsw_sp_port->dev); 3032 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3033 } 3034 3035 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3036 { 3037 return mlxsw_sp->ports[local_port] != NULL; 3038 } 3039 3040 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3041 { 3042 int i; 3043 3044 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3045 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3046 mlxsw_sp_port_remove(mlxsw_sp, i); 3047 kfree(mlxsw_sp->port_to_module); 3048 kfree(mlxsw_sp->ports); 3049 } 3050 3051 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3052 { 3053 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3054 u8 module, width, lane; 3055 size_t alloc_size; 3056 int i; 3057 int err; 3058 3059 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3060 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3061 if (!mlxsw_sp->ports) 3062 return -ENOMEM; 3063 3064 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL); 3065 if (!mlxsw_sp->port_to_module) { 3066 err = -ENOMEM; 3067 goto err_port_to_module_alloc; 3068 } 3069 3070 for (i = 1; i < max_ports; i++) { 3071 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3072 &width, &lane); 3073 if (err) 3074 goto err_port_module_info_get; 3075 if (!width) 3076 continue; 3077 mlxsw_sp->port_to_module[i] = module; 3078 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3079 module, width, lane); 3080 if (err) 3081 goto err_port_create; 3082 } 3083 return 0; 3084 3085 err_port_create: 3086 err_port_module_info_get: 3087 for (i--; i >= 1; i--) 3088 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3089 mlxsw_sp_port_remove(mlxsw_sp, i); 3090 kfree(mlxsw_sp->port_to_module); 3091 err_port_to_module_alloc: 3092 kfree(mlxsw_sp->ports); 3093 return err; 3094 } 3095 3096 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3097 { 3098 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3099 3100 return local_port - offset; 3101 } 3102 3103 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3104 u8 module, unsigned int count) 3105 { 3106 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3107 int err, i; 3108 3109 for (i = 0; i < count; i++) { 3110 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 3111 module, width, i * width); 3112 if (err) 3113 goto err_port_create; 3114 } 3115 3116 return 0; 3117 3118 err_port_create: 3119 for (i--; i >= 0; i--) 3120 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3121 
mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3122 return err; 3123 } 3124 3125 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3126 u8 base_port, unsigned int count) 3127 { 3128 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 3129 int i; 3130 3131 /* Split by four means we need to re-create two ports, otherwise 3132 * only one. 3133 */ 3134 count = count / 2; 3135 3136 for (i = 0; i < count; i++) { 3137 local_port = base_port + i * 2; 3138 module = mlxsw_sp->port_to_module[local_port]; 3139 3140 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 3141 width, 0); 3142 } 3143 } 3144 3145 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3146 unsigned int count) 3147 { 3148 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3149 struct mlxsw_sp_port *mlxsw_sp_port; 3150 u8 module, cur_width, base_port; 3151 int i; 3152 int err; 3153 3154 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3155 if (!mlxsw_sp_port) { 3156 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3157 local_port); 3158 return -EINVAL; 3159 } 3160 3161 module = mlxsw_sp_port->mapping.module; 3162 cur_width = mlxsw_sp_port->mapping.width; 3163 3164 if (count != 2 && count != 4) { 3165 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 3166 return -EINVAL; 3167 } 3168 3169 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 3170 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3171 return -EINVAL; 3172 } 3173 3174 /* Make sure we have enough slave (even) ports for the split. */ 3175 if (count == 2) { 3176 base_port = local_port; 3177 if (mlxsw_sp->ports[base_port + 1]) { 3178 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3179 return -EINVAL; 3180 } 3181 } else { 3182 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3183 if (mlxsw_sp->ports[base_port + 1] || 3184 mlxsw_sp->ports[base_port + 3]) { 3185 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3186 return -EINVAL; 3187 } 3188 } 3189 3190 for (i = 0; i < count; i++) 3191 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3192 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3193 3194 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 3195 if (err) { 3196 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3197 goto err_port_split_create; 3198 } 3199 3200 return 0; 3201 3202 err_port_split_create: 3203 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3204 return err; 3205 } 3206 3207 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 3208 { 3209 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3210 struct mlxsw_sp_port *mlxsw_sp_port; 3211 u8 cur_width, base_port; 3212 unsigned int count; 3213 int i; 3214 3215 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3216 if (!mlxsw_sp_port) { 3217 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3218 local_port); 3219 return -EINVAL; 3220 } 3221 3222 if (!mlxsw_sp_port->split) { 3223 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 3224 return -EINVAL; 3225 } 3226 3227 cur_width = mlxsw_sp_port->mapping.width; 3228 count = cur_width == 1 ? 4 : 2; 3229 3230 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3231 3232 /* Determine which ports to remove. 
*/ 3233 if (count == 2 && local_port >= base_port + 2) 3234 base_port = base_port + 2; 3235 3236 for (i = 0; i < count; i++) 3237 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3238 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3239 3240 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3241 3242 return 0; 3243 } 3244 3245 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3246 char *pude_pl, void *priv) 3247 { 3248 struct mlxsw_sp *mlxsw_sp = priv; 3249 struct mlxsw_sp_port *mlxsw_sp_port; 3250 enum mlxsw_reg_pude_oper_status status; 3251 u8 local_port; 3252 3253 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3254 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3255 if (!mlxsw_sp_port) 3256 return; 3257 3258 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3259 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3260 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3261 netif_carrier_on(mlxsw_sp_port->dev); 3262 } else { 3263 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3264 netif_carrier_off(mlxsw_sp_port->dev); 3265 } 3266 } 3267 3268 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3269 u8 local_port, void *priv) 3270 { 3271 struct mlxsw_sp *mlxsw_sp = priv; 3272 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3273 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3274 3275 if (unlikely(!mlxsw_sp_port)) { 3276 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3277 local_port); 3278 return; 3279 } 3280 3281 skb->dev = mlxsw_sp_port->dev; 3282 3283 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3284 u64_stats_update_begin(&pcpu_stats->syncp); 3285 pcpu_stats->rx_packets++; 3286 pcpu_stats->rx_bytes += skb->len; 3287 u64_stats_update_end(&pcpu_stats->syncp); 3288 3289 skb->protocol = eth_type_trans(skb, skb->dev); 3290 netif_receive_skb(skb); 3291 } 3292 3293 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3294 void *priv) 3295 { 3296 skb->offload_fwd_mark = 1; 3297 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3298 } 3299 3300 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 3301 void *priv) 3302 { 3303 struct mlxsw_sp *mlxsw_sp = priv; 3304 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3305 struct psample_group *psample_group; 3306 u32 size; 3307 3308 if (unlikely(!mlxsw_sp_port)) { 3309 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 3310 local_port); 3311 goto out; 3312 } 3313 if (unlikely(!mlxsw_sp_port->sample)) { 3314 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 3315 local_port); 3316 goto out; 3317 } 3318 3319 size = mlxsw_sp_port->sample->truncate ? 
3320 mlxsw_sp_port->sample->trunc_size : skb->len; 3321 3322 rcu_read_lock(); 3323 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 3324 if (!psample_group) 3325 goto out_unlock; 3326 psample_sample_packet(psample_group, skb, size, 3327 mlxsw_sp_port->dev->ifindex, 0, 3328 mlxsw_sp_port->sample->rate); 3329 out_unlock: 3330 rcu_read_unlock(); 3331 out: 3332 consume_skb(skb); 3333 } 3334 3335 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3336 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 3337 _is_ctrl, SP_##_trap_group, DISCARD) 3338 3339 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3340 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 3341 _is_ctrl, SP_##_trap_group, DISCARD) 3342 3343 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 3344 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 3345 3346 static const struct mlxsw_listener mlxsw_sp_listener[] = { 3347 /* Events */ 3348 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 3349 /* L2 traps */ 3350 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 3351 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 3352 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 3353 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 3354 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 3355 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 3356 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 3357 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 3358 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 3359 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 3360 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 3361 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 3362 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 3363 false), 3364 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3365 false), 3366 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 3367 false), 3368 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3369 false), 3370 /* L3 traps */ 3371 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3372 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3373 MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3374 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 3375 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 3376 false), 3377 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 3378 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 3379 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 3380 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 3381 false), 3382 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 3383 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 3384 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 3385 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 3386 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 3387 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 3388 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3389 false), 3390 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 3391 false), 3392 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3393 false), 3394 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 3395 
false), 3396 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 3397 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 3398 false), 3399 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), 3400 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), 3401 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 3402 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 3403 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3404 /* PKT Sample trap */ 3405 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 3406 false, SP_IP2ME, DISCARD), 3407 /* ACL trap */ 3408 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 3409 }; 3410 3411 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 3412 { 3413 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 3414 enum mlxsw_reg_qpcr_ir_units ir_units; 3415 int max_cpu_policers; 3416 bool is_bytes; 3417 u8 burst_size; 3418 u32 rate; 3419 int i, err; 3420 3421 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 3422 return -EIO; 3423 3424 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3425 3426 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 3427 for (i = 0; i < max_cpu_policers; i++) { 3428 is_bytes = false; 3429 switch (i) { 3430 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3431 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3432 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3433 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3434 rate = 128; 3435 burst_size = 7; 3436 break; 3437 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3438 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3439 rate = 16 * 1024; 3440 burst_size = 10; 3441 break; 3442 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3443 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3444 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3445 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3446 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3447 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3448 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3449 rate = 1024; 3450 burst_size = 7; 3451 break; 3452 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3453 is_bytes = true; 3454 rate = 4 * 1024; 3455 burst_size = 4; 3456 break; 3457 default: 3458 continue; 3459 } 3460 3461 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 3462 burst_size); 3463 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 3464 if (err) 3465 return err; 3466 } 3467 3468 return 0; 3469 } 3470 3471 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 3472 { 3473 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3474 enum mlxsw_reg_htgt_trap_group i; 3475 int max_cpu_policers; 3476 int max_trap_groups; 3477 u8 priority, tc; 3478 u16 policer_id; 3479 int err; 3480 3481 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 3482 return -EIO; 3483 3484 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 3485 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3486 3487 for (i = 0; i < max_trap_groups; i++) { 3488 policer_id = i; 3489 switch (i) { 3490 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3491 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3492 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3493 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3494 priority = 5; 3495 tc = 5; 3496 break; 3497 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3498 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3499 priority = 4; 3500 tc = 4; 3501 break; 3502 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3503 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3504 case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3505 priority = 3; 3506 tc = 3; 3507 break; 3508 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3509 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3510 priority = 2; 3511 tc = 2; 3512 break; 3513 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3514 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3515 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3516 priority = 1; 3517 tc = 1; 3518 break; 3519 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 3520 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 3521 tc = MLXSW_REG_HTGT_DEFAULT_TC; 3522 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 3523 break; 3524 default: 3525 continue; 3526 } 3527 3528 if (max_cpu_policers <= policer_id && 3529 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 3530 return -EIO; 3531 3532 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 3533 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3534 if (err) 3535 return err; 3536 } 3537 3538 return 0; 3539 } 3540 3541 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 3542 { 3543 int i; 3544 int err; 3545 3546 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 3547 if (err) 3548 return err; 3549 3550 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 3551 if (err) 3552 return err; 3553 3554 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3555 err = mlxsw_core_trap_register(mlxsw_sp->core, 3556 &mlxsw_sp_listener[i], 3557 mlxsw_sp); 3558 if (err) 3559 goto err_listener_register; 3560 3561 } 3562 return 0; 3563 3564 err_listener_register: 3565 for (i--; i >= 0; i--) { 3566 mlxsw_core_trap_unregister(mlxsw_sp->core, 3567 &mlxsw_sp_listener[i], 3568 mlxsw_sp); 3569 } 3570 return err; 3571 } 3572 3573 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 3574 { 3575 int i; 3576 3577 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3578 mlxsw_core_trap_unregister(mlxsw_sp->core, 3579 &mlxsw_sp_listener[i], 3580 mlxsw_sp); 3581 } 3582 } 3583 3584 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 3585 { 3586 char slcr_pl[MLXSW_REG_SLCR_LEN]; 3587 int err; 3588 3589 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 3590 MLXSW_REG_SLCR_LAG_HASH_DMAC | 3591 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 3592 MLXSW_REG_SLCR_LAG_HASH_VLANID | 3593 MLXSW_REG_SLCR_LAG_HASH_SIP | 3594 MLXSW_REG_SLCR_LAG_HASH_DIP | 3595 MLXSW_REG_SLCR_LAG_HASH_SPORT | 3596 MLXSW_REG_SLCR_LAG_HASH_DPORT | 3597 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 3598 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 3599 if (err) 3600 return err; 3601 3602 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 3603 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 3604 return -EIO; 3605 3606 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 3607 sizeof(struct mlxsw_sp_upper), 3608 GFP_KERNEL); 3609 if (!mlxsw_sp->lags) 3610 return -ENOMEM; 3611 3612 return 0; 3613 } 3614 3615 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 3616 { 3617 kfree(mlxsw_sp->lags); 3618 } 3619 3620 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 3621 { 3622 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3623 3624 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 3625 MLXSW_REG_HTGT_INVALID_POLICER, 3626 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 3627 MLXSW_REG_HTGT_DEFAULT_TC); 3628 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3629 } 3630 3631 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3632 const struct mlxsw_bus_info *mlxsw_bus_info) 3633 { 3634 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
		return err;
	}

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}
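/* Device profile handed to the firmware during init. Only fields whose
 * matching 'used_*' flag is set are programmed; everything else keeps
 * the firmware default.
 */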
static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_offset_flood_tables = 3,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_split_data = 1,
	.kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts = 2,
	.kvd_hash_double_parts = 1,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable = 1,
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = mlxsw_sp_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}
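/* Like mlxsw_sp_port_dev_lower_find(), but usable under rcu_read_lock()
 * rather than RTNL; see mlxsw_sp_port_lower_dev_hold() below.
 */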
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}
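/* A LAG device can only be offloaded if a free LAG index is available
 * for it and it uses a hash-based TX policy; otherwise the enslavement
 * is vetoed in NETDEV_PRECHANGEUPPER below.
 */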
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
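/* The SLDR register maintains the LAG distributor list, i.e. the set of
 * member ports eligible for egress. A port is only part of the list
 * while its lower state reports tx_enabled; see
 * mlxsw_sp_port_lag_changed().
 */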
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;
	return 0;

err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
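/* NETDEV_PRECHANGEUPPER is used to veto topologies the device cannot
 * offload before they are committed, while NETDEV_CHANGEUPPER programs
 * the device to reflect a change that already took place.
 */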
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
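/* VLAN uppers of a port (or of a LAG) may in turn only be enslaved to a
 * bridge; any other upper of a VLAN device is rejected.
 */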
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper_dev;
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);

	return 0;
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
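/* The notifier blocks are registered before the core and PCI drivers so
 * they are already in place by the time the first ports are created.
 * mlxsw_pci_driver_register() fills in the probe / remove callbacks
 * shared by all mlxsw PCI drivers.
 */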
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);