1 /* 2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c 3 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> 5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> 6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include <linux/kernel.h> 38 #include <linux/module.h> 39 #include <linux/types.h> 40 #include <linux/pci.h> 41 #include <linux/netdevice.h> 42 #include <linux/etherdevice.h> 43 #include <linux/ethtool.h> 44 #include <linux/slab.h> 45 #include <linux/device.h> 46 #include <linux/skbuff.h> 47 #include <linux/if_vlan.h> 48 #include <linux/if_bridge.h> 49 #include <linux/workqueue.h> 50 #include <linux/jiffies.h> 51 #include <linux/bitops.h> 52 #include <linux/list.h> 53 #include <linux/notifier.h> 54 #include <linux/dcbnl.h> 55 #include <linux/inetdevice.h> 56 #include <linux/netlink.h> 57 #include <net/switchdev.h> 58 #include <net/pkt_cls.h> 59 #include <net/tc_act/tc_mirred.h> 60 #include <net/netevent.h> 61 #include <net/tc_act/tc_sample.h> 62 #include <net/addrconf.h> 63 64 #include "spectrum.h" 65 #include "pci.h" 66 #include "core.h" 67 #include "reg.h" 68 #include "port.h" 69 #include "trap.h" 70 #include "txheader.h" 71 #include "spectrum_cnt.h" 72 #include "spectrum_dpipe.h" 73 #include "spectrum_acl_flex_actions.h" 74 #include "../mlxfw/mlxfw.h" 75 76 #define MLXSW_FWREV_MAJOR 13 77 #define MLXSW_FWREV_MINOR 1420 78 #define MLXSW_FWREV_SUBMINOR 122 79 80 static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = { 81 .major = MLXSW_FWREV_MAJOR, 82 .minor = MLXSW_FWREV_MINOR, 83 .subminor = MLXSW_FWREV_SUBMINOR 84 }; 85 86 #define MLXSW_SP_FW_FILENAME \ 87 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \ 88 "." __stringify(MLXSW_FWREV_MINOR) \ 89 "." 
#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID
 * mapping. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

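/* The callbacks above are driven by mlxfw_firmware_flash(), which walks the
 * firmware update state machine roughly as follows: fsm_lock once, then per
 * component fsm_component_update, fsm_block_download and
 * fsm_component_verify, followed by fsm_activate and fsm_release
 * (fsm_cancel on error).
 */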
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release
};

static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}

static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
			       const struct mlxsw_fw_rev *b)
{
	if (a->major != b->major)
		return a->major > b->major;
	if (a->minor != b->minor)
		return a->minor > b->minor;
	return a->subminor >= b->subminor;
}

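/* mlxsw_sp_fw_rev_ge() orders revisions lexicographically by (major, minor,
 * subminor); e.g. 13.1500.0 compares as >= 13.1420.122, while 13.1419.999
 * does not. It decides below whether the running firmware is recent enough.
 */
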
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct firmware *firmware;
	int err;

	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
		 MLXSW_SP_FW_FILENAME);

	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			MLXSW_SP_FW_FILENAME);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
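
/* A minimal usage sketch of the flow counter helpers above (illustrative
 * only; real callers bind the counter to an ACL action, and error handling
 * is elided here):
 *
 *	unsigned int idx;
 *	u64 packets, bytes;
 *
 *	mlxsw_sp_flow_counter_alloc(mlxsw_sp, &idx);
 *	...	// attach 'idx' to a rule, let traffic flow
 *	mlxsw_sp_flow_counter_get(mlxsw_sp, idx, &packets, &bytes);
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, idx);
 */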
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

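/* Every packet transmitted through mlxsw_sp_port_xmit() below gets this
 * MLXSW_TXHDR_LEN-byte header pushed in front of the Ethernet header. The
 * device consumes it on the way out, which is why the Tx byte accounting
 * later subtracts MLXSW_TXHDR_LEN from skb->len.
 */
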
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
					      port->local_port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

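/* SPAN entries are reference counted: mlxsw_sp_span_entry_get() reuses an
 * existing analyzer entry for the same destination port and only programs a
 * new MPAT entry when none exists, while mlxsw_sp_span_entry_put() tears the
 * entry down once the last mirroring rule pointing at it is removed.
 */
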
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					u8 destination_port,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
					      destination_port);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
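
/* Note that a dropped packet still returns NETDEV_TX_OK above: the skb was
 * consumed and the drop is accounted in tx_dropped, so the stack must not
 * retry it. NETDEV_TX_BUSY is returned only when the transmit queue is
 * momentarily full and the original skb was left untouched.
 */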

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
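
/* update_stats_cache() reschedules itself every MLXSW_HW_STATS_UPDATE_TIME,
 * so the cache trails the hardware counters by at most that interval; while
 * the carrier is down the refresh is skipped but the work keeps running.
 */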

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return mlxsw_sp_port_vlan;

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
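
/* mlxsw_sp_port_vlan_get() and mlxsw_sp_port_vlan_put() form a get/put
 * pair: get returns the existing {port, VID} entry or creates one, and put
 * detaches any bridge or router binding before destroying the entry.
 * Callers therefore never free a mlxsw_sp_port_vlan directly.
 */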

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
				    span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	tcf_exts_to_list(f->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f)
{
	bool ingress;

	if (is_classid_clsact_ingress(f->common.classid))
		ingress = true;
	else if (is_classid_clsact_egress(f->common.classid))
		ingress = false;
	else
		return -EOPNOTSUPP;

	if (f->common.chain_index)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct tc_cls_flower_offload *f)
{
	bool ingress;

	if (is_classid_clsact_ingress(f->common.classid))
		ingress = true;
	else if (is_classid_clsact_egress(f->common.classid))
		ingress = false;
	else
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
	case TC_SETUP_CLSFLOWER:
		return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)

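/* With the tables above this works out to 19 + (8 + 2) * 8 = 99 counters
 * per port: 19 IEEE 802.3 counters, plus per-priority instances of the
 * eight prio counters and per-TC instances of the two TC counters, across
 * the IEEE_8021QAZ_MAX_TCS (8) priorities/TCs.
 */
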
mlxsw_sp->bus_info->fw_rev.major, 1795 mlxsw_sp->bus_info->fw_rev.minor, 1796 mlxsw_sp->bus_info->fw_rev.subminor); 1797 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1798 sizeof(drvinfo->bus_info)); 1799 } 1800 1801 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1802 struct ethtool_pauseparam *pause) 1803 { 1804 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1805 1806 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1807 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1808 } 1809 1810 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1811 struct ethtool_pauseparam *pause) 1812 { 1813 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1814 1815 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1816 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1817 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1818 1819 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1820 pfcc_pl); 1821 } 1822 1823 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1824 struct ethtool_pauseparam *pause) 1825 { 1826 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1827 bool pause_en = pause->tx_pause || pause->rx_pause; 1828 int err; 1829 1830 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1831 netdev_err(dev, "PFC already enabled on port\n"); 1832 return -EINVAL; 1833 } 1834 1835 if (pause->autoneg) { 1836 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1837 return -EINVAL; 1838 } 1839 1840 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1841 if (err) { 1842 netdev_err(dev, "Failed to configure port's headroom\n"); 1843 return err; 1844 } 1845 1846 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1847 if (err) { 1848 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1849 goto err_port_pause_configure; 1850 } 1851 1852 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1853 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1854 1855 return 0; 1856 1857 err_port_pause_configure: 1858 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1859 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1860 return err; 1861 } 1862 1863 struct mlxsw_sp_port_hw_stats { 1864 char str[ETH_GSTRING_LEN]; 1865 u64 (*getter)(const char *payload); 1866 bool cells_bytes; 1867 }; 1868 1869 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1870 { 1871 .str = "a_frames_transmitted_ok", 1872 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1873 }, 1874 { 1875 .str = "a_frames_received_ok", 1876 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1877 }, 1878 { 1879 .str = "a_frame_check_sequence_errors", 1880 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1881 }, 1882 { 1883 .str = "a_alignment_errors", 1884 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1885 }, 1886 { 1887 .str = "a_octets_transmitted_ok", 1888 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1889 }, 1890 { 1891 .str = "a_octets_received_ok", 1892 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1893 }, 1894 { 1895 .str = "a_multicast_frames_xmitted_ok", 1896 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1897 }, 1898 { 1899 .str = "a_broadcast_frames_xmitted_ok", 1900 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1901 }, 1902 { 1903 .str = "a_multicast_frames_received_ok", 1904 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1905 }, 1906 { 1907 .str = "a_broadcast_frames_received_ok", 1908 .getter = 
mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1909 }, 1910 { 1911 .str = "a_in_range_length_errors", 1912 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1913 }, 1914 { 1915 .str = "a_out_of_range_length_field", 1916 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1917 }, 1918 { 1919 .str = "a_frame_too_long_errors", 1920 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1921 }, 1922 { 1923 .str = "a_symbol_error_during_carrier", 1924 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1925 }, 1926 { 1927 .str = "a_mac_control_frames_transmitted", 1928 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 1929 }, 1930 { 1931 .str = "a_mac_control_frames_received", 1932 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 1933 }, 1934 { 1935 .str = "a_unsupported_opcodes_received", 1936 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 1937 }, 1938 { 1939 .str = "a_pause_mac_ctrl_frames_received", 1940 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 1941 }, 1942 { 1943 .str = "a_pause_mac_ctrl_frames_xmitted", 1944 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 1945 }, 1946 }; 1947 1948 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 1949 1950 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 1951 { 1952 .str = "rx_octets_prio", 1953 .getter = mlxsw_reg_ppcnt_rx_octets_get, 1954 }, 1955 { 1956 .str = "rx_frames_prio", 1957 .getter = mlxsw_reg_ppcnt_rx_frames_get, 1958 }, 1959 { 1960 .str = "tx_octets_prio", 1961 .getter = mlxsw_reg_ppcnt_tx_octets_get, 1962 }, 1963 { 1964 .str = "tx_frames_prio", 1965 .getter = mlxsw_reg_ppcnt_tx_frames_get, 1966 }, 1967 { 1968 .str = "rx_pause_prio", 1969 .getter = mlxsw_reg_ppcnt_rx_pause_get, 1970 }, 1971 { 1972 .str = "rx_pause_duration_prio", 1973 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 1974 }, 1975 { 1976 .str = "tx_pause_prio", 1977 .getter = mlxsw_reg_ppcnt_tx_pause_get, 1978 }, 1979 { 1980 .str = "tx_pause_duration_prio", 1981 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 1982 }, 1983 }; 1984 1985 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 1986 1987 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 1988 { 1989 .str = "tc_transmit_queue_tc", 1990 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 1991 .cells_bytes = true, 1992 }, 1993 { 1994 .str = "tc_no_buffer_discard_uc_tc", 1995 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 1996 }, 1997 }; 1998 1999 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2000 2001 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2002 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ 2003 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ 2004 IEEE_8021QAZ_MAX_TCS) 2005 2006 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2007 { 2008 int i; 2009 2010 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2011 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2012 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2013 *p += ETH_GSTRING_LEN; 2014 } 2015 } 2016 2017 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2018 { 2019 int i; 2020 2021 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2022 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2023 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2024 *p += ETH_GSTRING_LEN; 2025 } 2026 } 2027 2028 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2029 u32 stringset, u8 *data) 2030 { 2031 u8 *p = data; 2032 int i; 2033 2034 switch 
(stringset) { 2035 case ETH_SS_STATS: 2036 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2037 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2038 ETH_GSTRING_LEN); 2039 p += ETH_GSTRING_LEN; 2040 } 2041 2042 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2043 mlxsw_sp_port_get_prio_strings(&p, i); 2044 2045 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2046 mlxsw_sp_port_get_tc_strings(&p, i); 2047 2048 break; 2049 } 2050 } 2051 2052 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2053 enum ethtool_phys_id_state state) 2054 { 2055 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2056 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2057 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2058 bool active; 2059 2060 switch (state) { 2061 case ETHTOOL_ID_ACTIVE: 2062 active = true; 2063 break; 2064 case ETHTOOL_ID_INACTIVE: 2065 active = false; 2066 break; 2067 default: 2068 return -EOPNOTSUPP; 2069 } 2070 2071 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2072 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2073 } 2074 2075 static int 2076 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2077 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2078 { 2079 switch (grp) { 2080 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2081 *p_hw_stats = mlxsw_sp_port_hw_stats; 2082 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2083 break; 2084 case MLXSW_REG_PPCNT_PRIO_CNT: 2085 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2086 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2087 break; 2088 case MLXSW_REG_PPCNT_TC_CNT: 2089 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2090 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2091 break; 2092 default: 2093 WARN_ON(1); 2094 return -EOPNOTSUPP; 2095 } 2096 return 0; 2097 } 2098 2099 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2100 enum mlxsw_reg_ppcnt_grp grp, int prio, 2101 u64 *data, int data_index) 2102 { 2103 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2104 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2105 struct mlxsw_sp_port_hw_stats *hw_stats; 2106 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2107 int i, len; 2108 int err; 2109 2110 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2111 if (err) 2112 return; 2113 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2114 for (i = 0; i < len; i++) { 2115 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2116 if (!hw_stats[i].cells_bytes) 2117 continue; 2118 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2119 data[data_index + i]); 2120 } 2121 } 2122 2123 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2124 struct ethtool_stats *stats, u64 *data) 2125 { 2126 int i, data_index = 0; 2127 2128 /* IEEE 802.3 Counters */ 2129 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2130 data, data_index); 2131 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2132 2133 /* Per-Priority Counters */ 2134 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2135 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2136 data, data_index); 2137 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2138 } 2139 2140 /* Per-TC Counters */ 2141 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2142 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2143 data, data_index); 2144 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2145 } 2146 } 2147 2148 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2149 { 2150 switch (sset) { 2151 case ETH_SS_STATS: 2152 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2153 default: 2154 return -EOPNOTSUPP; 2155 
} 2156 } 2157 2158 struct mlxsw_sp_port_link_mode { 2159 enum ethtool_link_mode_bit_indices mask_ethtool; 2160 u32 mask; 2161 u32 speed; 2162 }; 2163 2164 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { 2165 { 2166 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2167 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2168 .speed = SPEED_100, 2169 }, 2170 { 2171 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2172 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2173 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2174 .speed = SPEED_1000, 2175 }, 2176 { 2177 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2178 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2179 .speed = SPEED_10000, 2180 }, 2181 { 2182 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2183 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2184 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2185 .speed = SPEED_10000, 2186 }, 2187 { 2188 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2189 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2190 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2191 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2192 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2193 .speed = SPEED_10000, 2194 }, 2195 { 2196 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2197 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2198 .speed = SPEED_20000, 2199 }, 2200 { 2201 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2202 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2203 .speed = SPEED_40000, 2204 }, 2205 { 2206 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2207 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2208 .speed = SPEED_40000, 2209 }, 2210 { 2211 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2212 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2213 .speed = SPEED_40000, 2214 }, 2215 { 2216 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2217 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2218 .speed = SPEED_40000, 2219 }, 2220 { 2221 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2222 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2223 .speed = SPEED_25000, 2224 }, 2225 { 2226 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2227 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2228 .speed = SPEED_25000, 2229 }, 2230 { 2231 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2232 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2233 .speed = SPEED_25000, 2234 }, 2240 { 2241 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2242 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2243 .speed = SPEED_50000, 2244 }, 2245 { 2246 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2247 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2248 .speed = SPEED_50000, 2249 }, 2250 { 2251 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2252 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2253 .speed = SPEED_50000, 2254 }, 2255 { 2256 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2257 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 2258 .speed = SPEED_56000, 2259 }, 2260 { 2261 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2262 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 2263 .speed = SPEED_56000, 2264 }, 2265 { 2266 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2267 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 2268 .speed = SPEED_56000, 2269 }, 2270 { 2271 .mask = 
MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2272 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 2273 .speed = SPEED_56000, 2274 }, 2275 { 2276 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2277 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2278 .speed = SPEED_100000, 2279 }, 2280 { 2281 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2282 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2283 .speed = SPEED_100000, 2284 }, 2285 { 2286 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2287 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2288 .speed = SPEED_100000, 2289 }, 2290 { 2291 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2292 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2293 .speed = SPEED_100000, 2294 }, 2295 }; 2296 2297 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 2298 2299 static void 2300 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, 2301 struct ethtool_link_ksettings *cmd) 2302 { 2303 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2304 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2305 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2306 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2307 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2308 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2309 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2310 2311 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2312 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2313 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2314 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2315 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2316 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2317 } 2318 2319 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode) 2320 { 2321 int i; 2322 2323 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2324 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 2325 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2326 mode); 2327 } 2328 } 2329 2330 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 2331 struct ethtool_link_ksettings *cmd) 2332 { 2333 u32 speed = SPEED_UNKNOWN; 2334 u8 duplex = DUPLEX_UNKNOWN; 2335 int i; 2336 2337 if (!carrier_ok) 2338 goto out; 2339 2340 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2341 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 2342 speed = mlxsw_sp_port_link_mode[i].speed; 2343 duplex = DUPLEX_FULL; 2344 break; 2345 } 2346 } 2347 out: 2348 cmd->base.speed = speed; 2349 cmd->base.duplex = duplex; 2350 } 2351 2352 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 2353 { 2354 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2355 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2356 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2357 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2358 return PORT_FIBRE; 2359 2360 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2361 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2362 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 2363 return PORT_DA; 2364 2365 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2366 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2367 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2368 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 2369 return PORT_NONE; 2370 2371 return PORT_OTHER; 2372 } 2373 2374 static u32 2375 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2376 { 2377 u32 ptys_proto = 0; 2378 int i; 2379 2380 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2381 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2382 
cmd->link_modes.advertising)) 2383 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2384 } 2385 return ptys_proto; 2386 } 2387 2388 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2389 { 2390 u32 ptys_proto = 0; 2391 int i; 2392 2393 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2394 if (speed == mlxsw_sp_port_link_mode[i].speed) 2395 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2396 } 2397 return ptys_proto; 2398 } 2399 2400 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2401 { 2402 u32 ptys_proto = 0; 2403 int i; 2404 2405 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2406 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 2407 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2408 } 2409 return ptys_proto; 2410 } 2411 2412 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, 2413 struct ethtool_link_ksettings *cmd) 2414 { 2415 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2416 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2417 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2418 2419 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); 2420 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); 2421 } 2422 2423 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, 2424 struct ethtool_link_ksettings *cmd) 2425 { 2426 if (!autoneg) 2427 return; 2428 2429 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2430 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); 2431 } 2432 2433 static void 2434 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, 2435 struct ethtool_link_ksettings *cmd) 2436 { 2437 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) 2438 return; 2439 2440 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg); 2441 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); 2442 } 2443 2444 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 2445 struct ethtool_link_ksettings *cmd) 2446 { 2447 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp; 2448 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2449 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2450 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2451 u8 autoneg_status; 2452 bool autoneg; 2453 int err; 2454 2455 autoneg = mlxsw_sp_port->link.autoneg; 2456 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 2457 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2458 if (err) 2459 return err; 2460 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, 2461 &eth_proto_oper); 2462 2463 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd); 2464 2465 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd); 2466 2467 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl); 2468 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl); 2469 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd); 2470 2471 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 2472 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper); 2473 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, 2474 cmd); 2475 2476 return 0; 2477 } 2478 2479 static int 2480 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 2481 const struct ethtool_link_ksettings *cmd) 2482 { 2483 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2484 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2485 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2486 u32 eth_proto_cap, eth_proto_new; 2487 bool autoneg; 2488 int err; 2489 2490 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 2491 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2492 if (err) 2493 return err; 2494 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL); 2495 2496 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 2497 eth_proto_new = autoneg ? 2498 mlxsw_sp_to_ptys_advert_link(cmd) : 2499 mlxsw_sp_to_ptys_speed(cmd->base.speed); 2500 2501 eth_proto_new = eth_proto_new & eth_proto_cap; 2502 if (!eth_proto_new) { 2503 netdev_err(dev, "No supported speed requested\n"); 2504 return -EINVAL; 2505 } 2506 2507 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2508 eth_proto_new); 2509 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2510 if (err) 2511 return err; 2512 2513 mlxsw_sp_port->link.autoneg = autoneg; 2514 2515 if (!netif_running(dev)) 2516 return 0; 2517 2518 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2519 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 2520 2521 return 0; 2522 } 2523 2524 static int mlxsw_sp_flash_device(struct net_device *dev, 2525 struct ethtool_flash *flash) 2526 { 2527 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2528 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2529 const struct firmware *firmware; 2530 int err; 2531 2532 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) 2533 return -EOPNOTSUPP; 2534 2535 dev_hold(dev); 2536 rtnl_unlock(); 2537 2538 err = request_firmware_direct(&firmware, flash->data, &dev->dev); 2539 if (err) 2540 goto out; 2541 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 2542 release_firmware(firmware); 2543 out: 2544 rtnl_lock(); 2545 dev_put(dev); 2546 return err; 2547 } 2548 2549 #define MLXSW_SP_I2C_ADDR_LOW 0x50 2550 #define MLXSW_SP_I2C_ADDR_HIGH 0x51 2551 #define MLXSW_SP_EEPROM_PAGE_LENGTH 256 2552 2553 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, 2554 u16 offset, u16 size, void *data, 2555 unsigned int *p_read_size) 2556 { 2557 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2558 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; 2559 char mcia_pl[MLXSW_REG_MCIA_LEN]; 2560 u16 i2c_addr; 2561 int status; 2562 int err; 2563 2564 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); 2565 2566 if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH && 2567 offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH) 2568 /* Cross-page read; read only up to offset 256 in the low page */ 2569 size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; 2570 2571 i2c_addr = MLXSW_SP_I2C_ADDR_LOW; 2572 if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { 2573 i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; 2574 offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; 2575 } 2576 2577 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, 2578 0, 0, offset, size, i2c_addr); 2579 2580 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); 2581 if (err) 2582 return err; 2583 2584 status = mlxsw_reg_mcia_status_get(mcia_pl); 2585 if (status) 2586 return -EIO; 2587 2588 
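/* The EEPROM contents are returned inline in the MCIA register payload; copy out only the number of bytes actually read. */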
mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); 2589 memcpy(data, eeprom_tmp, size); 2590 *p_read_size = size; 2591 2592 return 0; 2593 } 2594 2595 enum mlxsw_sp_eeprom_module_info_rev_id { 2596 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, 2597 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, 2598 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, 2599 }; 2600 2601 enum mlxsw_sp_eeprom_module_info_id { 2602 MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03, 2603 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, 2604 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, 2605 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, 2606 }; 2607 2608 enum mlxsw_sp_eeprom_module_info { 2609 MLXSW_SP_EEPROM_MODULE_INFO_ID, 2610 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID, 2611 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2612 }; 2613 2614 static int mlxsw_sp_get_module_info(struct net_device *netdev, 2615 struct ethtool_modinfo *modinfo) 2616 { 2617 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2618 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE]; 2619 u8 module_rev_id, module_id; 2620 unsigned int read_size; 2621 int err; 2622 2623 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, 2624 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2625 module_info, &read_size); 2626 if (err) 2627 return err; 2628 2629 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE) 2630 return -EIO; 2631 2632 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; 2633 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; 2634 2635 switch (module_id) { 2636 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: 2637 modinfo->type = ETH_MODULE_SFF_8436; 2638 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2639 break; 2640 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: 2641 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: 2642 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || 2643 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { 2644 modinfo->type = ETH_MODULE_SFF_8636; 2645 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2646 } else { 2647 modinfo->type = ETH_MODULE_SFF_8436; 2648 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2649 } 2650 break; 2651 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: 2652 modinfo->type = ETH_MODULE_SFF_8472; 2653 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2654 break; 2655 default: 2656 return -EINVAL; 2657 } 2658 2659 return 0; 2660 } 2661 2662 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 2663 struct ethtool_eeprom *ee, 2664 u8 *data) 2665 { 2666 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2667 int offset = ee->offset; 2668 unsigned int read_size; 2669 int i = 0; 2670 int err; 2671 2672 if (!ee->len) 2673 return -EINVAL; 2674 2675 memset(data, 0, ee->len); 2676 2677 while (i < ee->len) { 2678 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset, 2679 ee->len - i, data + i, 2680 &read_size); 2681 if (err) { 2682 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n"); 2683 return err; 2684 } 2685 2686 i += read_size; 2687 offset += read_size; 2688 } 2689 2690 return 0; 2691 } 2692 2693 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 2694 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 2695 .get_link = ethtool_op_get_link, 2696 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 2697 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 2698 .get_strings = mlxsw_sp_port_get_strings, 2699 .set_phys_id = mlxsw_sp_port_set_phys_id, 2700 .get_ethtool_stats = mlxsw_sp_port_get_stats, 2701 .get_sset_count = mlxsw_sp_port_get_sset_count, 2702 .get_link_ksettings = 
mlxsw_sp_port_get_link_ksettings, 2703 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 2704 .flash_device = mlxsw_sp_flash_device, 2705 .get_module_info = mlxsw_sp_get_module_info, 2706 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 2707 }; 2708 2709 static int 2710 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 2711 { 2712 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2713 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 2714 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2715 u32 eth_proto_admin; 2716 2717 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 2718 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2719 eth_proto_admin); 2720 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2721 } 2722 2723 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 2724 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 2725 bool dwrr, u8 dwrr_weight) 2726 { 2727 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2728 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2729 2730 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2731 next_index); 2732 mlxsw_reg_qeec_de_set(qeec_pl, true); 2733 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 2734 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 2735 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2736 } 2737 2738 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 2739 enum mlxsw_reg_qeec_hr hr, u8 index, 2740 u8 next_index, u32 maxrate) 2741 { 2742 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2743 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2744 2745 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2746 next_index); 2747 mlxsw_reg_qeec_mase_set(qeec_pl, true); 2748 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 2749 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2750 } 2751 2752 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 2753 u8 switch_prio, u8 tclass) 2754 { 2755 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2756 char qtct_pl[MLXSW_REG_QTCT_LEN]; 2757 2758 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 2759 tclass); 2760 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 2761 } 2762 2763 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 2764 { 2765 int err, i; 2766 2767 /* Set up the elements hierarchy, so that each TC is linked to 2768 * one subgroup, and all subgroups are members of the same group. 2769 */ 2770 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2771 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 2772 0); 2773 if (err) 2774 return err; 2775 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2776 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2777 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 2778 0, false, 0); 2779 if (err) 2780 return err; 2781 } 2782 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2783 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2784 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 2785 false, 0); 2786 if (err) 2787 return err; 2788 } 2789 2790 /* Make sure the max shaper is disabled in all hierarchies that 2791 * support it.
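 * Shapers are only enabled later on demand, e.g. when a maximal rate is configured via DCB.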
2792 */ 2793 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2794 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 2795 MLXSW_REG_QEEC_MAS_DIS); 2796 if (err) 2797 return err; 2798 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2799 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2800 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 2801 i, 0, 2802 MLXSW_REG_QEEC_MAS_DIS); 2803 if (err) 2804 return err; 2805 } 2806 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2807 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2808 MLXSW_REG_QEEC_HIERARCY_TC, 2809 i, i, 2810 MLXSW_REG_QEEC_MAS_DIS); 2811 if (err) 2812 return err; 2813 } 2814 2815 /* Map all priorities to traffic class 0. */ 2816 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2817 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 2818 if (err) 2819 return err; 2820 } 2821 2822 return 0; 2823 } 2824 2825 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2826 bool split, u8 module, u8 width, u8 lane) 2827 { 2828 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 2829 struct mlxsw_sp_port *mlxsw_sp_port; 2830 struct net_device *dev; 2831 int err; 2832 2833 err = mlxsw_core_port_init(mlxsw_sp->core, local_port); 2834 if (err) { 2835 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 2836 local_port); 2837 return err; 2838 } 2839 2840 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 2841 if (!dev) { 2842 err = -ENOMEM; 2843 goto err_alloc_etherdev; 2844 } 2845 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 2846 mlxsw_sp_port = netdev_priv(dev); 2847 mlxsw_sp_port->dev = dev; 2848 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 2849 mlxsw_sp_port->local_port = local_port; 2850 mlxsw_sp_port->pvid = 1; 2851 mlxsw_sp_port->split = split; 2852 mlxsw_sp_port->mapping.module = module; 2853 mlxsw_sp_port->mapping.width = width; 2854 mlxsw_sp_port->mapping.lane = lane; 2855 mlxsw_sp_port->link.autoneg = 1; 2856 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 2857 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 2858 2859 mlxsw_sp_port->pcpu_stats = 2860 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 2861 if (!mlxsw_sp_port->pcpu_stats) { 2862 err = -ENOMEM; 2863 goto err_alloc_stats; 2864 } 2865 2866 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 2867 GFP_KERNEL); 2868 if (!mlxsw_sp_port->sample) { 2869 err = -ENOMEM; 2870 goto err_alloc_sample; 2871 } 2872 2873 mlxsw_sp_port->hw_stats.cache = 2874 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL); 2875 2876 if (!mlxsw_sp_port->hw_stats.cache) { 2877 err = -ENOMEM; 2878 goto err_alloc_hw_stats; 2879 } 2880 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw, 2881 &update_stats_cache); 2882 2883 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 2884 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 2885 2886 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane); 2887 if (err) { 2888 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 2889 mlxsw_sp_port->local_port); 2890 goto err_port_module_map; 2891 } 2892 2893 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 2894 if (err) { 2895 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 2896 mlxsw_sp_port->local_port); 2897 goto err_port_swid_set; 2898 } 2899 2900 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 2901 if (err) { 2902 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 2903 mlxsw_sp_port->local_port); 2904 goto err_dev_addr_init; 2905 } 2906 2907 netif_carrier_off(dev); 2908 2909 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | 
NETIF_F_SG | 2910 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 2911 dev->hw_features |= NETIF_F_HW_TC; 2912 2913 dev->min_mtu = 0; 2914 dev->max_mtu = ETH_MAX_MTU; 2915 2916 /* Each packet needs to have a Tx header (metadata) on top of all other 2917 * headers. 2918 */ 2919 dev->needed_headroom = MLXSW_TXHDR_LEN; 2920 2921 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 2922 if (err) { 2923 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 2924 mlxsw_sp_port->local_port); 2925 goto err_port_system_port_mapping_set; 2926 } 2927 2928 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 2929 if (err) { 2930 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 2931 mlxsw_sp_port->local_port); 2932 goto err_port_speed_by_width_set; 2933 } 2934 2935 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 2936 if (err) { 2937 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 2938 mlxsw_sp_port->local_port); 2939 goto err_port_mtu_set; 2940 } 2941 2942 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2943 if (err) 2944 goto err_port_admin_status_set; 2945 2946 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 2947 if (err) { 2948 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 2949 mlxsw_sp_port->local_port); 2950 goto err_port_buffers_init; 2951 } 2952 2953 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 2954 if (err) { 2955 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 2956 mlxsw_sp_port->local_port); 2957 goto err_port_ets_init; 2958 } 2959 2960 /* ETS and buffers must be initialized before DCB. */ 2961 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 2962 if (err) { 2963 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 2964 mlxsw_sp_port->local_port); 2965 goto err_port_dcb_init; 2966 } 2967 2968 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 2969 if (err) { 2970 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 2971 mlxsw_sp_port->local_port); 2972 goto err_port_fids_init; 2973 } 2974 2975 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 2976 if (IS_ERR(mlxsw_sp_port_vlan)) { 2977 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 2978 mlxsw_sp_port->local_port); 2979 goto err_port_vlan_get; 2980 } 2981 2982 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 2983 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 2984 err = register_netdev(dev); 2985 if (err) { 2986 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 2987 mlxsw_sp_port->local_port); 2988 goto err_register_netdev; 2989 } 2990 2991 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 2992 mlxsw_sp_port, dev, mlxsw_sp_port->split, 2993 module); 2994 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0); 2995 return 0; 2996 2997 err_register_netdev: 2998 mlxsw_sp->ports[local_port] = NULL; 2999 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3000 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 3001 err_port_vlan_get: 3002 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3003 err_port_fids_init: 3004 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3005 err_port_dcb_init: 3006 err_port_ets_init: 3007 err_port_buffers_init: 3008 err_port_admin_status_set: 3009 err_port_mtu_set: 3010 err_port_speed_by_width_set: 3011 err_port_system_port_mapping_set: 3012 err_dev_addr_init: 3013 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3014 err_port_swid_set: 3015 
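/* Undo the module mapping established by mlxsw_sp_port_module_map() above. */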
mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3016 err_port_module_map: 3017 kfree(mlxsw_sp_port->hw_stats.cache); 3018 err_alloc_hw_stats: 3019 kfree(mlxsw_sp_port->sample); 3020 err_alloc_sample: 3021 free_percpu(mlxsw_sp_port->pcpu_stats); 3022 err_alloc_stats: 3023 free_netdev(dev); 3024 err_alloc_etherdev: 3025 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3026 return err; 3027 } 3028 3029 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3030 { 3031 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3032 3033 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw); 3034 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3035 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3036 mlxsw_sp->ports[local_port] = NULL; 3037 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3038 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 3039 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3040 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3041 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3042 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3043 kfree(mlxsw_sp_port->hw_stats.cache); 3044 kfree(mlxsw_sp_port->sample); 3045 free_percpu(mlxsw_sp_port->pcpu_stats); 3046 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3047 free_netdev(mlxsw_sp_port->dev); 3048 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3049 } 3050 3051 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3052 { 3053 return mlxsw_sp->ports[local_port] != NULL; 3054 } 3055 3056 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3057 { 3058 int i; 3059 3060 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3061 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3062 mlxsw_sp_port_remove(mlxsw_sp, i); 3063 kfree(mlxsw_sp->port_to_module); 3064 kfree(mlxsw_sp->ports); 3065 } 3066 3067 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3068 { 3069 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3070 u8 module, width, lane; 3071 size_t alloc_size; 3072 int i; 3073 int err; 3074 3075 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3076 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3077 if (!mlxsw_sp->ports) 3078 return -ENOMEM; 3079 3080 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL); 3081 if (!mlxsw_sp->port_to_module) { 3082 err = -ENOMEM; 3083 goto err_port_to_module_alloc; 3084 } 3085 3086 for (i = 1; i < max_ports; i++) { 3087 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3088 &width, &lane); 3089 if (err) 3090 goto err_port_module_info_get; 3091 if (!width) 3092 continue; 3093 mlxsw_sp->port_to_module[i] = module; 3094 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3095 module, width, lane); 3096 if (err) 3097 goto err_port_create; 3098 } 3099 return 0; 3100 3101 err_port_create: 3102 err_port_module_info_get: 3103 for (i--; i >= 1; i--) 3104 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3105 mlxsw_sp_port_remove(mlxsw_sp, i); 3106 kfree(mlxsw_sp->port_to_module); 3107 err_port_to_module_alloc: 3108 kfree(mlxsw_sp->ports); 3109 return err; 3110 } 3111 3112 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3113 { 3114 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3115 3116 return local_port - offset; 3117 } 3118 3119 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3120 u8 module, unsigned int count) 3121 { 3122 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3123 int err, i; 3124 3125 for (i = 0; i < count; i++) { 
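/* Each split port starts at a lane offset of its index times the reduced width. */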
3126 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 3127 module, width, i * width); 3128 if (err) 3129 goto err_port_create; 3130 } 3131 3132 return 0; 3133 3134 err_port_create: 3135 for (i--; i >= 0; i--) 3136 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3137 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3138 return err; 3139 } 3140 3141 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3142 u8 base_port, unsigned int count) 3143 { 3144 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 3145 int i; 3146 3147 /* Undoing a split by four means we need to re-create two original 3148 * ports; undoing a split by two, only one. 3149 */ 3150 count = count / 2; 3151 3152 for (i = 0; i < count; i++) { 3153 local_port = base_port + i * 2; 3154 module = mlxsw_sp->port_to_module[local_port]; 3155 3156 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 3157 width, 0); 3158 } 3159 } 3160 3161 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3162 unsigned int count) 3163 { 3164 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3165 struct mlxsw_sp_port *mlxsw_sp_port; 3166 u8 module, cur_width, base_port; 3167 int i; 3168 int err; 3169 3170 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3171 if (!mlxsw_sp_port) { 3172 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3173 local_port); 3174 return -EINVAL; 3175 } 3176 3177 module = mlxsw_sp_port->mapping.module; 3178 cur_width = mlxsw_sp_port->mapping.width; 3179 3180 if (count != 2 && count != 4) { 3181 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 3182 return -EINVAL; 3183 } 3184 3185 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 3186 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3187 return -EINVAL; 3188 } 3189 3190 /* Make sure we have enough slave (even) ports for the split. */ 3191 if (count == 2) { 3192 base_port = local_port; 3193 if (mlxsw_sp->ports[base_port + 1]) { 3194 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3195 return -EINVAL; 3196 } 3197 } else { 3198 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3199 if (mlxsw_sp->ports[base_port + 1] || 3200 mlxsw_sp->ports[base_port + 3]) { 3201 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3202 return -EINVAL; 3203 } 3204 } 3205 3206 for (i = 0; i < count; i++) 3207 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3208 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3209 3210 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 3211 if (err) { 3212 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3213 goto err_port_split_create; 3214 } 3215 3216 return 0; 3217 3218 err_port_split_create: 3219 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3220 return err; 3221 } 3222 3223 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 3224 { 3225 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3226 struct mlxsw_sp_port *mlxsw_sp_port; 3227 u8 cur_width, base_port; 3228 unsigned int count; 3229 int i; 3230 3231 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3232 if (!mlxsw_sp_port) { 3233 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3234 local_port); 3235 return -EINVAL; 3236 } 3237 3238 if (!mlxsw_sp_port->split) { 3239 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 3240 return -EINVAL; 3241 } 3242 3243 cur_width = mlxsw_sp_port->mapping.width; 3244 count = cur_width == 1 ? 
4 : 2; 3245 3246 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3247 3248 /* Determine which ports to remove. */ 3249 if (count == 2 && local_port >= base_port + 2) 3250 base_port = base_port + 2; 3251 3252 for (i = 0; i < count; i++) 3253 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3254 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3255 3256 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3257 3258 return 0; 3259 } 3260 3261 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3262 char *pude_pl, void *priv) 3263 { 3264 struct mlxsw_sp *mlxsw_sp = priv; 3265 struct mlxsw_sp_port *mlxsw_sp_port; 3266 enum mlxsw_reg_pude_oper_status status; 3267 u8 local_port; 3268 3269 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3270 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3271 if (!mlxsw_sp_port) 3272 return; 3273 3274 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3275 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3276 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3277 netif_carrier_on(mlxsw_sp_port->dev); 3278 } else { 3279 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3280 netif_carrier_off(mlxsw_sp_port->dev); 3281 } 3282 } 3283 3284 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3285 u8 local_port, void *priv) 3286 { 3287 struct mlxsw_sp *mlxsw_sp = priv; 3288 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3289 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3290 3291 if (unlikely(!mlxsw_sp_port)) { 3292 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3293 local_port); 3294 return; 3295 } 3296 3297 skb->dev = mlxsw_sp_port->dev; 3298 3299 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3300 u64_stats_update_begin(&pcpu_stats->syncp); 3301 pcpu_stats->rx_packets++; 3302 pcpu_stats->rx_bytes += skb->len; 3303 u64_stats_update_end(&pcpu_stats->syncp); 3304 3305 skb->protocol = eth_type_trans(skb, skb->dev); 3306 netif_receive_skb(skb); 3307 } 3308 3309 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3310 void *priv) 3311 { 3312 skb->offload_fwd_mark = 1; 3313 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3314 } 3315 3316 static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb, 3317 u8 local_port, void *priv) 3318 { 3319 skb->offload_mr_fwd_mark = 1; 3320 skb->offload_fwd_mark = 1; 3321 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3322 } 3323 3324 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 3325 void *priv) 3326 { 3327 struct mlxsw_sp *mlxsw_sp = priv; 3328 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3329 struct psample_group *psample_group; 3330 u32 size; 3331 3332 if (unlikely(!mlxsw_sp_port)) { 3333 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 3334 local_port); 3335 goto out; 3336 } 3337 if (unlikely(!mlxsw_sp_port->sample)) { 3338 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 3339 local_port); 3340 goto out; 3341 } 3342 3343 size = mlxsw_sp_port->sample->truncate ? 
3344 mlxsw_sp_port->sample->trunc_size : skb->len; 3345 3346 rcu_read_lock(); 3347 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 3348 if (!psample_group) 3349 goto out_unlock; 3350 psample_sample_packet(psample_group, skb, size, 3351 mlxsw_sp_port->dev->ifindex, 0, 3352 mlxsw_sp_port->sample->rate); 3353 out_unlock: 3354 rcu_read_unlock(); 3355 out: 3356 consume_skb(skb); 3357 } 3358 3359 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3360 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 3361 _is_ctrl, SP_##_trap_group, DISCARD) 3362 3363 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3364 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 3365 _is_ctrl, SP_##_trap_group, DISCARD) 3366 3367 #define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3368 MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \ 3369 _is_ctrl, SP_##_trap_group, DISCARD) 3370 3371 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 3372 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 3373 3374 static const struct mlxsw_listener mlxsw_sp_listener[] = { 3375 /* Events */ 3376 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 3377 /* L2 traps */ 3378 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 3379 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 3380 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 3381 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 3382 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 3383 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 3384 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 3385 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 3386 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 3387 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 3388 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 3389 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 3390 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 3391 false), 3392 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3393 false), 3394 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 3395 false), 3396 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3397 false), 3398 /* L3 traps */ 3399 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3400 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3401 MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3402 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 3403 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 3404 false), 3405 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 3406 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 3407 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 3408 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 3409 false), 3410 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 3411 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 3412 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 3413 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 3414 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 3415 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 3416 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3417 false), 3418 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, 
TRAP_TO_CPU, IPV6_ND, 3419 false), 3420 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3421 false), 3422 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 3423 false), 3424 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 3425 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 3426 false), 3427 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), 3428 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), 3429 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 3430 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 3431 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3432 /* PKT Sample trap */ 3433 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 3434 false, SP_IP2ME, DISCARD), 3435 /* ACL trap */ 3436 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 3437 /* Multicast Router Traps */ 3438 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 3439 MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), 3440 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 3441 MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 3442 }; 3443 3444 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 3445 { 3446 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 3447 enum mlxsw_reg_qpcr_ir_units ir_units; 3448 int max_cpu_policers; 3449 bool is_bytes; 3450 u8 burst_size; 3451 u32 rate; 3452 int i, err; 3453 3454 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 3455 return -EIO; 3456 3457 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3458 3459 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 3460 for (i = 0; i < max_cpu_policers; i++) { 3461 is_bytes = false; 3462 switch (i) { 3463 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3464 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3465 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3466 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3467 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3468 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3469 rate = 128; 3470 burst_size = 7; 3471 break; 3472 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3473 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3474 rate = 16 * 1024; 3475 burst_size = 10; 3476 break; 3477 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3478 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3479 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3480 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3481 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3482 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3483 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3484 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3485 rate = 1024; 3486 burst_size = 7; 3487 break; 3488 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3489 is_bytes = true; 3490 rate = 4 * 1024; 3491 burst_size = 4; 3492 break; 3493 default: 3494 continue; 3495 } 3496 3497 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 3498 burst_size); 3499 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 3500 if (err) 3501 return err; 3502 } 3503 3504 return 0; 3505 } 3506 3507 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 3508 { 3509 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3510 enum mlxsw_reg_htgt_trap_group i; 3511 int max_cpu_policers; 3512 int max_trap_groups; 3513 u8 priority, tc; 3514 u16 policer_id; 3515 int err; 3516 3517 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 3518 return -EIO; 3519 3520 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 3521 max_cpu_policers = 
MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3522 3523 for (i = 0; i < max_trap_groups; i++) { 3524 policer_id = i; 3525 switch (i) { 3526 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3527 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3528 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3529 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3530 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3531 priority = 5; 3532 tc = 5; 3533 break; 3534 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3535 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3536 priority = 4; 3537 tc = 4; 3538 break; 3539 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3540 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3541 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3542 priority = 3; 3543 tc = 3; 3544 break; 3545 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3546 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3547 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3548 priority = 2; 3549 tc = 2; 3550 break; 3551 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3552 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3553 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3554 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3555 priority = 1; 3556 tc = 1; 3557 break; 3558 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 3559 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 3560 tc = MLXSW_REG_HTGT_DEFAULT_TC; 3561 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 3562 break; 3563 default: 3564 continue; 3565 } 3566 3567 if (max_cpu_policers <= policer_id && 3568 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 3569 return -EIO; 3570 3571 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 3572 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3573 if (err) 3574 return err; 3575 } 3576 3577 return 0; 3578 } 3579 3580 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 3581 { 3582 int i; 3583 int err; 3584 3585 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 3586 if (err) 3587 return err; 3588 3589 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 3590 if (err) 3591 return err; 3592 3593 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3594 err = mlxsw_core_trap_register(mlxsw_sp->core, 3595 &mlxsw_sp_listener[i], 3596 mlxsw_sp); 3597 if (err) 3598 goto err_listener_register; 3599 3600 } 3601 return 0; 3602 3603 err_listener_register: 3604 for (i--; i >= 0; i--) { 3605 mlxsw_core_trap_unregister(mlxsw_sp->core, 3606 &mlxsw_sp_listener[i], 3607 mlxsw_sp); 3608 } 3609 return err; 3610 } 3611 3612 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 3613 { 3614 int i; 3615 3616 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3617 mlxsw_core_trap_unregister(mlxsw_sp->core, 3618 &mlxsw_sp_listener[i], 3619 mlxsw_sp); 3620 } 3621 } 3622 3623 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 3624 { 3625 char slcr_pl[MLXSW_REG_SLCR_LEN]; 3626 int err; 3627 3628 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 3629 MLXSW_REG_SLCR_LAG_HASH_DMAC | 3630 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 3631 MLXSW_REG_SLCR_LAG_HASH_VLANID | 3632 MLXSW_REG_SLCR_LAG_HASH_SIP | 3633 MLXSW_REG_SLCR_LAG_HASH_DIP | 3634 MLXSW_REG_SLCR_LAG_HASH_SPORT | 3635 MLXSW_REG_SLCR_LAG_HASH_DPORT | 3636 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 3637 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 3638 if (err) 3639 return err; 3640 3641 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 3642 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 3643 return -EIO; 3644 3645 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 3646 sizeof(struct mlxsw_sp_upper), 3647 GFP_KERNEL); 3648 if (!mlxsw_sp->lags) 
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr);

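/* Main init sequence for a Spectrum device. The steps below depend on
 * their predecessors (e.g. the netdevice notifier is only registered
 * once the router is initialized) and the error unwind releases
 * everything in reverse order, mirroring mlxsw_sp_fini().
 */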
3744 */ 3745 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 3746 err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3747 if (err) { 3748 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 3749 goto err_netdev_notifier; 3750 } 3751 3752 err = mlxsw_sp_span_init(mlxsw_sp); 3753 if (err) { 3754 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3755 goto err_span_init; 3756 } 3757 3758 err = mlxsw_sp_acl_init(mlxsw_sp); 3759 if (err) { 3760 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3761 goto err_acl_init; 3762 } 3763 3764 err = mlxsw_sp_dpipe_init(mlxsw_sp); 3765 if (err) { 3766 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 3767 goto err_dpipe_init; 3768 } 3769 3770 err = mlxsw_sp_ports_create(mlxsw_sp); 3771 if (err) { 3772 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3773 goto err_ports_create; 3774 } 3775 3776 return 0; 3777 3778 err_ports_create: 3779 mlxsw_sp_dpipe_fini(mlxsw_sp); 3780 err_dpipe_init: 3781 mlxsw_sp_acl_fini(mlxsw_sp); 3782 err_acl_init: 3783 mlxsw_sp_span_fini(mlxsw_sp); 3784 err_span_init: 3785 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3786 err_netdev_notifier: 3787 mlxsw_sp_router_fini(mlxsw_sp); 3788 err_router_init: 3789 mlxsw_sp_afa_fini(mlxsw_sp); 3790 err_afa_init: 3791 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3792 err_counter_pool_init: 3793 mlxsw_sp_switchdev_fini(mlxsw_sp); 3794 err_switchdev_init: 3795 mlxsw_sp_lag_fini(mlxsw_sp); 3796 err_lag_init: 3797 mlxsw_sp_buffers_fini(mlxsw_sp); 3798 err_buffers_init: 3799 mlxsw_sp_traps_fini(mlxsw_sp); 3800 err_traps_init: 3801 mlxsw_sp_fids_fini(mlxsw_sp); 3802 return err; 3803 } 3804 3805 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3806 { 3807 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3808 3809 mlxsw_sp_ports_remove(mlxsw_sp); 3810 mlxsw_sp_dpipe_fini(mlxsw_sp); 3811 mlxsw_sp_acl_fini(mlxsw_sp); 3812 mlxsw_sp_span_fini(mlxsw_sp); 3813 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3814 mlxsw_sp_router_fini(mlxsw_sp); 3815 mlxsw_sp_afa_fini(mlxsw_sp); 3816 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3817 mlxsw_sp_switchdev_fini(mlxsw_sp); 3818 mlxsw_sp_lag_fini(mlxsw_sp); 3819 mlxsw_sp_buffers_fini(mlxsw_sp); 3820 mlxsw_sp_traps_fini(mlxsw_sp); 3821 mlxsw_sp_fids_fini(mlxsw_sp); 3822 } 3823 3824 static const struct mlxsw_config_profile mlxsw_sp_config_profile = { 3825 .used_max_vepa_channels = 1, 3826 .max_vepa_channels = 0, 3827 .used_max_mid = 1, 3828 .max_mid = MLXSW_SP_MID_MAX, 3829 .used_max_pgt = 1, 3830 .max_pgt = 0, 3831 .used_flood_tables = 1, 3832 .used_flood_mode = 1, 3833 .flood_mode = 3, 3834 .max_fid_offset_flood_tables = 3, 3835 .fid_offset_flood_table_size = VLAN_N_VID - 1, 3836 .max_fid_flood_tables = 3, 3837 .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX, 3838 .used_max_ib_mc = 1, 3839 .max_ib_mc = 0, 3840 .used_max_pkey = 1, 3841 .max_pkey = 0, 3842 .used_kvd_split_data = 1, 3843 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY, 3844 .kvd_hash_single_parts = 2, 3845 .kvd_hash_double_parts = 1, 3846 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3847 .swid_config = { 3848 { 3849 .used_type = 1, 3850 .type = MLXSW_PORT_SWID_TYPE_ETH, 3851 } 3852 }, 3853 .resource_query_enable = 1, 3854 }; 3855 3856 static struct mlxsw_driver mlxsw_sp_driver = { 3857 .kind = mlxsw_sp_driver_name, 3858 .priv_size = sizeof(struct mlxsw_sp), 3859 .init = mlxsw_sp_init, 3860 .fini = mlxsw_sp_fini, 3861 .basic_trap_groups_set = 
static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 3,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_split_data		= 1,
	.kvd_hash_granularity		= MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts		= 2,
	.kvd_hash_double_parts		= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable		= 1,
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};

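/* A netdev is a Spectrum port if it uses this driver's netdev ops.
 * The walkers below rely on this check to locate the Spectrum port
 * (and from it, the device instance) underneath an arbitrary upper
 * such as a bridge, LAG or VLAN device.
 */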
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

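/* Return the LAG ID already bound to lag_dev, or else the first free
 * ID. -EBUSY means all LAGs supported by the device are in use.
 */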
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG(extack,
			       "spectrum: Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG(extack,
			       "spectrum: LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

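/* Unwind a port's LAG membership: stop collection and distribution,
 * flush the now-stale VLAN entries, and re-instantiate the default
 * VID 1 entry so untagged traffic keeps flowing.
 */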
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;
	return 0;

err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

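/* NETDEV_PRECHANGEUPPER is used to veto topologies the device cannot
 * offload before they are committed; NETDEV_CHANGEUPPER then carries
 * out the actual join or leave against the bridge, LAG or OVS master.
 */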
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

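/* Events for a LAG (or a VLAN on top of one) arrive against the
 * aggregate device; replay them on every Spectrum port enslaved to it.
 */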
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev,
							    event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev)) {
			NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev)) {
			NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);

	return 0;
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

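/* Single entry point for netdev events, dispatching from the most to
 * the least specific handler: IP-in-IP uppers, router port changes,
 * VRF enslavement, physical ports, LAG masters and VLAN devices.
 */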
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (mlxsw_sp_netdev_is_ipip(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_event(mlxsw_sp, dev, event, ptr);
	else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);