/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "../mlxfw/mlxfw.h"

#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1420
#define MLXSW_FWREV_SUBMINOR 122

static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
	.major = MLXSW_FWREV_MAJOR,
	.minor = MLXSW_FWREV_MINOR,
	.subminor = MLXSW_FWREV_SUBMINOR
};

#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
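/* For reference, with the revision numbers above this expands to
 * "mellanox/mlxsw_spectrum-13.1420.122.mfa2", which request_firmware()
 * resolves relative to the system firmware directory (commonly
 * /lib/firmware; the exact location is a distribution choice, not
 * something this driver controls).
 */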
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
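/* A note on the MLXSW_ITEM32() invocations above: each one generates
 * mlxsw_tx_hdr_<field>_set()/_get() helpers that access a field of the
 * given bit width at the given bit offset within the 32-bit word at the
 * given byte offset. As an illustrative sketch only (the real macro in
 * core.h also handles indexed items and structures the byte swapping
 * differently), the version field accessor behaves roughly like:
 *
 *	static inline void mlxsw_tx_hdr_version_set(char *buf, u32 val)
 *	{
 *		__be32 *p = (__be32 *) (buf + 0x00);
 *		u32 w = be32_to_cpu(*p);
 *
 *		w &= ~GENMASK(31, 28);		// 4 bits at offset 28
 *		w |= (val & 0xf) << 28;
 *		*p = cpu_to_be32(w);
 *	}
 */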
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release
};

static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}
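/* Taken together, the callbacks above let the common mlxfw code drive the
 * firmware update state machine via the MCC/MCDA registers. A successful
 * flash roughly follows this order (error paths go through fsm_cancel()
 * and fsm_release() instead):
 *
 *	fsm_lock()                      lock and obtain an update handle
 *	per component:
 *		component_query()       max size, alignment, write size
 *		fsm_component_update()  announce the component and its size
 *		fsm_block_download()    repeated MCDA writes of data blocks
 *		fsm_component_verify()  let the device validate the image
 *	fsm_activate()                  activate the newly burnt image
 *	fsm_release()                   release the update handle
 */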
static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
			       const struct mlxsw_fw_rev *b)
{
	if (a->major != b->major)
		return a->major > b->major;
	if (a->minor != b->minor)
		return a->minor > b->minor;
	return a->subminor >= b->subminor;
}
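/* The comparison is lexicographic over (major, minor, subminor). For
 * example, against the supported revision 13.1420.122 defined above,
 * 13.1420.122 and 13.1421.0 pass, while 13.1420.121 and 13.1399.500 do
 * not and would trigger the firmware flash below.
 */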
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct firmware *firmware;
	int err;

	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
		 MLXSW_SP_FW_FILENAME);

	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			MLXSW_SP_FW_FILENAME);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
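/* Typical life cycle of a flow counter, as a sketch rather than a
 * verbatim caller (the actual users live in the ACL code): allocate an
 * index from the flow sub-pool, read packets/bytes while the rule is
 * installed, then free the index.
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *	int err;
 *
 *	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	if (err)
 *		return err;
 *	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
 *					&packets, &bytes);
 *	...
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 */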
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
					      port->local_port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
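/* The 5/2 factor sizes the mirror buffer to two and a half MTUs' worth
 * of data, plus one spare cell. Worked example, assuming a 96-byte cell
 * (the value mlxsw_sp_bytes_cells() divides by on Spectrum): for an MTU
 * of 1500, 1500 * 5 / 2 = 3750 bytes, DIV_ROUND_UP(3750, 96) = 40 cells,
 * plus the spare cell = 41.
 */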
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}
static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					u8 destination_port,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
					      destination_port);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
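/* Port MAC addresses are derived from the switch base MAC by adding the
 * local port number to the last octet. With an illustrative base MAC of
 * 7c:fe:90:00:00:00, local port 5 ends up with 7c:fe:90:00:00:05. Note
 * this is a plain byte addition, so it relies on the base MAC leaving
 * enough room in the last octet for the highest local port number.
 */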
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
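/* The MTU programmed to the device covers the Tx header and the Ethernet
 * header on top of the L3 MTU requested by the user. For example, a
 * requested MTU of 1500 is validated and programmed as
 * 1500 + MLXSW_TXHDR_LEN (16) + ETH_HLEN (14) = 1530 bytes.
 */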
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}
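/* Worked example for the PFC delay above (illustrative values, again
 * assuming a 96-byte cell): a delay of 32768 bits is rounded up to 4096
 * bytes and converted to DIV_ROUND_UP(4096, 96) = 43 cells, doubled by
 * MLXSW_SP_CELL_FACTOR to 86, plus DIV_ROUND_UP(1500, 96) = 16 cells for
 * one MTU-sized packet in flight, giving 102 cells of lossless headroom.
 */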
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
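/* SPVM carries at most MLXSW_REG_SPVM_REC_MAX_COUNT VLAN records per
 * write, so wide ranges are chunked. E.g. with a record limit of 255,
 * setting VIDs 1-1000 results in four register writes covering 1-255,
 * 256-510, 511-765 and 766-1000.
 */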
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return mlxsw_sp_port_vlan;

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
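/* The resulting names follow the "p<module>"/"p<module>s<subport>"
 * convention: a non-split port on module 2 is reported as "p3", while
 * the half of a 2-lane split port mapped to lanes 2-3 of that module is
 * reported as "p3s1" (lane 2 / width 2 = subport 1).
 */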
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
				    span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}
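/* From user space, the matchall mirror and sample paths above correspond
 * to commands along these lines (illustrative device names):
 *
 *	tc qdisc add dev sw1p1 clsact
 *	tc filter add dev sw1p1 ingress matchall skip_sw \
 *		action mirred egress mirror dev sw1p2
 *	tc filter add dev sw1p1 ingress matchall skip_sw \
 *		action sample rate 100 group 1
 */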
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	tcf_exts_to_list(f->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	if (f->common.chain_index)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct tc_cls_flower_offload *f,
			     bool ingress)
{
	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				      void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
						    ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = mlxsw_sp_setup_tc_block_cb_ig;
	else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = mlxsw_sp_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
					     mlxsw_sp_port);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
mlxsw_sp_port_xmit, 1806 .ndo_setup_tc = mlxsw_sp_setup_tc, 1807 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1808 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1809 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1810 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1811 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1812 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1813 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1814 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1815 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, 1816 }; 1817 1818 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1819 struct ethtool_drvinfo *drvinfo) 1820 { 1821 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1822 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1823 1824 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); 1825 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1826 sizeof(drvinfo->version)); 1827 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1828 "%d.%d.%d", 1829 mlxsw_sp->bus_info->fw_rev.major, 1830 mlxsw_sp->bus_info->fw_rev.minor, 1831 mlxsw_sp->bus_info->fw_rev.subminor); 1832 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1833 sizeof(drvinfo->bus_info)); 1834 } 1835 1836 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1837 struct ethtool_pauseparam *pause) 1838 { 1839 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1840 1841 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1842 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1843 } 1844 1845 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1846 struct ethtool_pauseparam *pause) 1847 { 1848 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1849 1850 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1851 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1852 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1853 1854 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1855 pfcc_pl); 1856 } 1857 1858 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1859 struct ethtool_pauseparam *pause) 1860 { 1861 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1862 bool pause_en = pause->tx_pause || pause->rx_pause; 1863 int err; 1864 1865 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1866 netdev_err(dev, "PFC already enabled on port\n"); 1867 return -EINVAL; 1868 } 1869 1870 if (pause->autoneg) { 1871 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1872 return -EINVAL; 1873 } 1874 1875 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1876 if (err) { 1877 netdev_err(dev, "Failed to configure port's headroom\n"); 1878 return err; 1879 } 1880 1881 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1882 if (err) { 1883 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1884 goto err_port_pause_configure; 1885 } 1886 1887 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1888 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1889 1890 return 0; 1891 1892 err_port_pause_configure: 1893 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1894 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1895 return err; 1896 } 1897 1898 struct mlxsw_sp_port_hw_stats { 1899 char str[ETH_GSTRING_LEN]; 1900 u64 (*getter)(const char *payload); 1901 bool cells_bytes; 1902 }; 1903 1904 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1905 { 1906 .str = "a_frames_transmitted_ok", 1907 .getter = 
mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1908 }, 1909 { 1910 .str = "a_frames_received_ok", 1911 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1912 }, 1913 { 1914 .str = "a_frame_check_sequence_errors", 1915 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1916 }, 1917 { 1918 .str = "a_alignment_errors", 1919 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1920 }, 1921 { 1922 .str = "a_octets_transmitted_ok", 1923 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1924 }, 1925 { 1926 .str = "a_octets_received_ok", 1927 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1928 }, 1929 { 1930 .str = "a_multicast_frames_xmitted_ok", 1931 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1932 }, 1933 { 1934 .str = "a_broadcast_frames_xmitted_ok", 1935 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1936 }, 1937 { 1938 .str = "a_multicast_frames_received_ok", 1939 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1940 }, 1941 { 1942 .str = "a_broadcast_frames_received_ok", 1943 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1944 }, 1945 { 1946 .str = "a_in_range_length_errors", 1947 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1948 }, 1949 { 1950 .str = "a_out_of_range_length_field", 1951 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1952 }, 1953 { 1954 .str = "a_frame_too_long_errors", 1955 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1956 }, 1957 { 1958 .str = "a_symbol_error_during_carrier", 1959 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1960 }, 1961 { 1962 .str = "a_mac_control_frames_transmitted", 1963 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 1964 }, 1965 { 1966 .str = "a_mac_control_frames_received", 1967 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 1968 }, 1969 { 1970 .str = "a_unsupported_opcodes_received", 1971 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 1972 }, 1973 { 1974 .str = "a_pause_mac_ctrl_frames_received", 1975 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 1976 }, 1977 { 1978 .str = "a_pause_mac_ctrl_frames_xmitted", 1979 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 1980 }, 1981 }; 1982 1983 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 1984 1985 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 1986 { 1987 .str = "rx_octets_prio", 1988 .getter = mlxsw_reg_ppcnt_rx_octets_get, 1989 }, 1990 { 1991 .str = "rx_frames_prio", 1992 .getter = mlxsw_reg_ppcnt_rx_frames_get, 1993 }, 1994 { 1995 .str = "tx_octets_prio", 1996 .getter = mlxsw_reg_ppcnt_tx_octets_get, 1997 }, 1998 { 1999 .str = "tx_frames_prio", 2000 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2001 }, 2002 { 2003 .str = "rx_pause_prio", 2004 .getter = mlxsw_reg_ppcnt_rx_pause_get, 2005 }, 2006 { 2007 .str = "rx_pause_duration_prio", 2008 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2009 }, 2010 { 2011 .str = "tx_pause_prio", 2012 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2013 }, 2014 { 2015 .str = "tx_pause_duration_prio", 2016 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2017 }, 2018 }; 2019 2020 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2021 2022 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2023 { 2024 .str = "tc_transmit_queue_tc", 2025 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2026 .cells_bytes = true, 2027 }, 2028 { 2029 .str = "tc_no_buffer_discard_uc_tc", 2030 .getter = 
mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2031 }, 2032 }; 2033 2034 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2035 2036 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2037 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ 2038 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ 2039 IEEE_8021QAZ_MAX_TCS) 2040 2041 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2042 { 2043 int i; 2044 2045 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2046 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2047 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2048 *p += ETH_GSTRING_LEN; 2049 } 2050 } 2051 2052 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2053 { 2054 int i; 2055 2056 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2057 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2058 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2059 *p += ETH_GSTRING_LEN; 2060 } 2061 } 2062 2063 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2064 u32 stringset, u8 *data) 2065 { 2066 u8 *p = data; 2067 int i; 2068 2069 switch (stringset) { 2070 case ETH_SS_STATS: 2071 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2072 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2073 ETH_GSTRING_LEN); 2074 p += ETH_GSTRING_LEN; 2075 } 2076 2077 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2078 mlxsw_sp_port_get_prio_strings(&p, i); 2079 2080 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2081 mlxsw_sp_port_get_tc_strings(&p, i); 2082 2083 break; 2084 } 2085 } 2086 2087 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2088 enum ethtool_phys_id_state state) 2089 { 2090 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2091 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2092 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2093 bool active; 2094 2095 switch (state) { 2096 case ETHTOOL_ID_ACTIVE: 2097 active = true; 2098 break; 2099 case ETHTOOL_ID_INACTIVE: 2100 active = false; 2101 break; 2102 default: 2103 return -EOPNOTSUPP; 2104 } 2105 2106 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2107 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2108 } 2109 2110 static int 2111 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2112 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2113 { 2114 switch (grp) { 2115 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2116 *p_hw_stats = mlxsw_sp_port_hw_stats; 2117 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2118 break; 2119 case MLXSW_REG_PPCNT_PRIO_CNT: 2120 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2121 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2122 break; 2123 case MLXSW_REG_PPCNT_TC_CNT: 2124 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2125 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2126 break; 2127 default: 2128 WARN_ON(1); 2129 return -EOPNOTSUPP; 2130 } 2131 return 0; 2132 } 2133 2134 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2135 enum mlxsw_reg_ppcnt_grp grp, int prio, 2136 u64 *data, int data_index) 2137 { 2138 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2139 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2140 struct mlxsw_sp_port_hw_stats *hw_stats; 2141 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2142 int i, len; 2143 int err; 2144 2145 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2146 if (err) 2147 return; 2148 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2149 for (i = 0; i < len; i++) { 2150 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2151 if (!hw_stats[i].cells_bytes) 2152 continue; 2153 data[data_index + i] = 
mlxsw_sp_cells_bytes(mlxsw_sp,
2154 data[data_index + i]);
2155 }
2156 }
2157
2158 static void mlxsw_sp_port_get_stats(struct net_device *dev,
2159 struct ethtool_stats *stats, u64 *data)
2160 {
2161 int i, data_index = 0;
2162
2163 /* IEEE 802.3 Counters */
2164 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2165 data, data_index);
2166 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2167
2168 /* Per-Priority Counters */
2169 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2170 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2171 data, data_index);
2172 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2173 }
2174
2175 /* Per-TC Counters */
2176 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2177 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2178 data, data_index);
2179 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2180 }
2181 }
2182
2183 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2184 {
2185 switch (sset) {
2186 case ETH_SS_STATS:
2187 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2188 default:
2189 return -EOPNOTSUPP;
2190 }
2191 }
2192
2193 struct mlxsw_sp_port_link_mode {
2194 enum ethtool_link_mode_bit_indices mask_ethtool;
2195 u32 mask;
2196 u32 speed;
2197 };
2198
2199 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2200 {
2201 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2202 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2203 .speed = SPEED_100,
2204 },
2205 {
2206 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2207 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2208 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2209 .speed = SPEED_1000,
2210 },
2211 {
2212 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2213 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2214 .speed = SPEED_10000,
2215 },
2216 {
2217 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2218 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2219 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2220 .speed = SPEED_10000,
2221 },
2222 {
2223 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2224 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2225 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2226 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2227 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2228 .speed = SPEED_10000,
2229 },
2230 {
2231 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2232 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2233 .speed = SPEED_20000,
2234 },
2235 {
2236 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2237 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2238 .speed = SPEED_40000,
2239 },
2240 {
2241 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2242 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2243 .speed = SPEED_40000,
2244 },
2245 {
2246 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2247 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2248 .speed = SPEED_40000,
2249 },
2250 {
2251 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2252 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2253 .speed = SPEED_40000,
2254 },
2255 {
2256 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2257 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2258 .speed = SPEED_25000,
2259 },
2260 {
2261 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2262 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2263 .speed = SPEED_25000,
2264 },
2265 {
2266 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2267 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2268 .speed = SPEED_25000,
2269 },
2275 {
2276 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2277 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2278 .speed = SPEED_50000,
2279 },
2280 {
2281 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2282 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2283 .speed = SPEED_50000,
2284 },
2285 {
2286 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2287 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2288 .speed = SPEED_50000,
2289 },
2290 {
2291 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2292 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2293 .speed = SPEED_56000,
2294 },
2295 {
2296 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2297 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2298 .speed = SPEED_56000,
2299 },
2300 {
2301 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2302 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2303 .speed = SPEED_56000,
2304 },
2305 {
2306 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2307 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2308 .speed = SPEED_56000,
2309 },
2310 {
2311 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2312 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2313 .speed = SPEED_100000,
2314 },
2315 {
2316 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2317 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2318 .speed = SPEED_100000,
2319 },
2320 {
2321 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2322 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2323 .speed = SPEED_100000,
2324 },
2325 {
2326 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2327 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2328 .speed = SPEED_100000,
2329 },
2330 };
2331
2332 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2333
2334 static void
2335 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2336 struct ethtool_link_ksettings *cmd)
2337 {
2338 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2339 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2340 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2341 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2342 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2343 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2344 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2345
2346 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2347 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2348 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2349 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2350 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2351 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2352 }
2353
2354 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2355 {
2356 int i;
2357
2358 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2359 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2360 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2361 mode);
2362 }
2363 }
2364
2365 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2366 struct ethtool_link_ksettings *cmd)
2367 {
2368 u32 speed = SPEED_UNKNOWN;
2369 u8 duplex = DUPLEX_UNKNOWN;
2370 int i;
2371
2372 if (!carrier_ok)
2373 goto out;
2374
2375 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2376 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2377 speed = mlxsw_sp_port_link_mode[i].speed;
2378 duplex = DUPLEX_FULL;
2379 break;
2380 }
2381 }
2382 out:
2383
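/* The carrier-down case skips the loop above, leaving the
 * SPEED_UNKNOWN / DUPLEX_UNKNOWN defaults in place.
 */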
cmd->base.speed = speed; 2384 cmd->base.duplex = duplex; 2385 } 2386 2387 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 2388 { 2389 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2390 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2391 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2392 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2393 return PORT_FIBRE; 2394 2395 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2396 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2397 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 2398 return PORT_DA; 2399 2400 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2401 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2402 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2403 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 2404 return PORT_NONE; 2405 2406 return PORT_OTHER; 2407 } 2408 2409 static u32 2410 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2411 { 2412 u32 ptys_proto = 0; 2413 int i; 2414 2415 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2416 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2417 cmd->link_modes.advertising)) 2418 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2419 } 2420 return ptys_proto; 2421 } 2422 2423 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2424 { 2425 u32 ptys_proto = 0; 2426 int i; 2427 2428 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2429 if (speed == mlxsw_sp_port_link_mode[i].speed) 2430 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2431 } 2432 return ptys_proto; 2433 } 2434 2435 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2436 { 2437 u32 ptys_proto = 0; 2438 int i; 2439 2440 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2441 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 2442 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2443 } 2444 return ptys_proto; 2445 } 2446 2447 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, 2448 struct ethtool_link_ksettings *cmd) 2449 { 2450 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2451 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2452 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2453 2454 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); 2455 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); 2456 } 2457 2458 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, 2459 struct ethtool_link_ksettings *cmd) 2460 { 2461 if (!autoneg) 2462 return; 2463 2464 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2465 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); 2466 } 2467 2468 static void 2469 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, 2470 struct ethtool_link_ksettings *cmd) 2471 { 2472 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) 2473 return; 2474 2475 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg); 2476 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); 2477 } 2478 2479 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 2480 struct ethtool_link_ksettings *cmd) 2481 { 2482 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp; 2483 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2484 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2485 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2486 u8 autoneg_status; 2487 bool autoneg; 2488 int err; 2489 2490 autoneg = mlxsw_sp_port->link.autoneg; 2491 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 2492 err = 
mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2493 if (err)
2494 return err;
2495 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2496 &eth_proto_oper);
2497
2498 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2499
2500 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2501
2502 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2503 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2504 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2505
2506 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2507 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2508 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2509 cmd);
2510
2511 return 0;
2512 }
2513
2514 static int
2515 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2516 const struct ethtool_link_ksettings *cmd)
2517 {
2518 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2519 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2520 char ptys_pl[MLXSW_REG_PTYS_LEN];
2521 u32 eth_proto_cap, eth_proto_new;
2522 bool autoneg;
2523 int err;
2524
2525 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2526 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2527 if (err)
2528 return err;
2529 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2530
2531 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2532 eth_proto_new = autoneg ?
2533 mlxsw_sp_to_ptys_advert_link(cmd) :
2534 mlxsw_sp_to_ptys_speed(cmd->base.speed);
2535
2536 eth_proto_new = eth_proto_new & eth_proto_cap;
2537 if (!eth_proto_new) {
2538 netdev_err(dev, "No supported speed requested\n");
2539 return -EINVAL;
2540 }
2541
2542 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2543 eth_proto_new);
2544 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2545 if (err)
2546 return err;
2547
/* Cache the autoneg state before the running check, so that it is
 * reported correctly even when the port is down. */
2548 mlxsw_sp_port->link.autoneg = autoneg;
2549
2550 if (!netif_running(dev))
2551 return 0;
2552
2553 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2554 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2555
2556 return 0;
2557 }
2558
2559 static int mlxsw_sp_flash_device(struct net_device *dev,
2560 struct ethtool_flash *flash)
2561 {
2562 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2563 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2564 const struct firmware *firmware;
2565 int err;
2566
2567 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2568 return -EOPNOTSUPP;
2569
2570 dev_hold(dev);
2571 rtnl_unlock();
2572
2573 err = request_firmware_direct(&firmware, flash->data, &dev->dev);
2574 if (err)
2575 goto out;
2576 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
2577 release_firmware(firmware);
2578 out:
2579 rtnl_lock();
2580 dev_put(dev);
2581 return err;
2582 }
2583
2584 #define MLXSW_SP_I2C_ADDR_LOW 0x50
2585 #define MLXSW_SP_I2C_ADDR_HIGH 0x51
2586 #define MLXSW_SP_EEPROM_PAGE_LENGTH 256
2587
2588 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
2589 u16 offset, u16 size, void *data,
2590 unsigned int *p_read_size)
2591 {
2592 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2593 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
2594 char mcia_pl[MLXSW_REG_MCIA_LEN];
2595 u16 i2c_addr;
2596 int status;
2597 int err;
2598
2599 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
2600
2601 if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
2602 offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
2603 /* Cross pages read, read until
offset 256 in low page */ 2604 size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; 2605 2606 i2c_addr = MLXSW_SP_I2C_ADDR_LOW; 2607 if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { 2608 i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; 2609 offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; 2610 } 2611 2612 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, 2613 0, 0, offset, size, i2c_addr); 2614 2615 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); 2616 if (err) 2617 return err; 2618 2619 status = mlxsw_reg_mcia_status_get(mcia_pl); 2620 if (status) 2621 return -EIO; 2622 2623 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); 2624 memcpy(data, eeprom_tmp, size); 2625 *p_read_size = size; 2626 2627 return 0; 2628 } 2629 2630 enum mlxsw_sp_eeprom_module_info_rev_id { 2631 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, 2632 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, 2633 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, 2634 }; 2635 2636 enum mlxsw_sp_eeprom_module_info_id { 2637 MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03, 2638 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, 2639 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, 2640 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, 2641 }; 2642 2643 enum mlxsw_sp_eeprom_module_info { 2644 MLXSW_SP_EEPROM_MODULE_INFO_ID, 2645 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID, 2646 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2647 }; 2648 2649 static int mlxsw_sp_get_module_info(struct net_device *netdev, 2650 struct ethtool_modinfo *modinfo) 2651 { 2652 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2653 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE]; 2654 u8 module_rev_id, module_id; 2655 unsigned int read_size; 2656 int err; 2657 2658 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, 2659 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2660 module_info, &read_size); 2661 if (err) 2662 return err; 2663 2664 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE) 2665 return -EIO; 2666 2667 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; 2668 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; 2669 2670 switch (module_id) { 2671 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: 2672 modinfo->type = ETH_MODULE_SFF_8436; 2673 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2674 break; 2675 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: 2676 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: 2677 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || 2678 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { 2679 modinfo->type = ETH_MODULE_SFF_8636; 2680 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2681 } else { 2682 modinfo->type = ETH_MODULE_SFF_8436; 2683 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2684 } 2685 break; 2686 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: 2687 modinfo->type = ETH_MODULE_SFF_8472; 2688 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2689 break; 2690 default: 2691 return -EINVAL; 2692 } 2693 2694 return 0; 2695 } 2696 2697 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 2698 struct ethtool_eeprom *ee, 2699 u8 *data) 2700 { 2701 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2702 int offset = ee->offset; 2703 unsigned int read_size; 2704 int i = 0; 2705 int err; 2706 2707 if (!ee->len) 2708 return -EINVAL; 2709 2710 memset(data, 0, ee->len); 2711 2712 while (i < ee->len) { 2713 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset, 2714 ee->len - i, data + i, 2715 &read_size); 2716 if (err) { 2717 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n"); 2718 return err; 2719 } 2720 2721 i += read_size; 2722 
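/* A single query may return less than requested, since reads are
 * capped by the MCIA transaction size and clamped at the low page
 * boundary. For example, a 16-byte read at offset 250 is truncated
 * to 6 bytes, and the next iteration starts at offset 256, which
 * mlxsw_sp_query_module_eeprom() maps to the high I2C address.
 */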
offset += read_size;
2723 }
2724
2725 return 0;
2726 }
2727
2728 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2729 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2730 .get_link = ethtool_op_get_link,
2731 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2732 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
2733 .get_strings = mlxsw_sp_port_get_strings,
2734 .set_phys_id = mlxsw_sp_port_set_phys_id,
2735 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2736 .get_sset_count = mlxsw_sp_port_get_sset_count,
2737 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2738 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
2739 .flash_device = mlxsw_sp_flash_device,
2740 .get_module_info = mlxsw_sp_get_module_info,
2741 .get_module_eeprom = mlxsw_sp_get_module_eeprom,
2742 };
2743
2744 static int
2745 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2746 {
2747 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2748 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2749 char ptys_pl[MLXSW_REG_PTYS_LEN];
2750 u32 eth_proto_admin;
2751
2752 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2753 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2754 eth_proto_admin);
2755 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2756 }
2757
2758 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2759 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2760 bool dwrr, u8 dwrr_weight)
2761 {
2762 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2763 char qeec_pl[MLXSW_REG_QEEC_LEN];
2764
2765 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2766 next_index);
2767 mlxsw_reg_qeec_de_set(qeec_pl, true);
2768 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2769 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2770 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2771 }
2772
2773 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2774 enum mlxsw_reg_qeec_hr hr, u8 index,
2775 u8 next_index, u32 maxrate)
2776 {
2777 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2778 char qeec_pl[MLXSW_REG_QEEC_LEN];
2779
2780 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2781 next_index);
2782 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2783 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2784 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2785 }
2786
2787 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2788 u8 switch_prio, u8 tclass)
2789 {
2790 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2791 char qtct_pl[MLXSW_REG_QTCT_LEN];
2792
2793 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2794 tclass);
2795 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2796 }
2797
2798 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2799 {
2800 int err, i;
2801
2802 /* Set up the elements hierarchy, so that each TC is linked to
2803 * one subgroup, which are all members of the same group.
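* The resulting scheduling tree is: port -> group 0 -> subgroup i ->
* TC i, with one subgroup per traffic class.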
2804 */
2805 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2806 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2807 0);
2808 if (err)
2809 return err;
2810 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2811 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2812 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2813 0, false, 0);
2814 if (err)
2815 return err;
2816 }
2817 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2818 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2819 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2820 false, 0);
2821 if (err)
2822 return err;
2823 }
2824
2825 /* Make sure the max shaper is disabled in all hierarchies that
2826 * support it.
2827 */
2828 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2829 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2830 MLXSW_REG_QEEC_MAS_DIS);
2831 if (err)
2832 return err;
2833 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2834 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2835 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2836 i, 0,
2837 MLXSW_REG_QEEC_MAS_DIS);
2838 if (err)
2839 return err;
2840 }
2841 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2842 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2843 MLXSW_REG_QEEC_HIERARCY_TC,
2844 i, i,
2845 MLXSW_REG_QEEC_MAS_DIS);
2846 if (err)
2847 return err;
2848 }
2849
2850 /* Map all priorities to traffic class 0. */
2851 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2852 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2853 if (err)
2854 return err;
2855 }
2856
2857 return 0;
2858 }
2859
2860 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2861 bool split, u8 module, u8 width, u8 lane)
2862 {
2863 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2864 struct mlxsw_sp_port *mlxsw_sp_port;
2865 struct net_device *dev;
2866 int err;
2867
2868 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2869 if (err) {
2870 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2871 local_port);
2872 return err;
2873 }
2874
2875 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2876 if (!dev) {
2877 err = -ENOMEM;
2878 goto err_alloc_etherdev;
2879 }
2880 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2881 mlxsw_sp_port = netdev_priv(dev);
2882 mlxsw_sp_port->dev = dev;
2883 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2884 mlxsw_sp_port->local_port = local_port;
2885 mlxsw_sp_port->pvid = 1;
2886 mlxsw_sp_port->split = split;
2887 mlxsw_sp_port->mapping.module = module;
2888 mlxsw_sp_port->mapping.width = width;
2889 mlxsw_sp_port->mapping.lane = lane;
2890 mlxsw_sp_port->link.autoneg = 1;
2891 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
2892 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2893
2894 mlxsw_sp_port->pcpu_stats =
2895 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2896 if (!mlxsw_sp_port->pcpu_stats) {
2897 err = -ENOMEM;
2898 goto err_alloc_stats;
2899 }
2900
2901 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2902 GFP_KERNEL);
2903 if (!mlxsw_sp_port->sample) {
2904 err = -ENOMEM;
2905 goto err_alloc_sample;
2906 }
2907
2908 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
2909 &update_stats_cache);
2910
2911 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2912 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2913
2914 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
2915 if (err) {
2916 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2917 mlxsw_sp_port->local_port);
2918 goto err_port_module_map;
2919 }
2920
2921 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2922 if (err) {
2923 dev_err(mlxsw_sp->bus_info->dev, "Port
%d: Failed to set SWID\n",
2924 mlxsw_sp_port->local_port);
2925 goto err_port_swid_set;
2926 }
2927
2928 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2929 if (err) {
2930 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2931 mlxsw_sp_port->local_port);
2932 goto err_dev_addr_init;
2933 }
2934
2935 netif_carrier_off(dev);
2936
2937 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2938 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2939 dev->hw_features |= NETIF_F_HW_TC;
2940
2941 dev->min_mtu = 0;
2942 dev->max_mtu = ETH_MAX_MTU;
2943
2944 /* Each packet needs to have a Tx header (metadata) on top of all
2945 * other headers.
2946 */
2947 dev->needed_headroom = MLXSW_TXHDR_LEN;
2948
2949 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2950 if (err) {
2951 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2952 mlxsw_sp_port->local_port);
2953 goto err_port_system_port_mapping_set;
2954 }
2955
2956 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2957 if (err) {
2958 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2959 mlxsw_sp_port->local_port);
2960 goto err_port_speed_by_width_set;
2961 }
2962
2963 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2964 if (err) {
2965 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2966 mlxsw_sp_port->local_port);
2967 goto err_port_mtu_set;
2968 }
2969
2970 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2971 if (err)
2972 goto err_port_admin_status_set;
2973
2974 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2975 if (err) {
2976 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2977 mlxsw_sp_port->local_port);
2978 goto err_port_buffers_init;
2979 }
2980
2981 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2982 if (err) {
2983 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2984 mlxsw_sp_port->local_port);
2985 goto err_port_ets_init;
2986 }
2987
2988 /* ETS and buffers must be initialized before DCB.
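* mlxsw_sp_port_dcb_init() below programs its defaults on top of the
* scheduling elements and headroom configured above.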
*/
2989 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2990 if (err) {
2991 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2992 mlxsw_sp_port->local_port);
2993 goto err_port_dcb_init;
2994 }
2995
2996 err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
2997 if (err) {
2998 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
2999 mlxsw_sp_port->local_port);
3000 goto err_port_fids_init;
3001 }
3002
3003 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
3004 if (IS_ERR(mlxsw_sp_port_vlan)) {
3005 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
3006 mlxsw_sp_port->local_port);
err = PTR_ERR(mlxsw_sp_port_vlan);
3007 goto err_port_vlan_get;
3008 }
3009
3010 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
3011 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
3012 err = register_netdev(dev);
3013 if (err) {
3014 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
3015 mlxsw_sp_port->local_port);
3016 goto err_register_netdev;
3017 }
3018
3019 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
3020 mlxsw_sp_port, dev, mlxsw_sp_port->split,
3021 module);
3022 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
3023 return 0;
3024
3025 err_register_netdev:
3026 mlxsw_sp->ports[local_port] = NULL;
3027 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
3028 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
3029 err_port_vlan_get:
3030 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3031 err_port_fids_init:
3032 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3033 err_port_dcb_init:
3034 err_port_ets_init:
3035 err_port_buffers_init:
3036 err_port_admin_status_set:
3037 err_port_mtu_set:
3038 err_port_speed_by_width_set:
3039 err_port_system_port_mapping_set:
3040 err_dev_addr_init:
3041 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3042 err_port_swid_set:
3043 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3044 err_port_module_map:
3045 kfree(mlxsw_sp_port->sample);
3046 err_alloc_sample:
3047 free_percpu(mlxsw_sp_port->pcpu_stats);
3048 err_alloc_stats:
3049 free_netdev(dev);
3050 err_alloc_etherdev:
3051 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3052 return err;
3053 }
3054
3055 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3056 {
3057 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3058
3059 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
3060 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
3061 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
3062 mlxsw_sp->ports[local_port] = NULL;
3063 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
3064 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
3065 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3066 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3067 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3068 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3069 kfree(mlxsw_sp_port->sample);
3070 free_percpu(mlxsw_sp_port->pcpu_stats);
3071 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
3072 free_netdev(mlxsw_sp_port->dev);
3073 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3074 }
3075
3076 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3077 {
3078 return mlxsw_sp->ports[local_port] != NULL;
3079 }
3080
3081 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3082 {
3083 int i;
3084
3085 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
3086 if (mlxsw_sp_port_created(mlxsw_sp, i))
3087 mlxsw_sp_port_remove(mlxsw_sp, i);
3088
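/* All ports are gone; release the lookup tables that were allocated
 * in mlxsw_sp_ports_create().
 */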
kfree(mlxsw_sp->port_to_module); 3089 kfree(mlxsw_sp->ports); 3090 } 3091 3092 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3093 { 3094 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3095 u8 module, width, lane; 3096 size_t alloc_size; 3097 int i; 3098 int err; 3099 3100 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3101 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3102 if (!mlxsw_sp->ports) 3103 return -ENOMEM; 3104 3105 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL); 3106 if (!mlxsw_sp->port_to_module) { 3107 err = -ENOMEM; 3108 goto err_port_to_module_alloc; 3109 } 3110 3111 for (i = 1; i < max_ports; i++) { 3112 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3113 &width, &lane); 3114 if (err) 3115 goto err_port_module_info_get; 3116 if (!width) 3117 continue; 3118 mlxsw_sp->port_to_module[i] = module; 3119 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3120 module, width, lane); 3121 if (err) 3122 goto err_port_create; 3123 } 3124 return 0; 3125 3126 err_port_create: 3127 err_port_module_info_get: 3128 for (i--; i >= 1; i--) 3129 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3130 mlxsw_sp_port_remove(mlxsw_sp, i); 3131 kfree(mlxsw_sp->port_to_module); 3132 err_port_to_module_alloc: 3133 kfree(mlxsw_sp->ports); 3134 return err; 3135 } 3136 3137 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3138 { 3139 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3140 3141 return local_port - offset; 3142 } 3143 3144 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3145 u8 module, unsigned int count) 3146 { 3147 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3148 int err, i; 3149 3150 for (i = 0; i < count; i++) { 3151 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 3152 module, width, i * width); 3153 if (err) 3154 goto err_port_create; 3155 } 3156 3157 return 0; 3158 3159 err_port_create: 3160 for (i--; i >= 0; i--) 3161 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3162 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3163 return err; 3164 } 3165 3166 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3167 u8 base_port, unsigned int count) 3168 { 3169 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 3170 int i; 3171 3172 /* Split by four means we need to re-create two ports, otherwise 3173 * only one. 
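* Each re-created port spans the full module width again and is
* mapped at every second local port starting from the base port.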
3174 */ 3175 count = count / 2; 3176 3177 for (i = 0; i < count; i++) { 3178 local_port = base_port + i * 2; 3179 module = mlxsw_sp->port_to_module[local_port]; 3180 3181 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 3182 width, 0); 3183 } 3184 } 3185 3186 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3187 unsigned int count) 3188 { 3189 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3190 struct mlxsw_sp_port *mlxsw_sp_port; 3191 u8 module, cur_width, base_port; 3192 int i; 3193 int err; 3194 3195 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3196 if (!mlxsw_sp_port) { 3197 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3198 local_port); 3199 return -EINVAL; 3200 } 3201 3202 module = mlxsw_sp_port->mapping.module; 3203 cur_width = mlxsw_sp_port->mapping.width; 3204 3205 if (count != 2 && count != 4) { 3206 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 3207 return -EINVAL; 3208 } 3209 3210 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 3211 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3212 return -EINVAL; 3213 } 3214 3215 /* Make sure we have enough slave (even) ports for the split. */ 3216 if (count == 2) { 3217 base_port = local_port; 3218 if (mlxsw_sp->ports[base_port + 1]) { 3219 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3220 return -EINVAL; 3221 } 3222 } else { 3223 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3224 if (mlxsw_sp->ports[base_port + 1] || 3225 mlxsw_sp->ports[base_port + 3]) { 3226 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3227 return -EINVAL; 3228 } 3229 } 3230 3231 for (i = 0; i < count; i++) 3232 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3233 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3234 3235 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 3236 if (err) { 3237 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3238 goto err_port_split_create; 3239 } 3240 3241 return 0; 3242 3243 err_port_split_create: 3244 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3245 return err; 3246 } 3247 3248 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 3249 { 3250 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3251 struct mlxsw_sp_port *mlxsw_sp_port; 3252 u8 cur_width, base_port; 3253 unsigned int count; 3254 int i; 3255 3256 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3257 if (!mlxsw_sp_port) { 3258 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3259 local_port); 3260 return -EINVAL; 3261 } 3262 3263 if (!mlxsw_sp_port->split) { 3264 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 3265 return -EINVAL; 3266 } 3267 3268 cur_width = mlxsw_sp_port->mapping.width; 3269 count = cur_width == 1 ? 4 : 2; 3270 3271 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3272 3273 /* Determine which ports to remove. 
*/ 3274 if (count == 2 && local_port >= base_port + 2) 3275 base_port = base_port + 2; 3276 3277 for (i = 0; i < count; i++) 3278 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3279 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3280 3281 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3282 3283 return 0; 3284 } 3285 3286 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3287 char *pude_pl, void *priv) 3288 { 3289 struct mlxsw_sp *mlxsw_sp = priv; 3290 struct mlxsw_sp_port *mlxsw_sp_port; 3291 enum mlxsw_reg_pude_oper_status status; 3292 u8 local_port; 3293 3294 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3295 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3296 if (!mlxsw_sp_port) 3297 return; 3298 3299 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3300 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3301 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3302 netif_carrier_on(mlxsw_sp_port->dev); 3303 } else { 3304 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3305 netif_carrier_off(mlxsw_sp_port->dev); 3306 } 3307 } 3308 3309 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3310 u8 local_port, void *priv) 3311 { 3312 struct mlxsw_sp *mlxsw_sp = priv; 3313 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3314 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3315 3316 if (unlikely(!mlxsw_sp_port)) { 3317 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3318 local_port); 3319 return; 3320 } 3321 3322 skb->dev = mlxsw_sp_port->dev; 3323 3324 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3325 u64_stats_update_begin(&pcpu_stats->syncp); 3326 pcpu_stats->rx_packets++; 3327 pcpu_stats->rx_bytes += skb->len; 3328 u64_stats_update_end(&pcpu_stats->syncp); 3329 3330 skb->protocol = eth_type_trans(skb, skb->dev); 3331 netif_receive_skb(skb); 3332 } 3333 3334 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3335 void *priv) 3336 { 3337 skb->offload_fwd_mark = 1; 3338 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3339 } 3340 3341 static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb, 3342 u8 local_port, void *priv) 3343 { 3344 skb->offload_mr_fwd_mark = 1; 3345 skb->offload_fwd_mark = 1; 3346 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3347 } 3348 3349 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 3350 void *priv) 3351 { 3352 struct mlxsw_sp *mlxsw_sp = priv; 3353 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3354 struct psample_group *psample_group; 3355 u32 size; 3356 3357 if (unlikely(!mlxsw_sp_port)) { 3358 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 3359 local_port); 3360 goto out; 3361 } 3362 if (unlikely(!mlxsw_sp_port->sample)) { 3363 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 3364 local_port); 3365 goto out; 3366 } 3367 3368 size = mlxsw_sp_port->sample->truncate ? 
3369 mlxsw_sp_port->sample->trunc_size : skb->len; 3370 3371 rcu_read_lock(); 3372 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 3373 if (!psample_group) 3374 goto out_unlock; 3375 psample_sample_packet(psample_group, skb, size, 3376 mlxsw_sp_port->dev->ifindex, 0, 3377 mlxsw_sp_port->sample->rate); 3378 out_unlock: 3379 rcu_read_unlock(); 3380 out: 3381 consume_skb(skb); 3382 } 3383 3384 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3385 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 3386 _is_ctrl, SP_##_trap_group, DISCARD) 3387 3388 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3389 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 3390 _is_ctrl, SP_##_trap_group, DISCARD) 3391 3392 #define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3393 MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \ 3394 _is_ctrl, SP_##_trap_group, DISCARD) 3395 3396 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 3397 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 3398 3399 static const struct mlxsw_listener mlxsw_sp_listener[] = { 3400 /* Events */ 3401 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 3402 /* L2 traps */ 3403 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 3404 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 3405 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 3406 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 3407 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 3408 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 3409 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 3410 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 3411 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 3412 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 3413 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 3414 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 3415 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 3416 false), 3417 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3418 false), 3419 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 3420 false), 3421 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3422 false), 3423 /* L3 traps */ 3424 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3425 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3426 MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3427 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 3428 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 3429 false), 3430 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 3431 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 3432 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 3433 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 3434 false), 3435 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 3436 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 3437 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 3438 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 3439 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 3440 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 3441 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3442 false), 3443 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, 
TRAP_TO_CPU, IPV6_ND, 3444 false), 3445 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3446 false), 3447 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 3448 false), 3449 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 3450 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 3451 false), 3452 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), 3453 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), 3454 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 3455 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 3456 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3457 /* PKT Sample trap */ 3458 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 3459 false, SP_IP2ME, DISCARD), 3460 /* ACL trap */ 3461 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 3462 /* Multicast Router Traps */ 3463 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 3464 MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), 3465 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 3466 MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 3467 }; 3468 3469 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 3470 { 3471 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 3472 enum mlxsw_reg_qpcr_ir_units ir_units; 3473 int max_cpu_policers; 3474 bool is_bytes; 3475 u8 burst_size; 3476 u32 rate; 3477 int i, err; 3478 3479 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 3480 return -EIO; 3481 3482 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3483 3484 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 3485 for (i = 0; i < max_cpu_policers; i++) { 3486 is_bytes = false; 3487 switch (i) { 3488 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3489 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3490 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3491 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3492 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3493 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3494 rate = 128; 3495 burst_size = 7; 3496 break; 3497 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3498 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3499 rate = 16 * 1024; 3500 burst_size = 10; 3501 break; 3502 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3503 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3504 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3505 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3506 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3507 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3508 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3509 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3510 rate = 1024; 3511 burst_size = 7; 3512 break; 3513 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3514 is_bytes = true; 3515 rate = 4 * 1024; 3516 burst_size = 4; 3517 break; 3518 default: 3519 continue; 3520 } 3521 3522 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 3523 burst_size); 3524 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 3525 if (err) 3526 return err; 3527 } 3528 3529 return 0; 3530 } 3531 3532 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 3533 { 3534 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3535 enum mlxsw_reg_htgt_trap_group i; 3536 int max_cpu_policers; 3537 int max_trap_groups; 3538 u8 priority, tc; 3539 u16 policer_id; 3540 int err; 3541 3542 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 3543 return -EIO; 3544 3545 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 3546 max_cpu_policers = 
MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3547 3548 for (i = 0; i < max_trap_groups; i++) { 3549 policer_id = i; 3550 switch (i) { 3551 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3552 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3553 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3554 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3555 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3556 priority = 5; 3557 tc = 5; 3558 break; 3559 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3560 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3561 priority = 4; 3562 tc = 4; 3563 break; 3564 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3565 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3566 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3567 priority = 3; 3568 tc = 3; 3569 break; 3570 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3571 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3572 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3573 priority = 2; 3574 tc = 2; 3575 break; 3576 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3577 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3578 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3579 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3580 priority = 1; 3581 tc = 1; 3582 break; 3583 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 3584 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 3585 tc = MLXSW_REG_HTGT_DEFAULT_TC; 3586 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 3587 break; 3588 default: 3589 continue; 3590 } 3591 3592 if (max_cpu_policers <= policer_id && 3593 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 3594 return -EIO; 3595 3596 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 3597 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3598 if (err) 3599 return err; 3600 } 3601 3602 return 0; 3603 } 3604 3605 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 3606 { 3607 int i; 3608 int err; 3609 3610 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 3611 if (err) 3612 return err; 3613 3614 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 3615 if (err) 3616 return err; 3617 3618 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3619 err = mlxsw_core_trap_register(mlxsw_sp->core, 3620 &mlxsw_sp_listener[i], 3621 mlxsw_sp); 3622 if (err) 3623 goto err_listener_register; 3624 3625 } 3626 return 0; 3627 3628 err_listener_register: 3629 for (i--; i >= 0; i--) { 3630 mlxsw_core_trap_unregister(mlxsw_sp->core, 3631 &mlxsw_sp_listener[i], 3632 mlxsw_sp); 3633 } 3634 return err; 3635 } 3636 3637 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 3638 { 3639 int i; 3640 3641 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3642 mlxsw_core_trap_unregister(mlxsw_sp->core, 3643 &mlxsw_sp_listener[i], 3644 mlxsw_sp); 3645 } 3646 } 3647 3648 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 3649 { 3650 char slcr_pl[MLXSW_REG_SLCR_LEN]; 3651 int err; 3652 3653 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 3654 MLXSW_REG_SLCR_LAG_HASH_DMAC | 3655 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 3656 MLXSW_REG_SLCR_LAG_HASH_VLANID | 3657 MLXSW_REG_SLCR_LAG_HASH_SIP | 3658 MLXSW_REG_SLCR_LAG_HASH_DIP | 3659 MLXSW_REG_SLCR_LAG_HASH_SPORT | 3660 MLXSW_REG_SLCR_LAG_HASH_DPORT | 3661 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 3662 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 3663 if (err) 3664 return err; 3665 3666 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 3667 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 3668 return -EIO; 3669 3670 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 3671 sizeof(struct mlxsw_sp_upper), 3672 GFP_KERNEL); 3673 if (!mlxsw_sp->lags) 
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
		return err;
	}

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* Initialize netdevice notifier after router is initialized, so that
	 * the event handler can use router structures.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}
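
/* Teardown mirrors mlxsw_sp_init(): resources are released in the
 * reverse order of their initialization.
 */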
3775 */ 3776 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 3777 err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3778 if (err) { 3779 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 3780 goto err_netdev_notifier; 3781 } 3782 3783 err = mlxsw_sp_span_init(mlxsw_sp); 3784 if (err) { 3785 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3786 goto err_span_init; 3787 } 3788 3789 err = mlxsw_sp_acl_init(mlxsw_sp); 3790 if (err) { 3791 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3792 goto err_acl_init; 3793 } 3794 3795 err = mlxsw_sp_dpipe_init(mlxsw_sp); 3796 if (err) { 3797 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 3798 goto err_dpipe_init; 3799 } 3800 3801 err = mlxsw_sp_ports_create(mlxsw_sp); 3802 if (err) { 3803 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3804 goto err_ports_create; 3805 } 3806 3807 return 0; 3808 3809 err_ports_create: 3810 mlxsw_sp_dpipe_fini(mlxsw_sp); 3811 err_dpipe_init: 3812 mlxsw_sp_acl_fini(mlxsw_sp); 3813 err_acl_init: 3814 mlxsw_sp_span_fini(mlxsw_sp); 3815 err_span_init: 3816 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3817 err_netdev_notifier: 3818 mlxsw_sp_router_fini(mlxsw_sp); 3819 err_router_init: 3820 mlxsw_sp_afa_fini(mlxsw_sp); 3821 err_afa_init: 3822 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3823 err_counter_pool_init: 3824 mlxsw_sp_switchdev_fini(mlxsw_sp); 3825 err_switchdev_init: 3826 mlxsw_sp_lag_fini(mlxsw_sp); 3827 err_lag_init: 3828 mlxsw_sp_buffers_fini(mlxsw_sp); 3829 err_buffers_init: 3830 mlxsw_sp_traps_fini(mlxsw_sp); 3831 err_traps_init: 3832 mlxsw_sp_fids_fini(mlxsw_sp); 3833 err_fids_init: 3834 mlxsw_sp_kvdl_fini(mlxsw_sp); 3835 return err; 3836 } 3837 3838 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3839 { 3840 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3841 3842 mlxsw_sp_ports_remove(mlxsw_sp); 3843 mlxsw_sp_dpipe_fini(mlxsw_sp); 3844 mlxsw_sp_acl_fini(mlxsw_sp); 3845 mlxsw_sp_span_fini(mlxsw_sp); 3846 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3847 mlxsw_sp_router_fini(mlxsw_sp); 3848 mlxsw_sp_afa_fini(mlxsw_sp); 3849 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3850 mlxsw_sp_switchdev_fini(mlxsw_sp); 3851 mlxsw_sp_lag_fini(mlxsw_sp); 3852 mlxsw_sp_buffers_fini(mlxsw_sp); 3853 mlxsw_sp_traps_fini(mlxsw_sp); 3854 mlxsw_sp_fids_fini(mlxsw_sp); 3855 mlxsw_sp_kvdl_fini(mlxsw_sp); 3856 } 3857 3858 static const struct mlxsw_config_profile mlxsw_sp_config_profile = { 3859 .used_max_vepa_channels = 1, 3860 .max_vepa_channels = 0, 3861 .used_max_mid = 1, 3862 .max_mid = MLXSW_SP_MID_MAX, 3863 .used_max_pgt = 1, 3864 .max_pgt = 0, 3865 .used_flood_tables = 1, 3866 .used_flood_mode = 1, 3867 .flood_mode = 3, 3868 .max_fid_offset_flood_tables = 3, 3869 .fid_offset_flood_table_size = VLAN_N_VID - 1, 3870 .max_fid_flood_tables = 3, 3871 .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX, 3872 .used_max_ib_mc = 1, 3873 .max_ib_mc = 0, 3874 .used_max_pkey = 1, 3875 .max_pkey = 0, 3876 .used_kvd_split_data = 1, 3877 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY, 3878 .kvd_hash_single_parts = 59, 3879 .kvd_hash_double_parts = 41, 3880 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3881 .swid_config = { 3882 { 3883 .used_type = 1, 3884 .type = MLXSW_PORT_SWID_TYPE_ETH, 3885 } 3886 }, 3887 .resource_query_enable = 1, 3888 }; 3889 3890 static struct mlxsw_driver mlxsw_sp_driver = { 3891 .kind = mlxsw_sp_driver_name, 3892 .priv_size = sizeof(struct mlxsw_sp), 3893 .init = 
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = mlxsw_sp_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}
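
/* As mlxsw_sp_port_dev_lower_find(), but for callers already holding
 * rcu_read_lock().
 */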
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
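
/* Return the LAG ID already bound to @lag_dev if one exists, otherwise
 * the first free ID. -EBUSY means all LAG IDs are in use.
 */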
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG(extack,
			       "spectrum: Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG(extack,
			       "spectrum: LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
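
/* Undo mlxsw_sp_port_lag_join(): take the port out of the collector,
 * flush its VLANs and destroy the LAG in hardware when the last port
 * leaves.
 */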
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;
	return 0;

err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
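
/* PRECHANGEUPPER is used to veto unsupported topologies before the stack
 * commits to them; CHANGEUPPER then mirrors the new topology (bridge,
 * LAG or OVS enslavement) into the device.
 */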
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}
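
/* Replay the event on each mlxsw port enslaved to the LAG device. */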
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev)) {
			NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev)) {
			NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);

	return 0;
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}
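
/* Top-level netdevice notifier: dispatch the event according to the
 * device type (IP-in-IP, router port, VRF, physical port, LAG, VLAN).
 */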
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (mlxsw_sp_netdev_is_ipip(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_event(mlxsw_sp, dev, event, ptr);
	else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
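/* Declare the firmware image the driver may request at probe time, so
 * that packaging tools can bundle it with the module.
 */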
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);