1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/random.h> 25 #include <net/switchdev.h> 26 #include <net/pkt_cls.h> 27 #include <net/tc_act/tc_mirred.h> 28 #include <net/netevent.h> 29 #include <net/tc_act/tc_sample.h> 30 #include <net/addrconf.h> 31 32 #include "spectrum.h" 33 #include "pci.h" 34 #include "core.h" 35 #include "reg.h" 36 #include "port.h" 37 #include "trap.h" 38 #include "txheader.h" 39 #include "spectrum_cnt.h" 40 #include "spectrum_dpipe.h" 41 #include "spectrum_acl_flex_actions.h" 42 #include "spectrum_span.h" 43 #include "../mlxfw/mlxfw.h" 44 45 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 46 47 #define MLXSW_SP1_FWREV_MAJOR 13 48 #define MLXSW_SP1_FWREV_MINOR 1703 49 #define MLXSW_SP1_FWREV_SUBMINOR 4 50 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 51 52 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 53 .major = MLXSW_SP1_FWREV_MAJOR, 54 .minor = MLXSW_SP1_FWREV_MINOR, 55 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 56 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 57 }; 58 59 #define MLXSW_SP1_FW_FILENAME \ 60 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 61 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 62 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 63 64 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 65 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 66 static const char mlxsw_sp_driver_version[] = "1.0"; 67 68 /* tx_hdr_version 69 * Tx header version. 70 * Must be set to 1. 71 */ 72 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 73 74 /* tx_hdr_ctl 75 * Packet control type. 76 * 0 - Ethernet control (e.g. EMADs, LACP) 77 * 1 - Ethernet data 78 */ 79 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 80 81 /* tx_hdr_proto 82 * Packet protocol type. Must be set to 1 (Ethernet). 83 */ 84 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 85 86 /* tx_hdr_rx_is_router 87 * Packet is sent from the router. Valid for data packets only. 88 */ 89 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 90 91 /* tx_hdr_fid_valid 92 * Indicates if the 'fid' field is valid and should be used for 93 * forwarding lookup. Valid for data packets only. 94 */ 95 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 96 97 /* tx_hdr_swid 98 * Switch partition ID. Must be set to 0. 99 */ 100 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 101 102 /* tx_hdr_control_tclass 103 * Indicates if the packet should use the control TClass and not one 104 * of the data TClasses. 105 */ 106 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 107 108 /* tx_hdr_etclass 109 * Egress TClass to be used on the egress device on the egress port. 110 */ 111 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 112 113 /* tx_hdr_port_mid 114 * Destination local port for unicast packets. 115 * Destination multicast ID for multicast packets. 116 * 117 * Control packets are directed to a specific egress port, while data 118 * packets are transmitted through the CPU port (0) into the switch partition, 119 * where forwarding rules are applied. 120 */ 121 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 122 123 /* tx_hdr_fid 124 * Forwarding ID used for L2 forwarding lookup. 
Valid only if 'fid_valid' is 125 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 126 * Valid for data packets only. 127 */ 128 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 129 130 /* tx_hdr_type 131 * 0 - Data packets 132 * 6 - Control packets 133 */ 134 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 135 136 struct mlxsw_sp_mlxfw_dev { 137 struct mlxfw_dev mlxfw_dev; 138 struct mlxsw_sp *mlxsw_sp; 139 }; 140 141 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 142 u16 component_index, u32 *p_max_size, 143 u8 *p_align_bits, u16 *p_max_write_size) 144 { 145 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 146 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 147 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 148 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 149 int err; 150 151 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 152 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 153 if (err) 154 return err; 155 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 156 p_max_write_size); 157 158 *p_align_bits = max_t(u8, *p_align_bits, 2); 159 *p_max_write_size = min_t(u16, *p_max_write_size, 160 MLXSW_REG_MCDA_MAX_DATA_LEN); 161 return 0; 162 } 163 164 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 165 { 166 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 167 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 168 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 169 char mcc_pl[MLXSW_REG_MCC_LEN]; 170 u8 control_state; 171 int err; 172 173 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 174 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 175 if (err) 176 return err; 177 178 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 179 if (control_state != MLXFW_FSM_STATE_IDLE) 180 return -EBUSY; 181 182 mlxsw_reg_mcc_pack(mcc_pl, 183 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 184 0, *fwhandle, 0); 185 return mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(mcc), mcc_pl); 186 } 187 188 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 189 u32 fwhandle, u16 component_index, 190 u32 component_size) 191 { 192 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 193 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 194 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 195 char mcc_pl[MLXSW_REG_MCC_LEN]; 196 197 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 198 component_index, fwhandle, component_size); 199 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 200 } 201 202 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 203 u32 fwhandle, u8 *data, u16 size, 204 u32 offset) 205 { 206 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 207 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 208 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 209 char mcda_pl[MLXSW_REG_MCDA_LEN]; 210 211 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 212 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 213 } 214 215 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 216 u32 fwhandle, u16 component_index) 217 { 218 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 219 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 220 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 221 char mcc_pl[MLXSW_REG_MCC_LEN]; 222 223 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 224 component_index, fwhandle, 0); 225 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 226 } 227 228 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 229 { 230 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 231 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 232 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 233 char mcc_pl[MLXSW_REG_MCC_LEN]; 234 235 mlxsw_reg_mcc_pack(mcc_pl, 
MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 236 fwhandle, 0); 237 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 238 } 239 240 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 241 enum mlxfw_fsm_state *fsm_state, 242 enum mlxfw_fsm_state_err *fsm_state_err) 243 { 244 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 245 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 246 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 247 char mcc_pl[MLXSW_REG_MCC_LEN]; 248 u8 control_state; 249 u8 error_code; 250 int err; 251 252 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 253 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 254 if (err) 255 return err; 256 257 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 258 *fsm_state = control_state; 259 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 260 MLXFW_FSM_STATE_ERR_MAX); 261 return 0; 262 } 263 264 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 265 { 266 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 267 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 268 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 269 char mcc_pl[MLXSW_REG_MCC_LEN]; 270 271 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 272 fwhandle, 0); 273 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 274 } 275 276 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 277 { 278 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 279 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 280 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 281 char mcc_pl[MLXSW_REG_MCC_LEN]; 282 283 mlxsw_reg_mcc_pack(mcc_pl, 284 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 285 fwhandle, 0); 286 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 287 } 288 289 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 290 .component_query = 
mlxsw_sp_component_query, 291 .fsm_lock = mlxsw_sp_fsm_lock, 292 .fsm_component_update = mlxsw_sp_fsm_component_update, 293 .fsm_block_download = mlxsw_sp_fsm_block_download, 294 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 295 .fsm_activate = mlxsw_sp_fsm_activate, 296 .fsm_query_state = mlxsw_sp_fsm_query_state, 297 .fsm_cancel = mlxsw_sp_fsm_cancel, 298 .fsm_release = mlxsw_sp_fsm_release 299 }; 300 301 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 302 const struct firmware *firmware) 303 { 304 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 305 .mlxfw_dev = { 306 .ops = &mlxsw_sp_mlxfw_dev_ops, 307 .psid = mlxsw_sp->bus_info->psid, 308 .psid_size = strlen(mlxsw_sp->bus_info->psid), 309 }, 310 .mlxsw_sp = mlxsw_sp 311 }; 312 313 return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware); 314 } 315 316 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 317 { 318 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 319 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 320 const char *fw_filename = mlxsw_sp->fw_filename; 321 union devlink_param_value value; 322 const struct firmware *firmware; 323 int err; 324 325 /* Don't check if driver does not require it */ 326 if (!req_rev || !fw_filename) 327 return 0; 328 329 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 330 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 331 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 332 &value); 333 if (err) 334 return err; 335 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 336 return 0; 337 338 /* Validate driver & FW are compatible */ 339 if (rev->major != req_rev->major) { 340 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 341 rev->major, req_rev->major); 342 return -EINVAL; 343 } 344 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 345 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && 346 (rev->minor > req_rev->minor || 347 
(rev->minor == req_rev->minor && 348 rev->subminor >= req_rev->subminor))) 349 return 0; 350 351 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 352 rev->major, rev->minor, rev->subminor); 353 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 354 fw_filename); 355 356 err = request_firmware_direct(&firmware, fw_filename, 357 mlxsw_sp->bus_info->dev); 358 if (err) { 359 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 360 fw_filename); 361 return err; 362 } 363 364 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 365 release_firmware(firmware); 366 if (err) 367 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 368 369 /* On FW flash success, tell the caller FW reset is needed 370 * if current FW supports it. 371 */ 372 if (rev->minor >= req_rev->can_reset_minor) 373 return err ? err : -EAGAIN; 374 else 375 return 0; 376 } 377 378 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 379 unsigned int counter_index, u64 *packets, 380 u64 *bytes) 381 { 382 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 383 int err; 384 385 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 386 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 387 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 388 if (err) 389 return err; 390 if (packets) 391 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 392 if (bytes) 393 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 394 return 0; 395 } 396 397 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 398 unsigned int counter_index) 399 { 400 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 401 402 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 403 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 404 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 405 } 406 407 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 408 unsigned int *p_counter_index) 409 { 410 
int err; 411 412 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 413 p_counter_index); 414 if (err) 415 return err; 416 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 417 if (err) 418 goto err_counter_clear; 419 return 0; 420 421 err_counter_clear: 422 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 423 *p_counter_index); 424 return err; 425 } 426 427 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 428 unsigned int counter_index) 429 { 430 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 431 counter_index); 432 } 433 434 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 435 const struct mlxsw_tx_info *tx_info) 436 { 437 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 438 439 memset(txhdr, 0, MLXSW_TXHDR_LEN); 440 441 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 442 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 443 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 444 mlxsw_tx_hdr_swid_set(txhdr, 0); 445 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 446 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 447 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 448 } 449 450 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 451 { 452 switch (state) { 453 case BR_STATE_FORWARDING: 454 return MLXSW_REG_SPMS_STATE_FORWARDING; 455 case BR_STATE_LEARNING: 456 return MLXSW_REG_SPMS_STATE_LEARNING; 457 case BR_STATE_LISTENING: /* fall-through */ 458 case BR_STATE_DISABLED: /* fall-through */ 459 case BR_STATE_BLOCKING: 460 return MLXSW_REG_SPMS_STATE_DISCARDING; 461 default: 462 BUG(); 463 } 464 } 465 466 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 467 u8 state) 468 { 469 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 470 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 471 char *spms_pl; 472 int err; 473 474 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 475 if (!spms_pl) 476 return -ENOMEM; 
477 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 478 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 479 480 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 481 kfree(spms_pl); 482 return err; 483 } 484 485 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 486 { 487 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 488 int err; 489 490 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 491 if (err) 492 return err; 493 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 494 return 0; 495 } 496 497 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 498 bool enable, u32 rate) 499 { 500 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 501 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 502 503 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 504 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 505 } 506 507 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 508 bool is_up) 509 { 510 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 511 char paos_pl[MLXSW_REG_PAOS_LEN]; 512 513 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 514 is_up ? 
MLXSW_PORT_ADMIN_STATUS_UP : 515 MLXSW_PORT_ADMIN_STATUS_DOWN); 516 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 517 } 518 519 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 520 unsigned char *addr) 521 { 522 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 523 char ppad_pl[MLXSW_REG_PPAD_LEN]; 524 525 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 526 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 527 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 528 } 529 530 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 531 { 532 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 533 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 534 535 ether_addr_copy(addr, mlxsw_sp->base_mac); 536 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 537 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 538 } 539 540 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 541 { 542 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 543 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 544 int max_mtu; 545 int err; 546 547 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 548 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 549 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 550 if (err) 551 return err; 552 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 553 554 if (mtu > max_mtu) 555 return -EINVAL; 556 557 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 558 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 559 } 560 561 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 562 { 563 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 564 char pspa_pl[MLXSW_REG_PSPA_LEN]; 565 566 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 567 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 568 } 569 570 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 
bool enable) 571 { 572 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 573 char svpe_pl[MLXSW_REG_SVPE_LEN]; 574 575 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 576 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 577 } 578 579 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 580 bool learn_enable) 581 { 582 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 583 char *spvmlr_pl; 584 int err; 585 586 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 587 if (!spvmlr_pl) 588 return -ENOMEM; 589 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 590 learn_enable); 591 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 592 kfree(spvmlr_pl); 593 return err; 594 } 595 596 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 597 u16 vid) 598 { 599 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 600 char spvid_pl[MLXSW_REG_SPVID_LEN]; 601 602 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 603 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 604 } 605 606 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 607 bool allow) 608 { 609 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 610 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 611 612 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 613 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 614 } 615 616 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 617 { 618 int err; 619 620 if (!vid) { 621 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 622 if (err) 623 return err; 624 } else { 625 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 626 if (err) 627 return err; 628 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 629 if (err) 630 goto err_port_allow_untagged_set; 631 } 632 633 mlxsw_sp_port->pvid = vid; 634 return 0; 635 636 
err_port_allow_untagged_set: 637 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 638 return err; 639 } 640 641 static int 642 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 643 { 644 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 645 char sspr_pl[MLXSW_REG_SSPR_LEN]; 646 647 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 648 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 649 } 650 651 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 652 u8 local_port, u8 *p_module, 653 u8 *p_width, u8 *p_lane) 654 { 655 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 656 int err; 657 658 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 659 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 660 if (err) 661 return err; 662 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 663 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 664 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 665 return 0; 666 } 667 668 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port, 669 u8 module, u8 width, u8 lane) 670 { 671 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 672 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 673 int i; 674 675 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 676 mlxsw_reg_pmlp_width_set(pmlp_pl, width); 677 for (i = 0; i < width; i++) { 678 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); 679 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ 680 } 681 682 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 683 } 684 685 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 686 { 687 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 688 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 689 690 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 691 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 692 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 693 } 694 695 static int mlxsw_sp_port_open(struct net_device *dev) 696 { 697 
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 698 int err; 699 700 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 701 if (err) 702 return err; 703 netif_start_queue(dev); 704 return 0; 705 } 706 707 static int mlxsw_sp_port_stop(struct net_device *dev) 708 { 709 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 710 711 netif_stop_queue(dev); 712 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 713 } 714 715 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 716 struct net_device *dev) 717 { 718 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 719 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 720 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 721 const struct mlxsw_tx_info tx_info = { 722 .local_port = mlxsw_sp_port->local_port, 723 .is_emad = false, 724 }; 725 u64 len; 726 int err; 727 728 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 729 return NETDEV_TX_BUSY; 730 731 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 732 struct sk_buff *skb_orig = skb; 733 734 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 735 if (!skb) { 736 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 737 dev_kfree_skb_any(skb_orig); 738 return NETDEV_TX_OK; 739 } 740 dev_consume_skb_any(skb_orig); 741 } 742 743 if (eth_skb_pad(skb)) { 744 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 745 return NETDEV_TX_OK; 746 } 747 748 mlxsw_sp_txhdr_construct(skb, &tx_info); 749 /* TX header is consumed by HW on the way so we shouldn't count its 750 * bytes as being sent. 751 */ 752 len = skb->len - MLXSW_TXHDR_LEN; 753 754 /* Due to a race we might fail here because of a full queue. In that 755 * unlikely case we simply drop the packet. 
756 */ 757 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 758 759 if (!err) { 760 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 761 u64_stats_update_begin(&pcpu_stats->syncp); 762 pcpu_stats->tx_packets++; 763 pcpu_stats->tx_bytes += len; 764 u64_stats_update_end(&pcpu_stats->syncp); 765 } else { 766 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 767 dev_kfree_skb_any(skb); 768 } 769 return NETDEV_TX_OK; 770 } 771 772 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 773 { 774 } 775 776 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 777 { 778 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 779 struct sockaddr *addr = p; 780 int err; 781 782 if (!is_valid_ether_addr(addr->sa_data)) 783 return -EADDRNOTAVAIL; 784 785 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 786 if (err) 787 return err; 788 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 789 return 0; 790 } 791 792 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 793 int mtu) 794 { 795 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 796 } 797 798 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 799 800 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 801 u16 delay) 802 { 803 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 804 BITS_PER_BYTE)); 805 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 806 mtu); 807 } 808 809 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 810 * Assumes 100m cable and maximum MTU. 
811 */ 812 #define MLXSW_SP_PAUSE_DELAY 58752 813 814 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 815 u16 delay, bool pfc, bool pause) 816 { 817 if (pfc) 818 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 819 else if (pause) 820 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 821 else 822 return 0; 823 } 824 825 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 826 bool lossy) 827 { 828 if (lossy) 829 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 830 else 831 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 832 thres); 833 } 834 835 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 836 u8 *prio_tc, bool pause_en, 837 struct ieee_pfc *my_pfc) 838 { 839 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 840 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 841 u16 delay = !!my_pfc ? my_pfc->delay : 0; 842 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 843 int i, j, err; 844 845 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 846 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 847 if (err) 848 return err; 849 850 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 851 bool configure = false; 852 bool pfc = false; 853 bool lossy; 854 u16 thres; 855 856 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 857 if (prio_tc[j] == i) { 858 pfc = pfc_en & BIT(j); 859 configure = true; 860 break; 861 } 862 } 863 864 if (!configure) 865 continue; 866 867 lossy = !(pfc || pause_en); 868 thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 869 delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, 870 pause_en); 871 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); 872 } 873 874 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 875 } 876 877 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 878 int mtu, bool pause_en) 879 { 880 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 881 bool dcb_en = 
!!mlxsw_sp_port->dcb.ets; 882 struct ieee_pfc *my_pfc; 883 u8 *prio_tc; 884 885 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 886 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; 887 888 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 889 pause_en, my_pfc); 890 } 891 892 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 893 { 894 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 895 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 896 int err; 897 898 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 899 if (err) 900 return err; 901 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 902 if (err) 903 goto err_span_port_mtu_update; 904 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 905 if (err) 906 goto err_port_mtu_set; 907 dev->mtu = mtu; 908 return 0; 909 910 err_port_mtu_set: 911 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 912 err_span_port_mtu_update: 913 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 914 return err; 915 } 916 917 static int 918 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 919 struct rtnl_link_stats64 *stats) 920 { 921 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 922 struct mlxsw_sp_port_pcpu_stats *p; 923 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 924 u32 tx_dropped = 0; 925 unsigned int start; 926 int i; 927 928 for_each_possible_cpu(i) { 929 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 930 do { 931 start = u64_stats_fetch_begin_irq(&p->syncp); 932 rx_packets = p->rx_packets; 933 rx_bytes = p->rx_bytes; 934 tx_packets = p->tx_packets; 935 tx_bytes = p->tx_bytes; 936 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 937 938 stats->rx_packets += rx_packets; 939 stats->rx_bytes += rx_bytes; 940 stats->tx_packets += tx_packets; 941 stats->tx_bytes += tx_bytes; 942 /* tx_dropped is u32, updated without syncp protection. 
*/ 943 tx_dropped += p->tx_dropped; 944 } 945 stats->tx_dropped = tx_dropped; 946 return 0; 947 } 948 949 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 950 { 951 switch (attr_id) { 952 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 953 return true; 954 } 955 956 return false; 957 } 958 959 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 960 void *sp) 961 { 962 switch (attr_id) { 963 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 964 return mlxsw_sp_port_get_sw_stats64(dev, sp); 965 } 966 967 return -EINVAL; 968 } 969 970 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 971 int prio, char *ppcnt_pl) 972 { 973 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 974 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 975 976 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 977 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 978 } 979 980 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 981 struct rtnl_link_stats64 *stats) 982 { 983 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 984 int err; 985 986 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 987 0, ppcnt_pl); 988 if (err) 989 goto out; 990 991 stats->tx_packets = 992 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 993 stats->rx_packets = 994 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 995 stats->tx_bytes = 996 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 997 stats->rx_bytes = 998 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 999 stats->multicast = 1000 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1001 1002 stats->rx_crc_errors = 1003 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1004 stats->rx_frame_errors = 1005 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1006 1007 stats->rx_length_errors = ( 1008 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1009 
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Fill extended per-TC / per-prio counters from PPCNT. Individual query
 * failures are tolerated: the corresponding entry is simply left as-is.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Delayed work: refresh the cached HW stats while the carrier is up,
 * then re-arm itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program one SPVM record covering [vid_begin, vid_end]. The register
 * payload is heap-allocated because MLXSW_REG_SPVM_LEN is too large for
 * the stack.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership over a range, chunked to the SPVM record limit. */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Drop a reference on every VLAN still attached to the port. */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct
			  mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	/* VID 1 is the port's default (PVID) VLAN and egresses untagged. */
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->ref_count = 1;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the HW membership added above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Tear down a {port, VID}: unlink, free, and remove HW membership. */
static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* Get a reference to the {port, VID} object, creating it on first use. */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan->ref_count++;
		return mlxsw_sp_port_vlan;
	}

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

/* Drop a reference; on the last put, detach from bridge/router and destroy. */
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (--mlxsw_sp_port_vlan->ref_count != 0)
		return;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

/* ndo_vlan_rx_add_vid */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

/* ndo_vlan_rx_kill_vid */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

/* ndo_get_phys_port_name: delegated to the core, which knows the split
 * and module layout.
 */
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
						  mlxsw_sp_port->local_port,
						  name, len);
}

/* Look up a matchall entry on this port by its tc filter cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const
				      struct tc_action *a,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;
	struct net_device *to_dev;

	to_dev = tcf_mirred_dev(a);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
					true, &mirror->span_id);
}

/* Remove the SPAN session created for a matchall mirror entry. */
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only one active sampler per port;
 * the psample group pointer doubles as the "already active" flag.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group,
			 NULL);
	return err;
}

/* Disable HW sampling and clear the psample group pointer. */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

/* Offload a matchall classifier: exactly one action, either an egress
 * mirror or a sample, and only for protocol "all".
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	a = tcf_exts_first_action(f->exts);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* Remove a previously offloaded matchall entry, found by cookie. */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

/* Dispatch matchall offload commands to the add/del handlers. */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch flower offload commands to the spectrum_flower handlers. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* tc block callback registered per port for matchall classifiers;
 * flower is handled by the separate per-ACL-block callback.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		/* Flower is served by the per-ACL-block callback; accept
		 * silently here so the flower callback's verdict stands.
		 */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Ingress flavor of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

/* Egress flavor of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* tc block callback registered per ACL block for flower classifiers. */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Bind a port to the (possibly shared) ACL block backing a tcf_block.
 * Creates the ACL block and registers the flower callback on first bind;
 * subsequent binds only take a reference.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tcf_block *block, bool ingress,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = __tcf_block_cb_register(block,
						   mlxsw_sp_setup_tc_block_cb_flower,
						   mlxsw_sp,
						   acl_block, extack);
		if (IS_ERR(block_cb)) {
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
	} else {
		acl_block = tcf_block_cb_priv(block_cb);
	}
	tcf_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	return 0;

err_block_bind:
	/* Only destroy the ACL block if this was the last reference. */
	if (!tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
err_cb_register:
		mlxsw_sp_acl_block_destroy(acl_block);
	}
	return err;
}

/* Undo flower_bind: unbind the port and, on the last reference, drop the
 * callback and destroy the shared ACL block.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = tcf_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
		mlxsw_sp_acl_block_destroy(acl_block);
	}
}

/* TC_SETUP_BLOCK handler: register/unregister both the matchall and the
 * flower callbacks for the block's direction.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	switch (f->command) {
	case TC_BLOCK_BIND:
		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
					    mlxsw_sp_port, f->extack);
		if (err)
			return err;
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
							  f->block, ingress,
							  f->extack);
		if (err) {
			/* Roll back the matchall registration. */
			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
			return err;
		}
		return 0;
	case TC_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f->block, ingress);
		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_setup_tc: dispatch by offload type. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}


/* Toggle NETIF_F_HW_TC. Disabling is refused while any offloaded rules
 * exist; otherwise the port's ACL blocks are marked disabled/enabled.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
} 1666 return 0; 1667 } 1668 1669 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1670 1671 static int mlxsw_sp_handle_feature(struct net_device *dev, 1672 netdev_features_t wanted_features, 1673 netdev_features_t feature, 1674 mlxsw_sp_feature_handler feature_handler) 1675 { 1676 netdev_features_t changes = wanted_features ^ dev->features; 1677 bool enable = !!(wanted_features & feature); 1678 int err; 1679 1680 if (!(changes & feature)) 1681 return 0; 1682 1683 err = feature_handler(dev, enable); 1684 if (err) { 1685 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1686 enable ? "Enable" : "Disable", &feature, err); 1687 return err; 1688 } 1689 1690 if (enable) 1691 dev->features |= feature; 1692 else 1693 dev->features &= ~feature; 1694 1695 return 0; 1696 } 1697 static int mlxsw_sp_set_features(struct net_device *dev, 1698 netdev_features_t features) 1699 { 1700 return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1701 mlxsw_sp_feature_hw_tc); 1702 } 1703 1704 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1705 .ndo_open = mlxsw_sp_port_open, 1706 .ndo_stop = mlxsw_sp_port_stop, 1707 .ndo_start_xmit = mlxsw_sp_port_xmit, 1708 .ndo_setup_tc = mlxsw_sp_setup_tc, 1709 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1710 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1711 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1712 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1713 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1714 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1715 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1716 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1717 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, 1718 .ndo_set_features = mlxsw_sp_set_features, 1719 }; 1720 1721 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1722 struct ethtool_drvinfo *drvinfo) 1723 { 1724 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1725 struct mlxsw_sp 
			*mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

/* ethtool .get_pauseparam: report the cached link pause state. */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Program global pause (PFCC register) for the port. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool .set_pauseparam: mutually exclusive with PFC; autoneg of PAUSE
 * is not supported. Headroom is re-sized first and restored on failure.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Restore headroom for the previous pause configuration. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* One ethtool statistic: its name, the PPCNT payload getter, and whether
 * the value is in cells and must be converted to bytes.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter =
			mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 interface counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str =
		       "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (EtherLike-MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Per-port discard counters, broken down by drop reason. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counters; the priority number is appended to each string
 * by mlxsw_sp_port_get_prio_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-traffic-class counters; tc_transmit_queue is reported by the device
 * in cell units and converted to bytes (see .cells_bytes handling).
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total number of u64 values reported by get_ethtool_stats; must match the
 * number of strings emitted for ETH_SS_STATS.
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit the per-priority stat names, suffixed with the priority number,
 * advancing *p by ETH_GSTRING_LEN per string.
 */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Emit the per-TC stat names, suffixed with the traffic class number. */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void
mlxsw_sp_port_get_strings(struct net_device *dev, 2125 u32 stringset, u8 *data) 2126 { 2127 u8 *p = data; 2128 int i; 2129 2130 switch (stringset) { 2131 case ETH_SS_STATS: 2132 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2133 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2134 ETH_GSTRING_LEN); 2135 p += ETH_GSTRING_LEN; 2136 } 2137 2138 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 2139 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 2140 ETH_GSTRING_LEN); 2141 p += ETH_GSTRING_LEN; 2142 } 2143 2144 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2145 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2146 ETH_GSTRING_LEN); 2147 p += ETH_GSTRING_LEN; 2148 } 2149 2150 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2151 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2152 ETH_GSTRING_LEN); 2153 p += ETH_GSTRING_LEN; 2154 } 2155 2156 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2157 memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2158 ETH_GSTRING_LEN); 2159 p += ETH_GSTRING_LEN; 2160 } 2161 2162 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2163 mlxsw_sp_port_get_prio_strings(&p, i); 2164 2165 for (i = 0; i < TC_MAX_QUEUE; i++) 2166 mlxsw_sp_port_get_tc_strings(&p, i); 2167 2168 break; 2169 } 2170 } 2171 2172 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2173 enum ethtool_phys_id_state state) 2174 { 2175 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2176 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2177 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2178 bool active; 2179 2180 switch (state) { 2181 case ETHTOOL_ID_ACTIVE: 2182 active = true; 2183 break; 2184 case ETHTOOL_ID_INACTIVE: 2185 active = false; 2186 break; 2187 default: 2188 return -EOPNOTSUPP; 2189 } 2190 2191 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2192 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2193 } 2194 2195 static int 2196 mlxsw_sp_get_hw_stats_by_group(struct 
mlxsw_sp_port_hw_stats **p_hw_stats, 2197 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2198 { 2199 switch (grp) { 2200 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2201 *p_hw_stats = mlxsw_sp_port_hw_stats; 2202 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2203 break; 2204 case MLXSW_REG_PPCNT_RFC_2863_CNT: 2205 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2206 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2207 break; 2208 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2209 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2210 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2211 break; 2212 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2213 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2214 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2215 break; 2216 case MLXSW_REG_PPCNT_DISCARD_CNT: 2217 *p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2218 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2219 break; 2220 case MLXSW_REG_PPCNT_PRIO_CNT: 2221 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2222 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2223 break; 2224 case MLXSW_REG_PPCNT_TC_CNT: 2225 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2226 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2227 break; 2228 default: 2229 WARN_ON(1); 2230 return -EOPNOTSUPP; 2231 } 2232 return 0; 2233 } 2234 2235 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2236 enum mlxsw_reg_ppcnt_grp grp, int prio, 2237 u64 *data, int data_index) 2238 { 2239 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2240 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2241 struct mlxsw_sp_port_hw_stats *hw_stats; 2242 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2243 int i, len; 2244 int err; 2245 2246 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2247 if (err) 2248 return; 2249 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2250 for (i = 0; i < len; i++) { 2251 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2252 if (!hw_stats[i].cells_bytes) 2253 continue; 2254 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2255 
data[data_index + i]); 2256 } 2257 } 2258 2259 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2260 struct ethtool_stats *stats, u64 *data) 2261 { 2262 int i, data_index = 0; 2263 2264 /* IEEE 802.3 Counters */ 2265 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2266 data, data_index); 2267 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2268 2269 /* RFC 2863 Counters */ 2270 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2271 data, data_index); 2272 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2273 2274 /* RFC 2819 Counters */ 2275 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2276 data, data_index); 2277 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2278 2279 /* RFC 3635 Counters */ 2280 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2281 data, data_index); 2282 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2283 2284 /* Discard Counters */ 2285 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2286 data, data_index); 2287 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2288 2289 /* Per-Priority Counters */ 2290 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2291 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2292 data, data_index); 2293 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2294 } 2295 2296 /* Per-TC Counters */ 2297 for (i = 0; i < TC_MAX_QUEUE; i++) { 2298 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2299 data, data_index); 2300 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2301 } 2302 } 2303 2304 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2305 { 2306 switch (sset) { 2307 case ETH_SS_STATS: 2308 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2309 default: 2310 return -EOPNOTSUPP; 2311 } 2312 } 2313 2314 struct mlxsw_sp_port_link_mode { 2315 enum ethtool_link_mode_bit_indices mask_ethtool; 2316 u32 mask; 2317 u32 speed; 2318 }; 2319 2320 static const struct mlxsw_sp_port_link_mode 
mlxsw_sp_port_link_mode[] = { 2321 { 2322 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2323 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2324 .speed = SPEED_100, 2325 }, 2326 { 2327 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2328 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2329 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2330 .speed = SPEED_1000, 2331 }, 2332 { 2333 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2334 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2335 .speed = SPEED_10000, 2336 }, 2337 { 2338 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2339 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2340 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2341 .speed = SPEED_10000, 2342 }, 2343 { 2344 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2345 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2346 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2347 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2348 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2349 .speed = SPEED_10000, 2350 }, 2351 { 2352 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2353 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2354 .speed = SPEED_20000, 2355 }, 2356 { 2357 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2358 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2359 .speed = SPEED_40000, 2360 }, 2361 { 2362 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2363 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2364 .speed = SPEED_40000, 2365 }, 2366 { 2367 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2368 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2369 .speed = SPEED_40000, 2370 }, 2371 { 2372 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2373 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2374 .speed = SPEED_40000, 2375 }, 2376 { 2377 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2378 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2379 .speed = SPEED_25000, 2380 }, 2381 { 2382 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2383 
.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2384 .speed = SPEED_25000, 2385 }, 2386 { 2387 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2388 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2389 .speed = SPEED_25000, 2390 }, 2391 { 2392 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2393 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2394 .speed = SPEED_25000, 2395 }, 2396 { 2397 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2398 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2399 .speed = SPEED_50000, 2400 }, 2401 { 2402 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2403 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2404 .speed = SPEED_50000, 2405 }, 2406 { 2407 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2408 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2409 .speed = SPEED_50000, 2410 }, 2411 { 2412 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2413 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 2414 .speed = SPEED_56000, 2415 }, 2416 { 2417 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2418 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 2419 .speed = SPEED_56000, 2420 }, 2421 { 2422 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2423 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 2424 .speed = SPEED_56000, 2425 }, 2426 { 2427 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2428 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 2429 .speed = SPEED_56000, 2430 }, 2431 { 2432 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2433 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2434 .speed = SPEED_100000, 2435 }, 2436 { 2437 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2438 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2439 .speed = SPEED_100000, 2440 }, 2441 { 2442 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2443 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2444 .speed = SPEED_100000, 2445 }, 2446 { 2447 .mask = 
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2448 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2449 .speed = SPEED_100000, 2450 }, 2451 }; 2452 2453 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 2454 2455 static void 2456 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, 2457 struct ethtool_link_ksettings *cmd) 2458 { 2459 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2460 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2461 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2462 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2463 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2464 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2465 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2466 2467 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2468 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2469 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2470 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2471 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2472 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2473 } 2474 2475 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode) 2476 { 2477 int i; 2478 2479 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2480 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 2481 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2482 mode); 2483 } 2484 } 2485 2486 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 2487 struct ethtool_link_ksettings *cmd) 2488 { 2489 u32 speed = SPEED_UNKNOWN; 2490 u8 duplex = DUPLEX_UNKNOWN; 2491 int i; 2492 2493 if (!carrier_ok) 2494 goto out; 2495 2496 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2497 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 2498 speed = mlxsw_sp_port_link_mode[i].speed; 2499 duplex = DUPLEX_FULL; 2500 break; 2501 } 2502 } 2503 out: 2504 cmd->base.speed = speed; 2505 cmd->base.duplex = duplex; 2506 } 2507 2508 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 2509 { 2510 if 
(ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2511 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2512 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2513 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2514 return PORT_FIBRE; 2515 2516 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2517 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2518 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 2519 return PORT_DA; 2520 2521 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2522 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2523 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2524 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 2525 return PORT_NONE; 2526 2527 return PORT_OTHER; 2528 } 2529 2530 static u32 2531 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2532 { 2533 u32 ptys_proto = 0; 2534 int i; 2535 2536 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2537 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2538 cmd->link_modes.advertising)) 2539 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2540 } 2541 return ptys_proto; 2542 } 2543 2544 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2545 { 2546 u32 ptys_proto = 0; 2547 int i; 2548 2549 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2550 if (speed == mlxsw_sp_port_link_mode[i].speed) 2551 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2552 } 2553 return ptys_proto; 2554 } 2555 2556 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2557 { 2558 u32 ptys_proto = 0; 2559 int i; 2560 2561 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2562 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 2563 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2564 } 2565 return ptys_proto; 2566 } 2567 2568 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, 2569 struct ethtool_link_ksettings *cmd) 2570 { 2571 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2572 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2573 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2574 2575 
mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); 2576 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); 2577 } 2578 2579 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, 2580 struct ethtool_link_ksettings *cmd) 2581 { 2582 if (!autoneg) 2583 return; 2584 2585 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2586 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); 2587 } 2588 2589 static void 2590 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, 2591 struct ethtool_link_ksettings *cmd) 2592 { 2593 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) 2594 return; 2595 2596 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg); 2597 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); 2598 } 2599 2600 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 2601 struct ethtool_link_ksettings *cmd) 2602 { 2603 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp; 2604 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2605 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2606 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2607 u8 autoneg_status; 2608 bool autoneg; 2609 int err; 2610 2611 autoneg = mlxsw_sp_port->link.autoneg; 2612 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); 2613 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2614 if (err) 2615 return err; 2616 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, ð_proto_admin, 2617 ð_proto_oper); 2618 2619 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd); 2620 2621 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd); 2622 2623 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl); 2624 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl); 2625 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd); 2626 2627 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 2628 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper); 2629 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, 2630 cmd); 2631 2632 return 0; 2633 } 2634 2635 static int 2636 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 2637 const struct ethtool_link_ksettings *cmd) 2638 { 2639 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2640 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2641 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2642 u32 eth_proto_cap, eth_proto_new; 2643 bool autoneg; 2644 int err; 2645 2646 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); 2647 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2648 if (err) 2649 return err; 2650 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 2651 2652 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 2653 eth_proto_new = autoneg ? 2654 mlxsw_sp_to_ptys_advert_link(cmd) : 2655 mlxsw_sp_to_ptys_speed(cmd->base.speed); 2656 2657 eth_proto_new = eth_proto_new & eth_proto_cap; 2658 if (!eth_proto_new) { 2659 netdev_err(dev, "No supported speed requested\n"); 2660 return -EINVAL; 2661 } 2662 2663 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2664 eth_proto_new, autoneg); 2665 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2666 if (err) 2667 return err; 2668 2669 if (!netif_running(dev)) 2670 return 0; 2671 2672 mlxsw_sp_port->link.autoneg = autoneg; 2673 2674 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2675 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 2676 2677 return 0; 2678 } 2679 2680 static int mlxsw_sp_flash_device(struct net_device *dev, 2681 struct ethtool_flash *flash) 2682 { 2683 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2684 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2685 const struct firmware *firmware; 2686 int err; 2687 2688 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) 2689 return -EOPNOTSUPP; 2690 2691 
dev_hold(dev); 2692 rtnl_unlock(); 2693 2694 err = request_firmware_direct(&firmware, flash->data, &dev->dev); 2695 if (err) 2696 goto out; 2697 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 2698 release_firmware(firmware); 2699 out: 2700 rtnl_lock(); 2701 dev_put(dev); 2702 return err; 2703 } 2704 2705 #define MLXSW_SP_I2C_ADDR_LOW 0x50 2706 #define MLXSW_SP_I2C_ADDR_HIGH 0x51 2707 #define MLXSW_SP_EEPROM_PAGE_LENGTH 256 2708 2709 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, 2710 u16 offset, u16 size, void *data, 2711 unsigned int *p_read_size) 2712 { 2713 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2714 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; 2715 char mcia_pl[MLXSW_REG_MCIA_LEN]; 2716 u16 i2c_addr; 2717 int status; 2718 int err; 2719 2720 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); 2721 2722 if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH && 2723 offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH) 2724 /* Cross pages read, read until offset 256 in low page */ 2725 size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; 2726 2727 i2c_addr = MLXSW_SP_I2C_ADDR_LOW; 2728 if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { 2729 i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; 2730 offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; 2731 } 2732 2733 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, 2734 0, 0, offset, size, i2c_addr); 2735 2736 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); 2737 if (err) 2738 return err; 2739 2740 status = mlxsw_reg_mcia_status_get(mcia_pl); 2741 if (status) 2742 return -EIO; 2743 2744 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); 2745 memcpy(data, eeprom_tmp, size); 2746 *p_read_size = size; 2747 2748 return 0; 2749 } 2750 2751 enum mlxsw_sp_eeprom_module_info_rev_id { 2752 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, 2753 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, 2754 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, 2755 }; 2756 2757 enum mlxsw_sp_eeprom_module_info_id { 2758 
MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03, 2759 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, 2760 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, 2761 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, 2762 }; 2763 2764 enum mlxsw_sp_eeprom_module_info { 2765 MLXSW_SP_EEPROM_MODULE_INFO_ID, 2766 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID, 2767 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2768 }; 2769 2770 static int mlxsw_sp_get_module_info(struct net_device *netdev, 2771 struct ethtool_modinfo *modinfo) 2772 { 2773 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2774 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE]; 2775 u8 module_rev_id, module_id; 2776 unsigned int read_size; 2777 int err; 2778 2779 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, 2780 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2781 module_info, &read_size); 2782 if (err) 2783 return err; 2784 2785 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE) 2786 return -EIO; 2787 2788 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; 2789 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; 2790 2791 switch (module_id) { 2792 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: 2793 modinfo->type = ETH_MODULE_SFF_8436; 2794 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2795 break; 2796 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: 2797 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: 2798 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || 2799 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { 2800 modinfo->type = ETH_MODULE_SFF_8636; 2801 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2802 } else { 2803 modinfo->type = ETH_MODULE_SFF_8436; 2804 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2805 } 2806 break; 2807 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: 2808 modinfo->type = ETH_MODULE_SFF_8472; 2809 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2810 break; 2811 default: 2812 return -EINVAL; 2813 } 2814 2815 return 0; 2816 } 2817 2818 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 
2819 struct ethtool_eeprom *ee, 2820 u8 *data) 2821 { 2822 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2823 int offset = ee->offset; 2824 unsigned int read_size; 2825 int i = 0; 2826 int err; 2827 2828 if (!ee->len) 2829 return -EINVAL; 2830 2831 memset(data, 0, ee->len); 2832 2833 while (i < ee->len) { 2834 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset, 2835 ee->len - i, data + i, 2836 &read_size); 2837 if (err) { 2838 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n"); 2839 return err; 2840 } 2841 2842 i += read_size; 2843 offset += read_size; 2844 } 2845 2846 return 0; 2847 } 2848 2849 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 2850 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 2851 .get_link = ethtool_op_get_link, 2852 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 2853 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 2854 .get_strings = mlxsw_sp_port_get_strings, 2855 .set_phys_id = mlxsw_sp_port_set_phys_id, 2856 .get_ethtool_stats = mlxsw_sp_port_get_stats, 2857 .get_sset_count = mlxsw_sp_port_get_sset_count, 2858 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 2859 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 2860 .flash_device = mlxsw_sp_flash_device, 2861 .get_module_info = mlxsw_sp_get_module_info, 2862 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 2863 }; 2864 2865 static int 2866 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 2867 { 2868 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2869 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 2870 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2871 u32 eth_proto_admin; 2872 2873 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 2874 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2875 eth_proto_admin, mlxsw_sp_port->link.autoneg); 2876 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2877 } 2878 2879 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 2880 enum 
mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 2881 bool dwrr, u8 dwrr_weight) 2882 { 2883 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2884 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2885 2886 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2887 next_index); 2888 mlxsw_reg_qeec_de_set(qeec_pl, true); 2889 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 2890 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 2891 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2892 } 2893 2894 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 2895 enum mlxsw_reg_qeec_hr hr, u8 index, 2896 u8 next_index, u32 maxrate) 2897 { 2898 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2899 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2900 2901 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2902 next_index); 2903 mlxsw_reg_qeec_mase_set(qeec_pl, true); 2904 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 2905 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2906 } 2907 2908 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 2909 enum mlxsw_reg_qeec_hr hr, u8 index, 2910 u8 next_index, u32 minrate) 2911 { 2912 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2913 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2914 2915 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2916 next_index); 2917 mlxsw_reg_qeec_mise_set(qeec_pl, true); 2918 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 2919 2920 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2921 } 2922 2923 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 2924 u8 switch_prio, u8 tclass) 2925 { 2926 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2927 char qtct_pl[MLXSW_REG_QTCT_LEN]; 2928 2929 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 2930 tclass); 2931 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 2932 } 2933 2934 static int 
mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 2935 { 2936 int err, i; 2937 2938 /* Setup the elements hierarcy, so that each TC is linked to 2939 * one subgroup, which are all member in the same group. 2940 */ 2941 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2942 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 2943 0); 2944 if (err) 2945 return err; 2946 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2947 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2948 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 2949 0, false, 0); 2950 if (err) 2951 return err; 2952 } 2953 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2954 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2955 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 2956 false, 0); 2957 if (err) 2958 return err; 2959 2960 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2961 MLXSW_REG_QEEC_HIERARCY_TC, 2962 i + 8, i, 2963 false, 0); 2964 if (err) 2965 return err; 2966 } 2967 2968 /* Make sure the max shaper is disabled in all hierarchies that 2969 * support it. 2970 */ 2971 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2972 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 2973 MLXSW_REG_QEEC_MAS_DIS); 2974 if (err) 2975 return err; 2976 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2977 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2978 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 2979 i, 0, 2980 MLXSW_REG_QEEC_MAS_DIS); 2981 if (err) 2982 return err; 2983 } 2984 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2985 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2986 MLXSW_REG_QEEC_HIERARCY_TC, 2987 i, i, 2988 MLXSW_REG_QEEC_MAS_DIS); 2989 if (err) 2990 return err; 2991 2992 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2993 MLXSW_REG_QEEC_HIERARCY_TC, 2994 i + 8, i, 2995 MLXSW_REG_QEEC_MAS_DIS); 2996 if (err) 2997 return err; 2998 } 2999 3000 /* Configure the min shaper for multicast TCs. 
*/ 3001 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3002 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3003 MLXSW_REG_QEEC_HIERARCY_TC, 3004 i + 8, i, 3005 MLXSW_REG_QEEC_MIS_MIN); 3006 if (err) 3007 return err; 3008 } 3009 3010 /* Map all priorities to traffic class 0. */ 3011 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3012 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3013 if (err) 3014 return err; 3015 } 3016 3017 return 0; 3018 } 3019 3020 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3021 bool enable) 3022 { 3023 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3024 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3025 3026 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3027 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3028 } 3029 3030 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3031 bool split, u8 module, u8 width, u8 lane) 3032 { 3033 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3034 struct mlxsw_sp_port *mlxsw_sp_port; 3035 struct net_device *dev; 3036 int err; 3037 3038 err = mlxsw_core_port_init(mlxsw_sp->core, local_port); 3039 if (err) { 3040 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3041 local_port); 3042 return err; 3043 } 3044 3045 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3046 if (!dev) { 3047 err = -ENOMEM; 3048 goto err_alloc_etherdev; 3049 } 3050 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3051 mlxsw_sp_port = netdev_priv(dev); 3052 mlxsw_sp_port->dev = dev; 3053 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3054 mlxsw_sp_port->local_port = local_port; 3055 mlxsw_sp_port->pvid = 1; 3056 mlxsw_sp_port->split = split; 3057 mlxsw_sp_port->mapping.module = module; 3058 mlxsw_sp_port->mapping.width = width; 3059 mlxsw_sp_port->mapping.lane = lane; 3060 mlxsw_sp_port->link.autoneg = 1; 3061 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3062 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3063 3064 
	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	/* Port starts administratively down; user space brings it up. */
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	/* Default PVID is 1; create the matching {port, VID} entry. */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_get;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	/* Publish the port before register_netdev() so that callbacks
	 * triggered by registration can already look it up.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, module + 1,
				mlxsw_sp_port->split, lane / width);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down a port created by mlxsw_sp_port_create(), undoing each init
 * step in reverse order.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* Local port 0 is the CPU port and is never in the ports array; a non-NULL
 * slot means the port was created.
 */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Remove all created ports and free the port bookkeeping arrays. */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}

/* Query module mapping for every possible local port and create a netdev
 * for each mapped (width != 0) port.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
						 GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		/* Mark as invalid */
		mlxsw_sp->port_to_module[i] = -1;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}

/* Return the first local port of the split cluster that contains
 * local_port. Local ports are 1-based, hence the "- 1".
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

/* Create the "count" split ports that share one module, each using an
 * equal share of the module's lanes.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	return err;
}

/* Re-create the original full-width ports after a split is undone (or a
 * split attempt failed). Errors are deliberately ignored: this is a
 * best-effort restore path.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

/* devlink port-split handler: validate the request, remove the ports
 * occupying the cluster and create the split ports in their place.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if
 (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort: restore the original full-width ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

/* devlink port-unsplit handler: remove the split ports and re-create the
 * original ones.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Width 1 means the port was split by four, otherwise by two. */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

/* PUDE event handler: reflect the hardware operational state of a port
 * into the netdev carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

/* Base Rx handler for trapped packets: account the packet in per-CPU
 * stats and inject it into the network stack on the ingress netdev.
 */
static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

/* As above, but mark the packet as already forwarded by hardware so the
 * bridge does not forward it again.
 */
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* As above, additionally marking the packet as L3-forwarded in hardware. */
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* Rx handler for sampled packets: hand the packet to the psample
 * subsystem (optionally truncated) and consume it.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
		       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* All traps/events registered by the driver, grouped by layer. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
};

/* Program a rate-limiting policer (QPCR) for each CPU trap group so
 * trapped traffic cannot overwhelm the host CPU.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case
		     MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 4 * 1024;
			burst_size = 4;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Bind each trap group (HTGT) to a policer, priority and traffic class.
 * Groups use the policer with the same index, except events which have
 * no policer.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Configure policers and trap groups, then register every listener in
 * mlxsw_sp_listener[]; unwinds already-registered listeners on failure.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &mlxsw_sp_listener[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister every listener registered by mlxsw_sp_traps_init(). */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
}

/* Configure the LAG hash (SLCR) with a random seed and allocate the
 * upper-device bookkeeping array sized by the MAX_LAG resource.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	get_random_bytes(&seed, sizeof(seed));
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

/* Free the LAG bookkeeping array. */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Set up the EMAD trap group, needed before any other register access
 * through EMADs.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common ASIC-wide initialization, shared by Spectrum-1 and Spectrum-2;
 * continues below with ordered subsystem init and reverse-order unwind.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp =
		mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	/* Ports come last so that every subsystem they rely on is ready. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: select the SP1-specific ops/firmware, then run the
 * common initialization.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops =
&mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Spectrum-2 probe entry point: same as mlxsw_sp1_init() but with the
 * SP2 operation tables and no firmware revision requirement set.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Teardown counterpart of mlxsw_sp_init(); the order mirrors the init
 * error-unwind path (reverse order of initialization).
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Device configuration profile for Spectrum-1, passed to the core at
 * probe time.  Includes the KVD hash single/double split ratio and the
 * linear part size, which SP2 does not have.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Same as the SP1 profile, except it carries no KVD partitioning fields. */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Build the devlink min/max/granularity envelopes for the KVD resource and
 * its three children (linear, hash-single, hash-double).  Bounds are derived
 * from the device-reported KVD_SIZE and *_MIN_SIZE resources.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* The top-level KVD resource is fixed: min == max == kvd_size. */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	/* Each child may grow up to the KVD size minus the minimum sizes
	 * of its two siblings.
	 */
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the SP1 KVD resource tree with devlink:
 *   KVD
 *    +- linear (plus its KVDL sub-resources)
 *    +- hash-double
 *    +- hash-single
 * Default sizes come from the SP1 config profile; the hash part is split
 * between double and single entries by the profile's parts ratio.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* double : single = kvd_hash_double_parts : kvd_hash_single_parts
	 * of whatever is left after the linear part; rounded down to the
	 * KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}

/* SP2 exposes no devlink resources (no KVD to partition). */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return 0;
}

/* Resolve the actual KVD partition sizes to configure the device with.
 * User-provided sizes are preferred (read back via devlink); otherwise
 * fall back to the profile defaults / parts-ratio split.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		/* No user-provided size: recompute from the profile ratio,
		 * same math as mlxsw_sp1_resources_kvd_register().
		 */
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink 'fw_load_policy' validation: only 'driver' and 'flash' are
 * supported values.
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the devlink parameters and seed the driverinit value of
 * 'fw_load_policy' with the default ('driver').
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* mlxsw core driver registration for Spectrum-1. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.params_register		= mlxsw_sp_params_register,
	.params_unregister		= mlxsw_sp_params_unregister,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
};

/* mlxsw core driver registration for Spectrum-2; differs from SP1 in the
 * init hook, resources and profile, and has no kvd_sizes_get.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp_params_register,
	.params_unregister		= mlxsw_sp_params_unregister,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};

/* A netdev belongs to this driver iff it uses our netdev ops. */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* Lower-device walk callback: record the first mlxsw_sp port found and
 * return 1 (non-zero return stops the walk).
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw_sp port underlying 'dev' (the device itself, or the
 * first one found among its lower devices); NULL if there is none.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Map any netdev to its mlxsw_sp instance via the underlying port. */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(). */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Like mlxsw_sp_port_dev_lower_find(), but takes a reference on the port's
 * netdev; release with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Create a LAG in the device (SLDR register). */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add a port to a LAG's collector at the given index (SLCOR register). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection (Rx) for a LAG member port (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Map a LAG netdev to a device LAG index: return the index already used
 * for this netdev if it is known, otherwise the first free one.
 * -EBUSY when all MAX_LAG slots are taken.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate a LAG master before a port is enslaved to it: a device LAG
 * slot must be available and the Tx type must be hash-based.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member index within a LAG; -EBUSY when the LAG is full. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Enslave a port to a LAG: create the device LAG on first use, add the
 * port to its collector and enable collection, then update the software
 * state (lag_id, lagged flag, ref_count).
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member: instantiate the LAG in the device. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if
 (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	/* ref_count is still 0 when we created the LAG above; destroy it. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Undo mlxsw_sp_port_lag_join(): disable collection, remove the port
 * from the LAG, flush its VLANs, and destroy the device LAG when the
 * last member (ref_count == 1) leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}

/* Add a port to a LAG's distributor, i.e. allow Tx through the LAG
 * (SLDR register).
 */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Reflect the LAG Tx-enabled state into the device distributor. */
static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

/* Set the spanning-tree state of all 4K VLANs on a port to forwarding
 * (enable) or discarding (disable), in a single SPMS register write.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is too large for the stack; allocate it. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare a port for enslavement to an OVS master: Virtual-Port mode,
 * forwarding STP state, membership in VLANs 2..4094, learning disabled.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto
 err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning for the VIDs already processed. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* True when more than one VxLAN device is enslaved to the bridge. */
static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* Verify no two VxLAN devices under the bridge map to the same VLAN
 * (mapped VID tracked via a 4K bitmap; duplicates fail the check).
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Validate a bridge that has VxLAN devices enslaved: multicast must be
 * disabled, and the VxLAN/VLAN constraints above must hold.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER notifier events for a physical port:
 * PRECHANGEUPPER vetoes unsupported topologies, CHANGEUPPER performs the
 * actual join/leave of bridge, LAG, OVS or macvlan uppers.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (is_vlan_dev(upper_dev) &&
		    vlan_dev_vlan_id(upper_dev) == 1) {
			NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}

/* Handle CHANGELOWERSTATE for a LAG member port: reflect the Tx-enabled
 * state into the device; errors are only logged, the notifier returns 0.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch a netdev notifier event for a physical port to the upper- or
 * lower-state handler above.
 */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replicate an event on a LAG netdev to each mlxsw_sp member port. */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device whose real device
 * is an mlxsw_sp port: only bridge and macvlan uppers are supported.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
netdev_priv(dev); 5067 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5068 struct netdev_notifier_changeupper_info *info = ptr; 5069 struct netlink_ext_ack *extack; 5070 struct net_device *upper_dev; 5071 int err = 0; 5072 5073 extack = netdev_notifier_info_to_extack(&info->info); 5074 5075 switch (event) { 5076 case NETDEV_PRECHANGEUPPER: 5077 upper_dev = info->upper_dev; 5078 if (!netif_is_bridge_master(upper_dev) && 5079 !netif_is_macvlan(upper_dev)) { 5080 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5081 return -EINVAL; 5082 } 5083 if (!info->linking) 5084 break; 5085 if (netif_is_bridge_master(upper_dev) && 5086 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5087 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5088 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5089 return -EOPNOTSUPP; 5090 if (netdev_has_any_upper_dev(upper_dev) && 5091 (!netif_is_bridge_master(upper_dev) || 5092 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5093 upper_dev))) { 5094 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5095 return -EINVAL; 5096 } 5097 if (netif_is_macvlan(upper_dev) && 5098 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 5099 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5100 return -EOPNOTSUPP; 5101 } 5102 break; 5103 case NETDEV_CHANGEUPPER: 5104 upper_dev = info->upper_dev; 5105 if (netif_is_bridge_master(upper_dev)) { 5106 if (info->linking) 5107 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5108 vlan_dev, 5109 upper_dev, 5110 extack); 5111 else 5112 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5113 vlan_dev, 5114 upper_dev); 5115 } else if (netif_is_macvlan(upper_dev)) { 5116 if (!info->linking) 5117 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5118 } else { 5119 err = -EINVAL; 5120 WARN_ON(1); 5121 } 5122 break; 5123 } 5124 5125 return err; 5126 } 5127 5128 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device 
						  *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	/* Stop at the first member port that reports an error. */
	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Route an event on a VLAN device to the handler matching its real device:
 * a single mlxsw port or a LAG master. Other real devices are ignored.
 */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);

	return 0;
}

/* Handle upper-device events on a bridge that has mlxsw lowers.
 * Validates new uppers at PRECHANGEUPPER (only VLAN and macvlan devices are
 * accepted) and tears down router state at CHANGEUPPER unlink.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Bridge is not ours; nothing to do. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if
		    (info->linking)
			break;
		/* Only unlink needs work here: drop the RIF backing a VLAN
		 * upper, or the macvlan state, respectively.
		 */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Reject any upper being placed on top of a macvlan that sits over mlxsw,
 * except VRF enslavement which a different handler takes care of.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* True when the event is an upper-device change whose upper is an L3
 * master (VRF) device.
 */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events on a VxLAN device: join/leave the hardware bridge model
 * when the device is linked to, brought up under, or taken down under a
 * bridge that has mlxsw lowers (body continues below).
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Bridge has no mlxsw lowers; not our business. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* Join is deferred to NETDEV_PRE_UP for devices that
			 * are not running yet.
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		/* Device is coming up while already enslaved to an offloaded
		 * bridge; perform the join that was skipped at link time.
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Top-level netdevice notifier for a mlxsw_sp instance: invalidates SPAN
 * entries for unregistering ports, respins SPAN, then dispatches the event
 * by device type (body continues below).
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		/* The mirror target is going away; invalidate its SPAN entry
		 * before the respin below.
		 */
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* Note: the VxLAN check is a separate "if", so an IPinIP check below
	 * may overwrite err for a device that is both; the remaining checks
	 * are mutually exclusive "else if"s.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if
		 (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Notifier blocks for IPv4/IPv6 address validation and change events;
 * registered globally in mlxsw_sp_module_init().
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};

/* PCI IDs and driver stubs for Spectrum (SP1) and Spectrum-2 ASICs. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name =
mlxsw_sp2_driver_name, 5379 .id_table = mlxsw_sp2_pci_id_table, 5380 }; 5381 5382 static int __init mlxsw_sp_module_init(void) 5383 { 5384 int err; 5385 5386 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5387 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 5388 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5389 register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 5390 5391 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 5392 if (err) 5393 goto err_sp1_core_driver_register; 5394 5395 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 5396 if (err) 5397 goto err_sp2_core_driver_register; 5398 5399 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 5400 if (err) 5401 goto err_sp1_pci_driver_register; 5402 5403 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 5404 if (err) 5405 goto err_sp2_pci_driver_register; 5406 5407 return 0; 5408 5409 err_sp2_pci_driver_register: 5410 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5411 err_sp1_pci_driver_register: 5412 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5413 err_sp2_core_driver_register: 5414 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5415 err_sp1_core_driver_register: 5416 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 5417 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5418 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 5419 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5420 return err; 5421 } 5422 5423 static void __exit mlxsw_sp_module_exit(void) 5424 { 5425 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5426 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5427 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5428 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5429 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 5430 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5431 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 5432 
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Both PCI ID tables are exported so udev/modprobe can autoload the module
 * for either Spectrum generation.
 */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);