// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "../mlxfw/mlxfw.h"

/* Firmware minor numbers within the same window of 100 belong to the same
 * release branch (used by the revision compatibility check below).
 */
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
#define MLXSW_SP1_FWREV_SUBMINOR 1122
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

/* Minimum firmware revision required by the Spectrum-1 driver. */
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

/* Glue between the generic mlxfw flashing state machine and this driver. */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

/* Query size and alignment constraints of a firmware component via the
 * MCQI register; clamps the write size to what MCDA can carry.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* Take the firmware update lock; fails with -EBUSY if the FSM is not idle. */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* Download one block of firmware data at the given offset via MCDA. */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* Read back the FSM state; error codes above the known range are clamped
 * to MLXFW_FSM_STATE_ERR_MAX.
 */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* Best-effort cancel; the write result is intentionally ignored since this
 * runs on error/cleanup paths.
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* Forward flashing progress to devlink so user space can monitor it. */
static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
				   const char *msg, const char *comp_name,
				   u32 done_bytes, u32 total_bytes)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;

	devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
					   msg, comp_name,
					   done_bytes, total_bytes);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release,
	.status_notify = mlxsw_sp_status_notify,
};

/* Drive a full firmware flash through the generic mlxfw state machine,
 * bracketing it with core flash start/end and devlink begin/end notifications.
 */
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};
	int err;

	mlxsw_core_fw_flash_start(mlxsw_sp->core);
	devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
				   firmware, extack);
	devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
	mlxsw_core_fw_flash_end(mlxsw_sp->core);

	return
	       err;
}

/* Validate that the running firmware revision is compatible with the
 * driver's required revision, flashing the bundled image when it is not.
 * After a successful flash, returns -EAGAIN when the current firmware can
 * perform the required reset, so the caller retries initialization.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	/* Same branch and at least the required minor.subminor -> compatible */
	if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
	    (rev->minor > req_rev->minor ||
	     (rev->minor == req_rev->minor &&
	      rev->subminor >= req_rev->subminor)))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}

/* devlink flash_update callback; per-component flashing is not supported. */
static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
				 const char *file_name, const char *component,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	const struct firmware *firmware;
	int err;

	if (component)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, file_name,
				      mlxsw_sp->bus_info->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
	release_firmware(firmware);

	return err;
}

/* Read a flow counter's packet and byte values via MGPC; either output
 * pointer may be NULL when the caller is not interested in that value.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a flow counter from the flow sub-pool and clear it before use;
 * the counter is released again if the clear fails.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

/* Prepend the Tx header the device expects on control packets sent to a
 * specific local port.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

/* Map a bridge STP port state to the device's SPMS register encoding. */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

/* Program the STP state of one VID on a port via SPMS. */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	/* SPMS payload is too large for the stack; allocate it. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Read the switch base MAC address into mlxsw_sp->base_mac via SPAD. */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port's MAC from the switch base MAC plus the local port
 * number (added into the last byte), then program it into hardware.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

/* Set the hardware MTU; the wire MTU includes the Tx header and the
 * Ethernet header on top of the requested L3 MTU.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	/* Query with MTU 0 first to learn the port's maximum MTU. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable or disable FDB learning for one VID on a port via SPVMLR. */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	/* SPVMLR payload is too large for the stack; allocate it. */
	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

/* Set the port's PVID; VID 0 means untagged frames are disallowed instead.
 * On failure of the allow-untagged step, the previous PVID is restored.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

/* Read a port's module number, width and first Tx lane from PMLP. */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

/* Map a port to its module lanes via PMLP. */
static int mlxsw_sp_port_module_map(struct
 mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* Unmap a port from its module by programming a zero lane width. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* ndo_open: administratively enable the port, then start the Tx queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop: stop the Tx queue, then administratively disable the port. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

/* ndo_start_xmit: prepend the Tx header and hand the skb to the core for
 * transmission through the CPU port, updating per-CPU counters.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Make room for the Tx header if the skb lacks headroom. */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: nothing to do, the device always receives everything. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* Xon/Xoff threshold of a priority group buffer, in cells: two MTUs. */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* Headroom needed to absorb traffic in flight after PFC is asserted;
 * 'delay' is the PFC delay allowance in bit time.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

/* Size the port's headroom buffers (one per priority group) for the given
 * MTU, priority-to-TC mapping and PFC/pause configuration. Fails with
 * -ENOBUFS when the required headroom exceeds what the port may use.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		/* Only configure buffers some priority actually maps to. */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		total_cells = thres_cells + delay_cells;

		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Headroom update using the port's current DCB configuration (or defaults
 * when DCB ETS is not configured).
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ?
mlxsw_sp_port->dcb.pfc : NULL; 953 954 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 955 pause_en, my_pfc); 956 } 957 958 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 959 { 960 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 961 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 962 int err; 963 964 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 965 if (err) 966 return err; 967 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 968 if (err) 969 goto err_span_port_mtu_update; 970 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 971 if (err) 972 goto err_port_mtu_set; 973 dev->mtu = mtu; 974 return 0; 975 976 err_port_mtu_set: 977 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 978 err_span_port_mtu_update: 979 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 980 return err; 981 } 982 983 static int 984 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 985 struct rtnl_link_stats64 *stats) 986 { 987 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 988 struct mlxsw_sp_port_pcpu_stats *p; 989 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 990 u32 tx_dropped = 0; 991 unsigned int start; 992 int i; 993 994 for_each_possible_cpu(i) { 995 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 996 do { 997 start = u64_stats_fetch_begin_irq(&p->syncp); 998 rx_packets = p->rx_packets; 999 rx_bytes = p->rx_bytes; 1000 tx_packets = p->tx_packets; 1001 tx_bytes = p->tx_bytes; 1002 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1003 1004 stats->rx_packets += rx_packets; 1005 stats->rx_bytes += rx_bytes; 1006 stats->tx_packets += tx_packets; 1007 stats->tx_bytes += tx_bytes; 1008 /* tx_dropped is u32, updated without syncp protection. 
		 */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* Report which offload stats attributes this driver can fill in. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* CPU-hit offload stats are exactly the software (slow path) counters. */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group/priority into @ppcnt_pl (caller-provided
 * register payload buffer of MLXSW_REG_PPCNT_LEN bytes).
 */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Translate the IEEE 802.3 PPCNT counter group into rtnl_link_stats64. */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	/* Aggregate the three length-related IEEE counters. */
	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Collect extended counters (ECN marks, per-TC WRED/backlog/tail-drop,
 * per-priority TX) into @xstats. Individual query failures are skipped,
 * leaving the corresponding xstats entries untouched (best effort).
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Delayed work: periodically refresh the cached HW counters while the
 * carrier is up, then re-arm itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program one SPVM record covering [vid_begin, vid_end] for this port.
 * The register payload is heap-allocated because MLXSW_REG_SPVM_LEN is
 * presumably too large for the stack -- TODO confirm.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership for a VID range, chunked by the maximum number of
 * records one SPVM write can carry. Stops at the first failure; VIDs
 * already programmed are not rolled back here (callers handle that).
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLAN entries on the port; the default VID is kept unless
 * @flush_default is set.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the {port, VID} from whichever entity currently uses it:
 * a bridge port, or (if it has a FID but no bridge port) the router.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a {port, VID} entry: program HW membership first, then track it
 * in the port's VLAN list. The default VID is installed untagged.
 * Returns ERR_PTR(-EEXIST) if the VID already exists on the port.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Tear down a {port, VID} entry: detach from bridge/router, unlink,
 * free, and finally remove HW membership.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* .ndo_vlan_rx_add_vid handler. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

/* .ndo_vlan_rx_kill_vid handler. */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Linear lookup of a matchall offload entry by its TC cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirred action as a SPAN session towards act->dev. */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
					true, &mirror->span_id);
}

/* Tear down the SPAN session created by the matchall mirror add. */
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only one sampler per port is
 * supported. The psample group pointer is published with
 * rcu_assign_pointer() before HW sampling is enabled, and cleared again
 * if enabling fails.
 * NOTE(review): the @cls parameter is unused here -- presumably kept for
 * signature symmetry with the mirror variant; verify before removing.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

/* Disable HW sampling and clear the published psample group. */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int
mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct tc_cls_matchall_offload *f,
			       bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	/* Only a single action per matchall rule is offloadable. */
	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	/* Dispatch on the action type; both mirror and sample require the
	 * all-protocols match.
	 */
	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* Remove a previously offloaded matchall rule identified by its cookie. */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

/* Matchall classifier command dispatcher. */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Flower classifier command dispatcher, operating on an ACL block. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Block callback for the matchall-owning registration; flower is handled
 * by a separate callback, hence the no-op below.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Ingress flavor of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

/* Egress flavor of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* Block callback for flower; cb_priv is the shared ACL block. Matchall
 * is accepted as a no-op since the matchall callback owns it.
 */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Bind a port to a (possibly shared) flower ACL block. The ACL block and
 * its callback registration are created on first bind and refcounted via
 * the block callback; later binds of other ports reuse them.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tcf_block *block, bool ingress,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = __tcf_block_cb_register(block,
						   mlxsw_sp_setup_tc_block_cb_flower,
						   mlxsw_sp, acl_block, extack);
		if (IS_ERR(block_cb)) {
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
	} else {
		acl_block = tcf_block_cb_priv(block_cb);
	}
	tcf_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	return 0;

err_block_bind:
	/* Only unregister/destroy when this was the last reference; the
	 * err_cb_register label intentionally sits inside the if so that
	 * the register-failure path frees the freshly created ACL block.
	 */
	if (!tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
err_cb_register:
		mlxsw_sp_acl_block_destroy(acl_block);
	}
	return err;
}

/* Undo flower_bind: unbind the port and drop a callback reference,
 * destroying the ACL block when the last user goes away.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = tcf_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
		mlxsw_sp_acl_block_destroy(acl_block);
	}
}

/* TC block bind/unbind: register the matchall callback directly and
 * delegate flower to the shared ACL block machinery above.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	switch (f->command) {
	case TC_BLOCK_BIND:
		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
					    mlxsw_sp_port, f->extack);
		if (err)
			return err;
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
							  f->block,
							  ingress,
							  f->extack);
		if (err) {
			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
			return err;
		}
		return 0;
	case TC_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f->block, ingress);
		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* .ndo_setup_tc: dispatch TC offload setup by type. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* NETIF_F_HW_TC feature toggle: refuse to disable while offloaded
 * filters exist; otherwise flip the ACL blocks' disabled refcount.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* NETIF_F_LOOPBACK feature toggle: the port is administratively taken
 * down around the PPLR write when it is running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply one feature toggle if it actually changed, mirroring the result
 * into dev->features on success.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

/* .ndo_set_features: try each supported toggle; on any failure restore
 * the previous feature set and report -EINVAL.
 * NOTE(review): err is accumulated with |= and used only as a boolean
 * "anything failed" flag -- the individual error codes are not returned.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

/* .ndo_get_devlink_port: map the netdev to its devlink port. */
static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open = mlxsw_sp_port_open,
	.ndo_stop = mlxsw_sp_port_stop,
	.ndo_start_xmit = mlxsw_sp_port_xmit,
	.ndo_setup_tc = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
	.ndo_set_features = mlxsw_sp_set_features,
	.ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
};

/* ethtool .get_drvinfo: driver name/version, FW revision, bus info. */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

/* ethtool .get_pauseparam: report the cached pause settings. */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Write the requested pause configuration to the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool .set_pauseparam: pause is mutually exclusive with PFC and
 * autoneg of pause is unsupported. Headroom is (re)sized first; on PFCC
 * failure the headroom is restored from the previous pause state.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* One ethtool statistic: its name, the PPCNT payload getter, and whether
 * the raw value is in buffer cells and must be converted to bytes.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter =
			mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		/* Note the name/getter mismatch ("xmitted" vs "transmitted")
		 * is as-shipped; the string is user-visible ABI and must not
		 * be changed.
		 */
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1972 1973 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { 1974 { 1975 .str = "if_in_discards", 1976 .getter = mlxsw_reg_ppcnt_if_in_discards_get, 1977 }, 1978 { 1979 .str = "if_out_discards", 1980 .getter = mlxsw_reg_ppcnt_if_out_discards_get, 1981 }, 1982 { 1983 .str = "if_out_errors", 1984 .getter = mlxsw_reg_ppcnt_if_out_errors_get, 1985 }, 1986 }; 1987 1988 #define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ 1989 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) 1990 1991 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { 1992 { 1993 .str = "ether_stats_undersize_pkts", 1994 .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, 1995 }, 1996 { 1997 .str = "ether_stats_oversize_pkts", 1998 .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, 1999 }, 2000 { 2001 .str = "ether_stats_fragments", 2002 .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, 2003 }, 2004 { 2005 .str = "ether_pkts64octets", 2006 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, 2007 }, 2008 { 2009 .str = "ether_pkts65to127octets", 2010 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, 2011 }, 2012 { 2013 .str = "ether_pkts128to255octets", 2014 .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 2015 }, 2016 { 2017 .str = "ether_pkts256to511octets", 2018 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 2019 }, 2020 { 2021 .str = "ether_pkts512to1023octets", 2022 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 2023 }, 2024 { 2025 .str = "ether_pkts1024to1518octets", 2026 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 2027 }, 2028 { 2029 .str = "ether_pkts1519to2047octets", 2030 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 2031 }, 2032 { 2033 .str = "ether_pkts2048to4095octets", 2034 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 2035 }, 2036 { 2037 .str = "ether_pkts4096to8191octets", 2038 .getter = 
		mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (EtherLike-MIB) counters read from the PPCNT register. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Per-reason packet discard counters (ingress and egress pipeline drops). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counters; the "_prio" names get the priority number appended
 * by mlxsw_sp_port_get_prio_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-TC counters. tc_transmit_queue is reported by hardware in cells and
 * converted to bytes at read time (cells_bytes flag).
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total ethtool -S counter count: all flat groups plus the per-priority and
 * per-TC groups, replicated once per priority/queue.
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit one "<name>_<prio>" string per per-priority counter, advancing the
 * caller's cursor by ETH_GSTRING_LEN for each string.
 */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		/* %.29s + '_' + one digit keeps the result inside
		 * ETH_GSTRING_LEN (32 bytes incl. NUL).
		 */
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Same as above, but for per-traffic-class counters. */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool_ops::get_strings - fill in the counter names for ethtool -S.
 * Order must match mlxsw_sp_port_get_stats() and the total must equal
 * MLXSW_SP_PORT_ETHTOOL_STATS_LEN.
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

/* ethtool_ops::set_phys_id - toggle the port LED for physical
 * identification via the MLCR register.
 */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its descriptor table and length.
 * Returns 0 on success, -EOPNOTSUPP (with a WARN) for unknown groups.
 */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2863_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_3635_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_DISCARD_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Read one PPCNT counter group for @dev into data[data_index..], converting
 * cell-based counters to bytes. On a query error the slots are left
 * untouched (best-effort, consistent with ethtool stats semantics).
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool_ops::get_ethtool_stats - gather all counter groups in the same
 * order as mlxsw_sp_port_get_strings().
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2863 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

/* ethtool_ops::get_sset_count - only the stats string set is supported. */
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Mapping of a Spectrum-1 PTYS protocol bit (or set of bits) to the
 * corresponding ethtool link mode and speed.
 */
struct mlxsw_sp1_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed		= SPEED_100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed		= SPEED_1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed		= SPEED_20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	/* A single 56G PTYS bit fans out to four ethtool media variants. */
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed		= SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Derive the supported port-media ethtool bits (FIBRE / Backplane) from the
 * Spectrum-1 PTYS capability mask.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Translate a Spectrum-1 PTYS protocol mask into an ethtool link-mode
 * bitmap.
 */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2564 mode); 2565 } 2566 } 2567 2568 static void 2569 mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2570 u32 ptys_eth_proto, 2571 struct ethtool_link_ksettings *cmd) 2572 { 2573 u32 speed = SPEED_UNKNOWN; 2574 u8 duplex = DUPLEX_UNKNOWN; 2575 int i; 2576 2577 if (!carrier_ok) 2578 goto out; 2579 2580 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2581 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) { 2582 speed = mlxsw_sp1_port_link_mode[i].speed; 2583 duplex = DUPLEX_FULL; 2584 break; 2585 } 2586 } 2587 out: 2588 cmd->base.speed = speed; 2589 cmd->base.duplex = duplex; 2590 } 2591 2592 static u32 2593 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, 2594 const struct ethtool_link_ksettings *cmd) 2595 { 2596 u32 ptys_proto = 0; 2597 int i; 2598 2599 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2600 if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2601 cmd->link_modes.advertising)) 2602 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2603 } 2604 return ptys_proto; 2605 } 2606 2607 static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) 2608 { 2609 u32 ptys_proto = 0; 2610 int i; 2611 2612 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2613 if (speed == mlxsw_sp1_port_link_mode[i].speed) 2614 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2615 } 2616 return ptys_proto; 2617 } 2618 2619 static u32 2620 mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 2621 { 2622 u32 ptys_proto = 0; 2623 int i; 2624 2625 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2626 if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed) 2627 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2628 } 2629 return ptys_proto; 2630 } 2631 2632 static int 2633 mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2634 u32 *base_speed) 2635 { 2636 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 2637 return 0; 2638 } 2639 2640 
/* Spectrum-1 PTYS pack/unpack wrappers; sp2 uses the extended register
 * layout, so ops indirection is needed here.
 */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp1_from_ptys_link,
	.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
	.to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed,
	.port_speed_base = mlxsw_sp1_port_speed_base,
	.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
};

/* On Spectrum-2 a single extended-PTYS bit maps to a SET of ethtool link
 * modes; each set is spelled out in the arrays below.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

/* Mapping of a Spectrum-2 extended-PTYS bit to its ethtool link-mode set
 * and speed.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
		.speed = SPEED_200000,
	},
};

#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)

/* Spectrum-2 ports always report both media types, regardless of the
 * capability mask.
 */
static void
mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set every ethtool bit belonging to one sp2 link-mode entry. */
static void
mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			  unsigned long *mode)
{
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++)
		__set_bit(link_mode->mask_ethtool[i], mode);
}

static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2899 unsigned long *mode) 2900 { 2901 int i; 2902 2903 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 2904 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 2905 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 2906 mode); 2907 } 2908 } 2909 2910 static void 2911 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2912 u32 ptys_eth_proto, 2913 struct ethtool_link_ksettings *cmd) 2914 { 2915 u32 speed = SPEED_UNKNOWN; 2916 u8 duplex = DUPLEX_UNKNOWN; 2917 int i; 2918 2919 if (!carrier_ok) 2920 goto out; 2921 2922 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 2923 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) { 2924 speed = mlxsw_sp2_port_link_mode[i].speed; 2925 duplex = DUPLEX_FULL; 2926 break; 2927 } 2928 } 2929 out: 2930 cmd->base.speed = speed; 2931 cmd->base.duplex = duplex; 2932 } 2933 2934 static bool 2935 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 2936 const unsigned long *mode) 2937 { 2938 int cnt = 0; 2939 int i; 2940 2941 for (i = 0; i < link_mode->m_ethtool_len; i++) { 2942 if (test_bit(link_mode->mask_ethtool[i], mode)) 2943 cnt++; 2944 } 2945 2946 return cnt == link_mode->m_ethtool_len; 2947 } 2948 2949 static u32 2950 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, 2951 const struct ethtool_link_ksettings *cmd) 2952 { 2953 u32 ptys_proto = 0; 2954 int i; 2955 2956 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 2957 if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 2958 cmd->link_modes.advertising)) 2959 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 2960 } 2961 return ptys_proto; 2962 } 2963 2964 static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) 2965 { 2966 u32 ptys_proto = 0; 2967 int i; 2968 2969 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 2970 if (speed == mlxsw_sp2_port_link_mode[i].speed) 2971 ptys_proto |= 
mlxsw_sp2_port_link_mode[i].mask; 2972 } 2973 return ptys_proto; 2974 } 2975 2976 static u32 2977 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 2978 { 2979 u32 ptys_proto = 0; 2980 int i; 2981 2982 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 2983 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 2984 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 2985 } 2986 return ptys_proto; 2987 } 2988 2989 static int 2990 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2991 u32 *base_speed) 2992 { 2993 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2994 u32 eth_proto_cap; 2995 int err; 2996 2997 /* In Spectrum-2, the speed of 1x can change from port to port, so query 2998 * it from firmware. 2999 */ 3000 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3001 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3002 if (err) 3003 return err; 3004 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3005 3006 if (eth_proto_cap & 3007 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3008 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3009 return 0; 3010 } 3011 3012 if (eth_proto_cap & 3013 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3014 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3015 return 0; 3016 } 3017 3018 return -EIO; 3019 } 3020 3021 static void 3022 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3023 u8 local_port, u32 proto_admin, 3024 bool autoneg) 3025 { 3026 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3027 } 3028 3029 static void 3030 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3031 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3032 u32 *p_eth_proto_oper) 3033 { 3034 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3035 p_eth_proto_admin, p_eth_proto_oper); 3036 } 3037 3038 static const struct mlxsw_sp_port_type_speed_ops 3039 mlxsw_sp2_port_type_speed_ops = { 3040 
.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3041 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3042 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3043 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3044 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3045 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3046 .port_speed_base = mlxsw_sp2_port_speed_base, 3047 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3048 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3049 }; 3050 3051 static void 3052 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3053 struct ethtool_link_ksettings *cmd) 3054 { 3055 const struct mlxsw_sp_port_type_speed_ops *ops; 3056 3057 ops = mlxsw_sp->port_type_speed_ops; 3058 3059 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3060 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3061 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3062 3063 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3064 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported); 3065 } 3066 3067 static void 3068 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3069 u32 eth_proto_admin, bool autoneg, 3070 struct ethtool_link_ksettings *cmd) 3071 { 3072 const struct mlxsw_sp_port_type_speed_ops *ops; 3073 3074 ops = mlxsw_sp->port_type_speed_ops; 3075 3076 if (!autoneg) 3077 return; 3078 3079 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3080 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, 3081 cmd->link_modes.advertising); 3082 } 3083 3084 static u8 3085 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 3086 { 3087 switch (connector_type) { 3088 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3089 return PORT_OTHER; 3090 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3091 return PORT_NONE; 3092 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3093 return PORT_TP; 3094 
case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3095 return PORT_AUI; 3096 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3097 return PORT_BNC; 3098 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3099 return PORT_MII; 3100 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3101 return PORT_FIBRE; 3102 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3103 return PORT_DA; 3104 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3105 return PORT_OTHER; 3106 default: 3107 WARN_ON_ONCE(1); 3108 return PORT_OTHER; 3109 } 3110 } 3111 3112 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3113 struct ethtool_link_ksettings *cmd) 3114 { 3115 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3116 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3117 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3118 const struct mlxsw_sp_port_type_speed_ops *ops; 3119 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3120 u8 connector_type; 3121 bool autoneg; 3122 int err; 3123 3124 ops = mlxsw_sp->port_type_speed_ops; 3125 3126 autoneg = mlxsw_sp_port->link.autoneg; 3127 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3128 0, false); 3129 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3130 if (err) 3131 return err; 3132 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3133 ð_proto_admin, ð_proto_oper); 3134 3135 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd); 3136 3137 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3138 cmd); 3139 3140 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 3141 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3142 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3143 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3144 eth_proto_oper, cmd); 3145 3146 return 0; 3147 } 3148 3149 static int 3150 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3151 const struct ethtool_link_ksettings *cmd) 3152 { 3153 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3154 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3155 const struct mlxsw_sp_port_type_speed_ops *ops; 3156 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3157 u32 eth_proto_cap, eth_proto_new; 3158 bool autoneg; 3159 int err; 3160 3161 ops = mlxsw_sp->port_type_speed_ops; 3162 3163 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3164 0, false); 3165 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3166 if (err) 3167 return err; 3168 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3169 3170 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3171 if (!autoneg && cmd->base.speed == SPEED_56000) { 3172 netdev_err(dev, "56G not supported with autoneg off\n"); 3173 return -EINVAL; 3174 } 3175 eth_proto_new = autoneg ? 
3176 ops->to_ptys_advert_link(mlxsw_sp, cmd) : 3177 ops->to_ptys_speed(mlxsw_sp, cmd->base.speed); 3178 3179 eth_proto_new = eth_proto_new & eth_proto_cap; 3180 if (!eth_proto_new) { 3181 netdev_err(dev, "No supported speed requested\n"); 3182 return -EINVAL; 3183 } 3184 3185 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3186 eth_proto_new, autoneg); 3187 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3188 if (err) 3189 return err; 3190 3191 mlxsw_sp_port->link.autoneg = autoneg; 3192 3193 if (!netif_running(dev)) 3194 return 0; 3195 3196 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3197 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3198 3199 return 0; 3200 } 3201 3202 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3203 struct ethtool_modinfo *modinfo) 3204 { 3205 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3206 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3207 int err; 3208 3209 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3210 mlxsw_sp_port->mapping.module, 3211 modinfo); 3212 3213 return err; 3214 } 3215 3216 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3217 struct ethtool_eeprom *ee, 3218 u8 *data) 3219 { 3220 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3221 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3222 int err; 3223 3224 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3225 mlxsw_sp_port->mapping.module, ee, 3226 data); 3227 3228 return err; 3229 } 3230 3231 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3232 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 3233 .get_link = ethtool_op_get_link, 3234 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 3235 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3236 .get_strings = mlxsw_sp_port_get_strings, 3237 .set_phys_id = mlxsw_sp_port_set_phys_id, 3238 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3239 .get_sset_count = mlxsw_sp_port_get_sset_count, 3240 
.get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3241 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3242 .get_module_info = mlxsw_sp_get_module_info, 3243 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3244 }; 3245 3246 static int 3247 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 3248 { 3249 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3250 const struct mlxsw_sp_port_type_speed_ops *ops; 3251 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3252 u32 eth_proto_admin; 3253 u32 upper_speed; 3254 u32 base_speed; 3255 int err; 3256 3257 ops = mlxsw_sp->port_type_speed_ops; 3258 3259 err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, 3260 &base_speed); 3261 if (err) 3262 return err; 3263 upper_speed = base_speed * width; 3264 3265 eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); 3266 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3267 eth_proto_admin, mlxsw_sp_port->link.autoneg); 3268 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3269 } 3270 3271 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3272 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3273 bool dwrr, u8 dwrr_weight) 3274 { 3275 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3276 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3277 3278 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3279 next_index); 3280 mlxsw_reg_qeec_de_set(qeec_pl, true); 3281 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3282 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 3283 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3284 } 3285 3286 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 3287 enum mlxsw_reg_qeec_hr hr, u8 index, 3288 u8 next_index, u32 maxrate) 3289 { 3290 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3291 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3292 3293 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3294 
next_index); 3295 mlxsw_reg_qeec_mase_set(qeec_pl, true); 3296 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 3297 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3298 } 3299 3300 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 3301 enum mlxsw_reg_qeec_hr hr, u8 index, 3302 u8 next_index, u32 minrate) 3303 { 3304 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3305 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3306 3307 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3308 next_index); 3309 mlxsw_reg_qeec_mise_set(qeec_pl, true); 3310 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 3311 3312 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3313 } 3314 3315 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 3316 u8 switch_prio, u8 tclass) 3317 { 3318 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3319 char qtct_pl[MLXSW_REG_QTCT_LEN]; 3320 3321 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 3322 tclass); 3323 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 3324 } 3325 3326 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 3327 { 3328 int err, i; 3329 3330 /* Setup the elements hierarcy, so that each TC is linked to 3331 * one subgroup, which are all member in the same group. 
3332 */ 3333 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3334 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 3335 0); 3336 if (err) 3337 return err; 3338 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3339 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3340 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 3341 0, false, 0); 3342 if (err) 3343 return err; 3344 } 3345 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3346 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3347 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 3348 false, 0); 3349 if (err) 3350 return err; 3351 3352 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3353 MLXSW_REG_QEEC_HIERARCY_TC, 3354 i + 8, i, 3355 true, 100); 3356 if (err) 3357 return err; 3358 } 3359 3360 /* Make sure the max shaper is disabled in all hierarchies that 3361 * support it. 3362 */ 3363 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3364 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 3365 MLXSW_REG_QEEC_MAS_DIS); 3366 if (err) 3367 return err; 3368 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3369 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3370 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 3371 i, 0, 3372 MLXSW_REG_QEEC_MAS_DIS); 3373 if (err) 3374 return err; 3375 } 3376 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3377 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3378 MLXSW_REG_QEEC_HIERARCY_TC, 3379 i, i, 3380 MLXSW_REG_QEEC_MAS_DIS); 3381 if (err) 3382 return err; 3383 3384 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3385 MLXSW_REG_QEEC_HIERARCY_TC, 3386 i + 8, i, 3387 MLXSW_REG_QEEC_MAS_DIS); 3388 if (err) 3389 return err; 3390 } 3391 3392 /* Configure the min shaper for multicast TCs. */ 3393 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3394 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3395 MLXSW_REG_QEEC_HIERARCY_TC, 3396 i + 8, i, 3397 MLXSW_REG_QEEC_MIS_MIN); 3398 if (err) 3399 return err; 3400 } 3401 3402 /* Map all priorities to traffic class 0. 
*/ 3403 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3404 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3405 if (err) 3406 return err; 3407 } 3408 3409 return 0; 3410 } 3411 3412 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3413 bool enable) 3414 { 3415 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3416 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3417 3418 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3419 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3420 } 3421 3422 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3423 bool split, u8 module, u8 width, u8 lane) 3424 { 3425 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3426 struct mlxsw_sp_port *mlxsw_sp_port; 3427 struct net_device *dev; 3428 int err; 3429 3430 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, 3431 module + 1, split, lane / width, 3432 mlxsw_sp->base_mac, 3433 sizeof(mlxsw_sp->base_mac)); 3434 if (err) { 3435 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3436 local_port); 3437 return err; 3438 } 3439 3440 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3441 if (!dev) { 3442 err = -ENOMEM; 3443 goto err_alloc_etherdev; 3444 } 3445 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3446 mlxsw_sp_port = netdev_priv(dev); 3447 mlxsw_sp_port->dev = dev; 3448 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3449 mlxsw_sp_port->local_port = local_port; 3450 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 3451 mlxsw_sp_port->split = split; 3452 mlxsw_sp_port->mapping.module = module; 3453 mlxsw_sp_port->mapping.width = width; 3454 mlxsw_sp_port->mapping.lane = lane; 3455 mlxsw_sp_port->link.autoneg = 1; 3456 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3457 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3458 3459 mlxsw_sp_port->pcpu_stats = 3460 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 3461 if (!mlxsw_sp_port->pcpu_stats) { 3462 err = -ENOMEM; 3463 goto err_alloc_stats; 
3464 } 3465 3466 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3467 GFP_KERNEL); 3468 if (!mlxsw_sp_port->sample) { 3469 err = -ENOMEM; 3470 goto err_alloc_sample; 3471 } 3472 3473 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3474 &update_stats_cache); 3475 3476 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3477 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3478 3479 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane); 3480 if (err) { 3481 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3482 mlxsw_sp_port->local_port); 3483 goto err_port_module_map; 3484 } 3485 3486 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3487 if (err) { 3488 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3489 mlxsw_sp_port->local_port); 3490 goto err_port_swid_set; 3491 } 3492 3493 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3494 if (err) { 3495 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3496 mlxsw_sp_port->local_port); 3497 goto err_dev_addr_init; 3498 } 3499 3500 netif_carrier_off(dev); 3501 3502 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3503 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3504 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3505 3506 dev->min_mtu = 0; 3507 dev->max_mtu = ETH_MAX_MTU; 3508 3509 /* Each packet needs to have a Tx header (metadata) on top all other 3510 * headers. 
3511 */ 3512 dev->needed_headroom = MLXSW_TXHDR_LEN; 3513 3514 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3515 if (err) { 3516 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3517 mlxsw_sp_port->local_port); 3518 goto err_port_system_port_mapping_set; 3519 } 3520 3521 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 3522 if (err) { 3523 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3524 mlxsw_sp_port->local_port); 3525 goto err_port_speed_by_width_set; 3526 } 3527 3528 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3529 if (err) { 3530 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3531 mlxsw_sp_port->local_port); 3532 goto err_port_mtu_set; 3533 } 3534 3535 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3536 if (err) 3537 goto err_port_admin_status_set; 3538 3539 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3540 if (err) { 3541 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3542 mlxsw_sp_port->local_port); 3543 goto err_port_buffers_init; 3544 } 3545 3546 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3547 if (err) { 3548 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3549 mlxsw_sp_port->local_port); 3550 goto err_port_ets_init; 3551 } 3552 3553 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3554 if (err) { 3555 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3556 mlxsw_sp_port->local_port); 3557 goto err_port_tc_mc_mode; 3558 } 3559 3560 /* ETS and buffers must be initialized before DCB. 
*/ 3561 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3562 if (err) { 3563 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3564 mlxsw_sp_port->local_port); 3565 goto err_port_dcb_init; 3566 } 3567 3568 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3569 if (err) { 3570 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3571 mlxsw_sp_port->local_port); 3572 goto err_port_fids_init; 3573 } 3574 3575 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3576 if (err) { 3577 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3578 mlxsw_sp_port->local_port); 3579 goto err_port_qdiscs_init; 3580 } 3581 3582 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3583 if (err) { 3584 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3585 mlxsw_sp_port->local_port); 3586 goto err_port_nve_init; 3587 } 3588 3589 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3590 if (err) { 3591 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3592 mlxsw_sp_port->local_port); 3593 goto err_port_pvid_set; 3594 } 3595 3596 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3597 MLXSW_SP_DEFAULT_VID); 3598 if (IS_ERR(mlxsw_sp_port_vlan)) { 3599 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3600 mlxsw_sp_port->local_port); 3601 err = PTR_ERR(mlxsw_sp_port_vlan); 3602 goto err_port_vlan_create; 3603 } 3604 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3605 3606 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3607 err = register_netdev(dev); 3608 if (err) { 3609 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3610 mlxsw_sp_port->local_port); 3611 goto err_register_netdev; 3612 } 3613 3614 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3615 mlxsw_sp_port, dev); 3616 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3617 return 0; 3618 3619 err_register_netdev: 3620 
mlxsw_sp->ports[local_port] = NULL; 3621 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 3622 err_port_vlan_create: 3623 err_port_pvid_set: 3624 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3625 err_port_nve_init: 3626 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3627 err_port_qdiscs_init: 3628 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3629 err_port_fids_init: 3630 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3631 err_port_dcb_init: 3632 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3633 err_port_tc_mc_mode: 3634 err_port_ets_init: 3635 err_port_buffers_init: 3636 err_port_admin_status_set: 3637 err_port_mtu_set: 3638 err_port_speed_by_width_set: 3639 err_port_system_port_mapping_set: 3640 err_dev_addr_init: 3641 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3642 err_port_swid_set: 3643 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3644 err_port_module_map: 3645 kfree(mlxsw_sp_port->sample); 3646 err_alloc_sample: 3647 free_percpu(mlxsw_sp_port->pcpu_stats); 3648 err_alloc_stats: 3649 free_netdev(dev); 3650 err_alloc_etherdev: 3651 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3652 return err; 3653 } 3654 3655 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3656 { 3657 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3658 3659 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3660 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3661 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3662 mlxsw_sp->ports[local_port] = NULL; 3663 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 3664 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3665 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3666 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3667 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3668 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3669 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3670 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3671 kfree(mlxsw_sp_port->sample); 3672 
free_percpu(mlxsw_sp_port->pcpu_stats); 3673 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3674 free_netdev(mlxsw_sp_port->dev); 3675 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3676 } 3677 3678 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3679 { 3680 return mlxsw_sp->ports[local_port] != NULL; 3681 } 3682 3683 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3684 { 3685 int i; 3686 3687 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3688 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3689 mlxsw_sp_port_remove(mlxsw_sp, i); 3690 kfree(mlxsw_sp->port_to_module); 3691 kfree(mlxsw_sp->ports); 3692 } 3693 3694 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3695 { 3696 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3697 u8 module, width, lane; 3698 size_t alloc_size; 3699 int i; 3700 int err; 3701 3702 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3703 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3704 if (!mlxsw_sp->ports) 3705 return -ENOMEM; 3706 3707 mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int), 3708 GFP_KERNEL); 3709 if (!mlxsw_sp->port_to_module) { 3710 err = -ENOMEM; 3711 goto err_port_to_module_alloc; 3712 } 3713 3714 for (i = 1; i < max_ports; i++) { 3715 /* Mark as invalid */ 3716 mlxsw_sp->port_to_module[i] = -1; 3717 3718 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3719 &width, &lane); 3720 if (err) 3721 goto err_port_module_info_get; 3722 if (!width) 3723 continue; 3724 mlxsw_sp->port_to_module[i] = module; 3725 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3726 module, width, lane); 3727 if (err) 3728 goto err_port_create; 3729 } 3730 return 0; 3731 3732 err_port_create: 3733 err_port_module_info_get: 3734 for (i--; i >= 1; i--) 3735 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3736 mlxsw_sp_port_remove(mlxsw_sp, i); 3737 kfree(mlxsw_sp->port_to_module); 3738 err_port_to_module_alloc: 3739 kfree(mlxsw_sp->ports); 3740 
return err; 3741 } 3742 3743 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3744 { 3745 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3746 3747 return local_port - offset; 3748 } 3749 3750 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3751 u8 module, unsigned int count, u8 offset) 3752 { 3753 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3754 int err, i; 3755 3756 for (i = 0; i < count; i++) { 3757 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 3758 true, module, width, i * width); 3759 if (err) 3760 goto err_port_create; 3761 } 3762 3763 return 0; 3764 3765 err_port_create: 3766 for (i--; i >= 0; i--) 3767 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 3768 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 3769 return err; 3770 } 3771 3772 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3773 u8 base_port, unsigned int count) 3774 { 3775 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 3776 int i; 3777 3778 /* Split by four means we need to re-create two ports, otherwise 3779 * only one. 
3780 */ 3781 count = count / 2; 3782 3783 for (i = 0; i < count; i++) { 3784 local_port = base_port + i * 2; 3785 if (mlxsw_sp->port_to_module[local_port] < 0) 3786 continue; 3787 module = mlxsw_sp->port_to_module[local_port]; 3788 3789 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 3790 width, 0); 3791 } 3792 } 3793 3794 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3795 unsigned int count, 3796 struct netlink_ext_ack *extack) 3797 { 3798 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3799 u8 local_ports_in_1x, local_ports_in_2x, offset; 3800 struct mlxsw_sp_port *mlxsw_sp_port; 3801 u8 module, cur_width, base_port; 3802 int i; 3803 int err; 3804 3805 if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) || 3806 !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X)) 3807 return -EIO; 3808 3809 local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); 3810 local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); 3811 3812 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3813 if (!mlxsw_sp_port) { 3814 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3815 local_port); 3816 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 3817 return -EINVAL; 3818 } 3819 3820 module = mlxsw_sp_port->mapping.module; 3821 cur_width = mlxsw_sp_port->mapping.width; 3822 3823 if (count != 2 && count != 4) { 3824 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 3825 NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports"); 3826 return -EINVAL; 3827 } 3828 3829 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 3830 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3831 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); 3832 return -EINVAL; 3833 } 3834 3835 /* Make sure we have enough slave (even) ports for the split. 
*/ 3836 if (count == 2) { 3837 offset = local_ports_in_2x; 3838 base_port = local_port; 3839 if (mlxsw_sp->ports[base_port + local_ports_in_2x]) { 3840 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3841 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3842 return -EINVAL; 3843 } 3844 } else { 3845 offset = local_ports_in_1x; 3846 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3847 if (mlxsw_sp->ports[base_port + 1] || 3848 mlxsw_sp->ports[base_port + 3]) { 3849 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3850 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3851 return -EINVAL; 3852 } 3853 } 3854 3855 for (i = 0; i < count; i++) 3856 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 3857 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 3858 3859 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count, 3860 offset); 3861 if (err) { 3862 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3863 goto err_port_split_create; 3864 } 3865 3866 return 0; 3867 3868 err_port_split_create: 3869 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3870 return err; 3871 } 3872 3873 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 3874 struct netlink_ext_ack *extack) 3875 { 3876 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3877 u8 local_ports_in_1x, local_ports_in_2x, offset; 3878 struct mlxsw_sp_port *mlxsw_sp_port; 3879 u8 cur_width, base_port; 3880 unsigned int count; 3881 int i; 3882 3883 if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) || 3884 !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X)) 3885 return -EIO; 3886 3887 local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); 3888 local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); 3889 3890 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3891 if (!mlxsw_sp_port) { 3892 dev_err(mlxsw_sp->bus_info->dev, "Port number 
\"%d\" does not exist\n", 3893 local_port); 3894 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 3895 return -EINVAL; 3896 } 3897 3898 if (!mlxsw_sp_port->split) { 3899 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 3900 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 3901 return -EINVAL; 3902 } 3903 3904 cur_width = mlxsw_sp_port->mapping.width; 3905 count = cur_width == 1 ? 4 : 2; 3906 3907 if (count == 2) 3908 offset = local_ports_in_2x; 3909 else 3910 offset = local_ports_in_1x; 3911 3912 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3913 3914 /* Determine which ports to remove. */ 3915 if (count == 2 && local_port >= base_port + 2) 3916 base_port = base_port + 2; 3917 3918 for (i = 0; i < count; i++) 3919 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 3920 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 3921 3922 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3923 3924 return 0; 3925 } 3926 3927 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3928 char *pude_pl, void *priv) 3929 { 3930 struct mlxsw_sp *mlxsw_sp = priv; 3931 struct mlxsw_sp_port *mlxsw_sp_port; 3932 enum mlxsw_reg_pude_oper_status status; 3933 u8 local_port; 3934 3935 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3936 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3937 if (!mlxsw_sp_port) 3938 return; 3939 3940 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3941 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3942 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3943 netif_carrier_on(mlxsw_sp_port->dev); 3944 } else { 3945 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3946 netif_carrier_off(mlxsw_sp_port->dev); 3947 } 3948 } 3949 3950 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3951 u8 local_port, void *priv) 3952 { 3953 struct mlxsw_sp *mlxsw_sp = priv; 3954 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3955 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3956 3957 
if (unlikely(!mlxsw_sp_port)) { 3958 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3959 local_port); 3960 return; 3961 } 3962 3963 skb->dev = mlxsw_sp_port->dev; 3964 3965 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3966 u64_stats_update_begin(&pcpu_stats->syncp); 3967 pcpu_stats->rx_packets++; 3968 pcpu_stats->rx_bytes += skb->len; 3969 u64_stats_update_end(&pcpu_stats->syncp); 3970 3971 skb->protocol = eth_type_trans(skb, skb->dev); 3972 netif_receive_skb(skb); 3973 } 3974 3975 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3976 void *priv) 3977 { 3978 skb->offload_fwd_mark = 1; 3979 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3980 } 3981 3982 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 3983 u8 local_port, void *priv) 3984 { 3985 skb->offload_l3_fwd_mark = 1; 3986 skb->offload_fwd_mark = 1; 3987 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3988 } 3989 3990 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 3991 void *priv) 3992 { 3993 struct mlxsw_sp *mlxsw_sp = priv; 3994 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3995 struct psample_group *psample_group; 3996 u32 size; 3997 3998 if (unlikely(!mlxsw_sp_port)) { 3999 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4000 local_port); 4001 goto out; 4002 } 4003 if (unlikely(!mlxsw_sp_port->sample)) { 4004 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 4005 local_port); 4006 goto out; 4007 } 4008 4009 size = mlxsw_sp_port->sample->truncate ? 
4010 mlxsw_sp_port->sample->trunc_size : skb->len; 4011 4012 rcu_read_lock(); 4013 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 4014 if (!psample_group) 4015 goto out_unlock; 4016 psample_sample_packet(psample_group, skb, size, 4017 mlxsw_sp_port->dev->ifindex, 0, 4018 mlxsw_sp_port->sample->rate); 4019 out_unlock: 4020 rcu_read_unlock(); 4021 out: 4022 consume_skb(skb); 4023 } 4024 4025 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4026 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4027 _is_ctrl, SP_##_trap_group, DISCARD) 4028 4029 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4030 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4031 _is_ctrl, SP_##_trap_group, DISCARD) 4032 4033 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4034 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 4035 _is_ctrl, SP_##_trap_group, DISCARD) 4036 4037 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 4038 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 4039 4040 static const struct mlxsw_listener mlxsw_sp_listener[] = { 4041 /* Events */ 4042 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 4043 /* L2 traps */ 4044 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 4045 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 4046 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 4047 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 4048 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 4049 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 4050 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 4051 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 4052 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 4053 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 4054 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 4055 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 
	/* IPv6 MLD control packets; trapped/mirrored so the bridge can do
	 * multicast snooping.
	 */
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	/* IPv6 ND (NS/NA/RS/RA/redirect) go to the dedicated IPV6_ND group. */
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
};

/* Program a rate/burst policer (QPCR register) for each CPU trap group so
 * trapped traffic cannot overwhelm the host. Groups without an explicit
 * case are skipped and keep whatever the device default is. The rate/burst
 * values are written as-is into the QPCR register; ir_units selects the
 * rate unit (MLXSW_REG_QPCR_IR_UNITS_M).
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		/* NOTE(review): same values as the previous arm; kept as a
		 * separate case so IP2ME can be tuned independently.
		 */
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Bind each trap group to a policer, priority and traffic class (HTGT
 * register). policer_id mirrors the group index except for the EVENT
 * group, which runs unpolicered. Returns -EIO if a needed policer index
 * exceeds the device's policer count.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* Sanity: the chosen policer must exist on this device. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Full trap setup: policers, then trap groups, then register every listener
 * in mlxsw_sp_listener[]. On listener registration failure, unregister the
 * listeners registered so far (reverse order) and return the error.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &mlxsw_sp_listener[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister every listener registered by mlxsw_sp_traps_init(). */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure LAG hashing (SLCR register) with a per-device seed derived from
 * the base MAC, then allocate the in-driver LAG table sized by the device's
 * MAX_LAG resource. The table is freed in mlxsw_sp_lag_fini().
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	/* Seed from base MAC so different systems hash flows differently. */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

/* Release the LAG table allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Set up the EMAD trap group (no policer, default priority/TC) so register
 * access works before the full trap configuration is in place.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* Per-ASIC-generation PTP clock hooks; clock_init may return NULL. */
struct mlxsw_sp_ptp_ops {
	struct mlxsw_sp_ptp_clock *
		(*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
	void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
};

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common init for all Spectrum generations. Called after the per-generation
 * init (mlxsw_sp1_init/mlxsw_sp2_init) has filled in the ops pointers.
 * Initialization order matters; each failure path unwinds exactly what was
 * set up before it via the goto ladder at the bottom.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: select the SP1 ops tables and firmware, then run the
 * common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Spectrum-2 init: select the SP2 ops tables, then run the common init.
 * Note: no req_rev/fw_filename here, so no firmware flashing is requested
 * for SP2 in this code.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Tear down everything set up by mlxsw_sp_init(), in exact reverse order. */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Device configuration profile for Spectrum-1. Unlike SP2, SP1 carries
 * explicit KVD sizing (used_kvd_sizes and the hash single/double parts
 * ratio consumed by mlxsw_sp_kvd_sizes_get()).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Device configuration profile for Spectrum-2; no KVD sizing fields. */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Build devlink size-parameter descriptors for the KVD and its three
 * partitions (linear, hash-single, hash-double). Each partition's maximum
 * is the total KVD minus the minimum sizes of the other two.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the SP1 KVD devlink resource tree: KVD at the top, with linear,
 * hash-double and hash-single children. Default partition sizes come from
 * mlxsw_sp1_config_profile; the double/single split follows the profile's
 * parts ratio rounded down to the KVD granularity.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;
	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the non-linear remainder between double and single hash
	 * partitions per the profile's parts ratio, rounded down to the
	 * KVD granularity; single gets whatever is left.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}

/* Spectrum-2 has no driver-managed KVD partitions; nothing to register. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return 0;
}

/* Resolve the effective KVD linear/double/single partition sizes: prefer
 * user-provided values from devlink, fall back to the profile defaults,
 * then sanity-check the result against the device's minimum sizes.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink 'fw_load_policy' validator: only 'driver' and 'flash' values are
 * supported by this driver.
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink params and set the driverinit default of
 * fw_load_policy to 'driver'.
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* devlink runtime getter for the ACL region rehash interval (SP2 only). */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp =
mlxsw_core_driver_priv(mlxsw_core); 4867 4868 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 4869 return 0; 4870 } 4871 4872 static int 4873 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 4874 struct devlink_param_gset_ctx *ctx) 4875 { 4876 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 4877 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4878 4879 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 4880 } 4881 4882 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 4883 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 4884 "acl_region_rehash_interval", 4885 DEVLINK_PARAM_TYPE_U32, 4886 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 4887 mlxsw_sp_params_acl_region_rehash_intrvl_get, 4888 mlxsw_sp_params_acl_region_rehash_intrvl_set, 4889 NULL), 4890 }; 4891 4892 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 4893 { 4894 struct devlink *devlink = priv_to_devlink(mlxsw_core); 4895 union devlink_param_value value; 4896 int err; 4897 4898 err = mlxsw_sp_params_register(mlxsw_core); 4899 if (err) 4900 return err; 4901 4902 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 4903 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 4904 if (err) 4905 goto err_devlink_params_register; 4906 4907 value.vu32 = 0; 4908 devlink_param_driverinit_value_set(devlink, 4909 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 4910 value); 4911 return 0; 4912 4913 err_devlink_params_register: 4914 mlxsw_sp_params_unregister(mlxsw_core); 4915 return err; 4916 } 4917 4918 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 4919 { 4920 devlink_params_unregister(priv_to_devlink(mlxsw_core), 4921 mlxsw_sp2_devlink_params, 4922 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 4923 mlxsw_sp_params_unregister(mlxsw_core); 4924 } 4925 4926 static struct mlxsw_driver mlxsw_sp1_driver = { 4927 .kind = mlxsw_sp1_driver_name, 4928 .priv_size = 
sizeof(struct mlxsw_sp), 4929 .init = mlxsw_sp1_init, 4930 .fini = mlxsw_sp_fini, 4931 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 4932 .port_split = mlxsw_sp_port_split, 4933 .port_unsplit = mlxsw_sp_port_unsplit, 4934 .sb_pool_get = mlxsw_sp_sb_pool_get, 4935 .sb_pool_set = mlxsw_sp_sb_pool_set, 4936 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4937 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4938 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4939 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4940 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4941 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4942 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4943 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4944 .flash_update = mlxsw_sp_flash_update, 4945 .txhdr_construct = mlxsw_sp_txhdr_construct, 4946 .resources_register = mlxsw_sp1_resources_register, 4947 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 4948 .params_register = mlxsw_sp_params_register, 4949 .params_unregister = mlxsw_sp_params_unregister, 4950 .txhdr_len = MLXSW_TXHDR_LEN, 4951 .profile = &mlxsw_sp1_config_profile, 4952 .res_query_enabled = true, 4953 }; 4954 4955 static struct mlxsw_driver mlxsw_sp2_driver = { 4956 .kind = mlxsw_sp2_driver_name, 4957 .priv_size = sizeof(struct mlxsw_sp), 4958 .init = mlxsw_sp2_init, 4959 .fini = mlxsw_sp_fini, 4960 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 4961 .port_split = mlxsw_sp_port_split, 4962 .port_unsplit = mlxsw_sp_port_unsplit, 4963 .sb_pool_get = mlxsw_sp_sb_pool_get, 4964 .sb_pool_set = mlxsw_sp_sb_pool_set, 4965 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4966 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4967 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4968 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4969 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4970 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4971 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4972 
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4973 .flash_update = mlxsw_sp_flash_update, 4974 .txhdr_construct = mlxsw_sp_txhdr_construct, 4975 .resources_register = mlxsw_sp2_resources_register, 4976 .params_register = mlxsw_sp2_params_register, 4977 .params_unregister = mlxsw_sp2_params_unregister, 4978 .txhdr_len = MLXSW_TXHDR_LEN, 4979 .profile = &mlxsw_sp2_config_profile, 4980 .res_query_enabled = true, 4981 }; 4982 4983 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 4984 { 4985 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 4986 } 4987 4988 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 4989 { 4990 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 4991 int ret = 0; 4992 4993 if (mlxsw_sp_port_dev_check(lower_dev)) { 4994 *p_mlxsw_sp_port = netdev_priv(lower_dev); 4995 ret = 1; 4996 } 4997 4998 return ret; 4999 } 5000 5001 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 5002 { 5003 struct mlxsw_sp_port *mlxsw_sp_port; 5004 5005 if (mlxsw_sp_port_dev_check(dev)) 5006 return netdev_priv(dev); 5007 5008 mlxsw_sp_port = NULL; 5009 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 5010 5011 return mlxsw_sp_port; 5012 } 5013 5014 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 5015 { 5016 struct mlxsw_sp_port *mlxsw_sp_port; 5017 5018 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 5019 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL; 5020 } 5021 5022 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 5023 { 5024 struct mlxsw_sp_port *mlxsw_sp_port; 5025 5026 if (mlxsw_sp_port_dev_check(dev)) 5027 return netdev_priv(dev); 5028 5029 mlxsw_sp_port = NULL; 5030 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 5031 &mlxsw_sp_port); 5032 5033 return mlxsw_sp_port; 5034 } 5035 5036 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 5037 { 5038 struct mlxsw_sp_port *mlxsw_sp_port; 5039 5040 rcu_read_lock(); 5041 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 5042 if (mlxsw_sp_port) 5043 dev_hold(mlxsw_sp_port->dev); 5044 rcu_read_unlock(); 5045 return mlxsw_sp_port; 5046 } 5047 5048 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 5049 { 5050 dev_put(mlxsw_sp_port->dev); 5051 } 5052 5053 static void 5054 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 5055 struct net_device *lag_dev) 5056 { 5057 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 5058 struct net_device *upper_dev; 5059 struct list_head *iter; 5060 5061 if (netif_is_bridge_port(lag_dev)) 5062 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 5063 5064 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 5065 if (!netif_is_bridge_port(upper_dev)) 5066 continue; 5067 br_dev = netdev_master_upper_dev_get(upper_dev); 5068 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 5069 } 5070 } 5071 5072 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5073 { 5074 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5075 5076 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 5077 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5078 } 5079 5080 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5081 { 5082 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5083 5084 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 5085 return 
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Map @mlxsw_sp_port into LAG @lag_id at member slot @port_index (SLCOR
 * register).
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove @mlxsw_sp_port from LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable the port's LAG collector for @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable the port's LAG collector for @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Look up the LAG index already assigned to @lag_dev, or pick a free one.
 * Returns -EBUSY when all MAX_LAG entries are taken by other devices.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			/* Remember the first unused slot as a fallback. */
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate that @lag_dev can be offloaded: a LAG index is available and
 * the LAG uses hash-based Tx. Reports the reason via @extack.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member slot in LAG @lag_id; -EBUSY when the LAG is full. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Enslave @mlxsw_sp_port to LAG device @lag_dev, creating the hardware
 * LAG on first use.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member: create the LAG in hardware. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id,
					port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	/* Only destroy the LAG if this (failed) join was the one that
	 * created it; ref_count is still zero in that case.
	 */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Undo mlxsw_sp_port_lag_join(): unmap the port, flush its VLANs, leave
 * any bridges reached through the LAG, and destroy the hardware LAG when
 * the last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* Add @mlxsw_sp_port to the distributor of LAG @lag_id (SLDR register). */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove @mlxsw_sp_port from the distributor of LAG @lag_id (SLDR). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable the port's collector and distributor on its LAG; rolls the
 * collector back if the distributor step fails.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Reverse of mlxsw_sp_port_lag_col_dist_enable(); re-adds the port to
 * the distributor if disabling the collector fails.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Reflect the bonding driver's tx_enabled state into the hardware LAG. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set every VID on @mlxsw_sp_port to the forwarding or discarding STP
 * state via a single SPMS register write.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp =
mlxsw_sp_port->mlxsw_sp; 5352 enum mlxsw_reg_spms_state spms_state; 5353 char *spms_pl; 5354 u16 vid; 5355 int err; 5356 5357 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 5358 MLXSW_REG_SPMS_STATE_DISCARDING; 5359 5360 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 5361 if (!spms_pl) 5362 return -ENOMEM; 5363 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 5364 5365 for (vid = 0; vid < VLAN_N_VID; vid++) 5366 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 5367 5368 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 5369 kfree(spms_pl); 5370 return err; 5371 } 5372 5373 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 5374 { 5375 u16 vid = 1; 5376 int err; 5377 5378 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 5379 if (err) 5380 return err; 5381 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 5382 if (err) 5383 goto err_port_stp_set; 5384 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5385 true, false); 5386 if (err) 5387 goto err_port_vlan_set; 5388 5389 for (; vid <= VLAN_N_VID - 1; vid++) { 5390 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5391 vid, false); 5392 if (err) 5393 goto err_vid_learning_set; 5394 } 5395 5396 return 0; 5397 5398 err_vid_learning_set: 5399 for (vid--; vid >= 1; vid--) 5400 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 5401 err_port_vlan_set: 5402 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5403 err_port_stp_set: 5404 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5405 return err; 5406 } 5407 5408 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 5409 { 5410 u16 vid; 5411 5412 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 5413 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5414 vid, true); 5415 5416 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5417 false, false); 5418 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5419 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5420 } 5421 5422 static bool 
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 5423 { 5424 unsigned int num_vxlans = 0; 5425 struct net_device *dev; 5426 struct list_head *iter; 5427 5428 netdev_for_each_lower_dev(br_dev, dev, iter) { 5429 if (netif_is_vxlan(dev)) 5430 num_vxlans++; 5431 } 5432 5433 return num_vxlans > 1; 5434 } 5435 5436 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 5437 { 5438 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 5439 struct net_device *dev; 5440 struct list_head *iter; 5441 5442 netdev_for_each_lower_dev(br_dev, dev, iter) { 5443 u16 pvid; 5444 int err; 5445 5446 if (!netif_is_vxlan(dev)) 5447 continue; 5448 5449 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 5450 if (err || !pvid) 5451 continue; 5452 5453 if (test_and_set_bit(pvid, vlans)) 5454 return false; 5455 } 5456 5457 return true; 5458 } 5459 5460 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 5461 struct netlink_ext_ack *extack) 5462 { 5463 if (br_multicast_enabled(br_dev)) { 5464 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 5465 return false; 5466 } 5467 5468 if (!br_vlan_enabled(br_dev) && 5469 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 5470 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 5471 return false; 5472 } 5473 5474 if (br_vlan_enabled(br_dev) && 5475 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 5476 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 5477 return false; 5478 } 5479 5480 return true; 5481 } 5482 5483 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 5484 struct net_device *dev, 5485 unsigned long event, void *ptr) 5486 { 5487 struct netdev_notifier_changeupper_info *info; 5488 struct mlxsw_sp_port *mlxsw_sp_port; 5489 struct netlink_ext_ack *extack; 5490 struct net_device *upper_dev; 5491 struct mlxsw_sp *mlxsw_sp; 5492 int err = 0; 5493 
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Veto upper device types the driver cannot offload. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only an unlink of a bridged VLAN upper needs
			 * handling here.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Propagate bonding lower-state (tx_enabled) changes for LAG member
 * ports into the hardware LAG.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch netdev notifier events targeting a front-panel port. */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replay a LAG device event on each mlxsw_sp port member of the LAG;
 * stops at the first non-zero return.
 */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle [PRE]CHANGEUPPER for a VLAN device on top of an mlxsw_sp port;
 * only bridge and macvlan uppers are supported.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed anything else. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Replay a VLAN-over-LAG device event on each mlxsw_sp member port. */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle [PRE]CHANGEUPPER for a VLAN device on top of a bridge; only
 * macvlan uppers (over a router interface) are supported.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct
	       netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Nothing to do for bridges this driver does not service. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Route a VLAN device event according to what its real device is:
 * a port, a LAG, or a bridge.
 */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle [PRE]CHANGEUPPER for a bridge device; only VLAN and macvlan
 * uppers are supported.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* macvlan devices serviced by this driver cannot have uppers of their
 * own; veto any at PRECHANGEUPPER time (VRF is handled elsewhere).
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* True when the event is a [PRE]CHANGEUPPER whose upper device is an L3
 * master (VRF).
 */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events for a VxLAN device whose bridge is serviced by this
 * driver: join the device on linking/PRE_UP and leave on unlinking/DOWN.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info
				*cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Top-level netdev notifier: invalidate SPAN entries for unregistered
 * ports, re-resolve SPAN, then dispatch the event by device type.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE(review): the VxLAN handler sits outside the else-if chain
	 * below, so its error can be overwritten when @dev also matches a
	 * later case - presumably intentional; confirm.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX,
PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 5999 {0, }, 6000 }; 6001 6002 static struct pci_driver mlxsw_sp1_pci_driver = { 6003 .name = mlxsw_sp1_driver_name, 6004 .id_table = mlxsw_sp1_pci_id_table, 6005 }; 6006 6007 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6008 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6009 {0, }, 6010 }; 6011 6012 static struct pci_driver mlxsw_sp2_pci_driver = { 6013 .name = mlxsw_sp2_driver_name, 6014 .id_table = mlxsw_sp2_pci_id_table, 6015 }; 6016 6017 static int __init mlxsw_sp_module_init(void) 6018 { 6019 int err; 6020 6021 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6022 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6023 6024 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6025 if (err) 6026 goto err_sp1_core_driver_register; 6027 6028 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6029 if (err) 6030 goto err_sp2_core_driver_register; 6031 6032 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6033 if (err) 6034 goto err_sp1_pci_driver_register; 6035 6036 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6037 if (err) 6038 goto err_sp2_pci_driver_register; 6039 6040 return 0; 6041 6042 err_sp2_pci_driver_register: 6043 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6044 err_sp1_pci_driver_register: 6045 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6046 err_sp2_core_driver_register: 6047 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6048 err_sp1_core_driver_register: 6049 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6050 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6051 return err; 6052 } 6053 6054 static void __exit mlxsw_sp_module_exit(void) 6055 { 6056 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6057 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6058 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6059 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6060 
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6061 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6062 } 6063 6064 module_init(mlxsw_sp_module_init); 6065 module_exit(mlxsw_sp_module_exit); 6066 6067 MODULE_LICENSE("Dual BSD/GPL"); 6068 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 6069 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 6070 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 6071 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 6072 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 6073