1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <net/switchdev.h> 27 #include <net/pkt_cls.h> 28 #include <net/netevent.h> 29 #include <net/addrconf.h> 30 31 #include "spectrum.h" 32 #include "pci.h" 33 #include "core.h" 34 #include "core_env.h" 35 #include "reg.h" 36 #include "port.h" 37 #include "trap.h" 38 #include "txheader.h" 39 #include "spectrum_cnt.h" 40 #include "spectrum_dpipe.h" 41 #include "spectrum_acl_flex_actions.h" 42 #include "spectrum_span.h" 43 #include "spectrum_ptp.h" 44 #include "spectrum_trap.h" 45 #include "../mlxfw/mlxfw.h" 46 47 #define MLXSW_SP1_FWREV_MAJOR 13 48 #define MLXSW_SP1_FWREV_MINOR 2000 49 #define MLXSW_SP1_FWREV_SUBMINOR 2714 50 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 51 52 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 53 .major = MLXSW_SP1_FWREV_MAJOR, 54 .minor = MLXSW_SP1_FWREV_MINOR, 55 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 56 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 57 }; 58 59 #define MLXSW_SP1_FW_FILENAME \ 60 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 61 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 62 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 63 64 #define MLXSW_SP2_FWREV_MAJOR 29 65 #define MLXSW_SP2_FWREV_MINOR 2000 66 #define MLXSW_SP2_FWREV_SUBMINOR 2714 67 68 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 69 .major = MLXSW_SP2_FWREV_MAJOR, 70 .minor = MLXSW_SP2_FWREV_MINOR, 71 .subminor = MLXSW_SP2_FWREV_SUBMINOR, 72 }; 73 74 #define MLXSW_SP2_FW_FILENAME \ 75 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 76 "." __stringify(MLXSW_SP2_FWREV_MINOR) \ 77 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" 78 79 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 80 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 81 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 82 static const char mlxsw_sp_driver_version[] = "1.0"; 83 84 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 85 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 86 }; 87 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 88 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 89 }; 90 91 /* tx_hdr_version 92 * Tx header version. 93 * Must be set to 1. 94 */ 95 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 96 97 /* tx_hdr_ctl 98 * Packet control type. 99 * 0 - Ethernet control (e.g. EMADs, LACP) 100 * 1 - Ethernet data 101 */ 102 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 103 104 /* tx_hdr_proto 105 * Packet protocol type. Must be set to 1 (Ethernet). 106 */ 107 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 108 109 /* tx_hdr_rx_is_router 110 * Packet is sent from the router. Valid for data packets only. 111 */ 112 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 113 114 /* tx_hdr_fid_valid 115 * Indicates if the 'fid' field is valid and should be used for 116 * forwarding lookup. Valid for data packets only. 117 */ 118 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 119 120 /* tx_hdr_swid 121 * Switch partition ID. Must be set to 0. 
122 */ 123 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 124 125 /* tx_hdr_control_tclass 126 * Indicates if the packet should use the control TClass and not one 127 * of the data TClasses. 128 */ 129 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 130 131 /* tx_hdr_etclass 132 * Egress TClass to be used on the egress device on the egress port. 133 */ 134 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 135 136 /* tx_hdr_port_mid 137 * Destination local port for unicast packets. 138 * Destination multicast ID for multicast packets. 139 * 140 * Control packets are directed to a specific egress port, while data 141 * packets are transmitted through the CPU port (0) into the switch partition, 142 * where forwarding rules are applied. 143 */ 144 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 145 146 /* tx_hdr_fid 147 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 148 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 149 * Valid for data packets only. 150 */ 151 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 152 153 /* tx_hdr_type 154 * 0 - Data packets 155 * 6 - Control packets 156 */ 157 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 158 159 struct mlxsw_sp_mlxfw_dev { 160 struct mlxfw_dev mlxfw_dev; 161 struct mlxsw_sp *mlxsw_sp; 162 }; 163 164 struct mlxsw_sp_ptp_ops { 165 struct mlxsw_sp_ptp_clock * 166 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 167 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 168 169 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 170 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 171 172 /* Notify a driver that a packet that might be PTP was received. Driver 173 * is responsible for freeing the passed-in SKB. 174 */ 175 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 176 u8 local_port); 177 178 /* Notify a driver that a timestamped packet was transmitted. Driver 179 * is responsible for freeing the passed-in SKB. 
180 */ 181 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 182 u8 local_port); 183 184 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 185 struct hwtstamp_config *config); 186 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 187 struct hwtstamp_config *config); 188 void (*shaper_work)(struct work_struct *work); 189 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 190 struct ethtool_ts_info *info); 191 int (*get_stats_count)(void); 192 void (*get_stats_strings)(u8 **p); 193 void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, 194 u64 *data, int data_index); 195 }; 196 197 struct mlxsw_sp_span_ops { 198 u32 (*buffsize_get)(int mtu, u32 speed); 199 }; 200 201 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 202 u16 component_index, u32 *p_max_size, 203 u8 *p_align_bits, u16 *p_max_write_size) 204 { 205 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 206 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 207 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 208 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 209 int err; 210 211 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 212 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 213 if (err) 214 return err; 215 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 216 p_max_write_size); 217 218 *p_align_bits = max_t(u8, *p_align_bits, 2); 219 *p_max_write_size = min_t(u16, *p_max_write_size, 220 MLXSW_REG_MCDA_MAX_DATA_LEN); 221 return 0; 222 } 223 224 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 225 { 226 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 227 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 228 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 229 char mcc_pl[MLXSW_REG_MCC_LEN]; 230 u8 control_state; 231 int err; 232 233 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 234 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 235 if (err) 236 return err; 237 238 
mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 239 if (control_state != MLXFW_FSM_STATE_IDLE) 240 return -EBUSY; 241 242 mlxsw_reg_mcc_pack(mcc_pl, 243 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 244 0, *fwhandle, 0); 245 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 246 } 247 248 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 249 u32 fwhandle, u16 component_index, 250 u32 component_size) 251 { 252 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 253 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 254 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 255 char mcc_pl[MLXSW_REG_MCC_LEN]; 256 257 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 258 component_index, fwhandle, component_size); 259 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 260 } 261 262 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 263 u32 fwhandle, u8 *data, u16 size, 264 u32 offset) 265 { 266 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 267 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 268 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 269 char mcda_pl[MLXSW_REG_MCDA_LEN]; 270 271 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 272 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 273 } 274 275 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 276 u32 fwhandle, u16 component_index) 277 { 278 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 279 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 280 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 281 char mcc_pl[MLXSW_REG_MCC_LEN]; 282 283 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 284 component_index, fwhandle, 0); 285 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 286 } 287 288 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 289 { 290 
struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 291 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 292 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 293 char mcc_pl[MLXSW_REG_MCC_LEN]; 294 295 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 296 fwhandle, 0); 297 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 298 } 299 300 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 301 enum mlxfw_fsm_state *fsm_state, 302 enum mlxfw_fsm_state_err *fsm_state_err) 303 { 304 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 305 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 306 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 307 char mcc_pl[MLXSW_REG_MCC_LEN]; 308 u8 control_state; 309 u8 error_code; 310 int err; 311 312 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 313 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 314 if (err) 315 return err; 316 317 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 318 *fsm_state = control_state; 319 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 320 MLXFW_FSM_STATE_ERR_MAX); 321 return 0; 322 } 323 324 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 325 { 326 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 327 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 328 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 329 char mcc_pl[MLXSW_REG_MCC_LEN]; 330 331 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 332 fwhandle, 0); 333 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 334 } 335 336 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 337 { 338 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 339 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 340 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 341 char mcc_pl[MLXSW_REG_MCC_LEN]; 342 343 
mlxsw_reg_mcc_pack(mcc_pl, 344 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 345 fwhandle, 0); 346 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 347 } 348 349 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 350 .component_query = mlxsw_sp_component_query, 351 .fsm_lock = mlxsw_sp_fsm_lock, 352 .fsm_component_update = mlxsw_sp_fsm_component_update, 353 .fsm_block_download = mlxsw_sp_fsm_block_download, 354 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 355 .fsm_activate = mlxsw_sp_fsm_activate, 356 .fsm_query_state = mlxsw_sp_fsm_query_state, 357 .fsm_cancel = mlxsw_sp_fsm_cancel, 358 .fsm_release = mlxsw_sp_fsm_release, 359 }; 360 361 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 362 const struct firmware *firmware, 363 struct netlink_ext_ack *extack) 364 { 365 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 366 .mlxfw_dev = { 367 .ops = &mlxsw_sp_mlxfw_dev_ops, 368 .psid = mlxsw_sp->bus_info->psid, 369 .psid_size = strlen(mlxsw_sp->bus_info->psid), 370 .devlink = priv_to_devlink(mlxsw_sp->core), 371 }, 372 .mlxsw_sp = mlxsw_sp 373 }; 374 int err; 375 376 mlxsw_core_fw_flash_start(mlxsw_sp->core); 377 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 378 firmware, extack); 379 mlxsw_core_fw_flash_end(mlxsw_sp->core); 380 381 return err; 382 } 383 384 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 385 { 386 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 387 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 388 const char *fw_filename = mlxsw_sp->fw_filename; 389 union devlink_param_value value; 390 const struct firmware *firmware; 391 int err; 392 393 /* Don't check if driver does not require it */ 394 if (!req_rev || !fw_filename) 395 return 0; 396 397 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 398 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 399 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 400 &value); 401 if (err) 402 
return err; 403 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 404 return 0; 405 406 /* Validate driver & FW are compatible */ 407 if (rev->major != req_rev->major) { 408 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 409 rev->major, req_rev->major); 410 return -EINVAL; 411 } 412 if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) 413 return 0; 414 415 dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n", 416 rev->major, rev->minor, rev->subminor, req_rev->major, 417 req_rev->minor, req_rev->subminor); 418 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 419 fw_filename); 420 421 err = request_firmware_direct(&firmware, fw_filename, 422 mlxsw_sp->bus_info->dev); 423 if (err) { 424 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 425 fw_filename); 426 return err; 427 } 428 429 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 430 release_firmware(firmware); 431 if (err) 432 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 433 434 /* On FW flash success, tell the caller FW reset is needed 435 * if current FW supports it. 436 */ 437 if (rev->minor >= req_rev->can_reset_minor) 438 return err ? 
err : -EAGAIN; 439 else 440 return 0; 441 } 442 443 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 444 const char *file_name, const char *component, 445 struct netlink_ext_ack *extack) 446 { 447 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 448 const struct firmware *firmware; 449 int err; 450 451 if (component) 452 return -EOPNOTSUPP; 453 454 err = request_firmware_direct(&firmware, file_name, 455 mlxsw_sp->bus_info->dev); 456 if (err) 457 return err; 458 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 459 release_firmware(firmware); 460 461 return err; 462 } 463 464 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 465 unsigned int counter_index, u64 *packets, 466 u64 *bytes) 467 { 468 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 469 int err; 470 471 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 472 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 473 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 474 if (err) 475 return err; 476 if (packets) 477 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 478 if (bytes) 479 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 480 return 0; 481 } 482 483 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 484 unsigned int counter_index) 485 { 486 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 487 488 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 489 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 490 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 491 } 492 493 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 494 unsigned int *p_counter_index) 495 { 496 int err; 497 498 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 499 p_counter_index); 500 if (err) 501 return err; 502 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 503 if (err) 504 goto err_counter_clear; 505 return 0; 506 507 err_counter_clear: 508 mlxsw_sp_counter_free(mlxsw_sp, 
MLXSW_SP_COUNTER_SUB_POOL_FLOW, 509 *p_counter_index); 510 return err; 511 } 512 513 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 514 unsigned int counter_index) 515 { 516 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 517 counter_index); 518 } 519 520 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 521 const struct mlxsw_tx_info *tx_info) 522 { 523 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 524 525 memset(txhdr, 0, MLXSW_TXHDR_LEN); 526 527 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 528 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 529 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 530 mlxsw_tx_hdr_swid_set(txhdr, 0); 531 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 532 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 533 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 534 } 535 536 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 537 { 538 switch (state) { 539 case BR_STATE_FORWARDING: 540 return MLXSW_REG_SPMS_STATE_FORWARDING; 541 case BR_STATE_LEARNING: 542 return MLXSW_REG_SPMS_STATE_LEARNING; 543 case BR_STATE_LISTENING: /* fall-through */ 544 case BR_STATE_DISABLED: /* fall-through */ 545 case BR_STATE_BLOCKING: 546 return MLXSW_REG_SPMS_STATE_DISCARDING; 547 default: 548 BUG(); 549 } 550 } 551 552 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 553 u8 state) 554 { 555 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 556 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 557 char *spms_pl; 558 int err; 559 560 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 561 if (!spms_pl) 562 return -ENOMEM; 563 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 564 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 565 566 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 567 kfree(spms_pl); 568 return err; 569 } 570 571 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 572 { 573 char 
spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 574 int err; 575 576 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 577 if (err) 578 return err; 579 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 580 return 0; 581 } 582 583 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 584 bool is_up) 585 { 586 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 587 char paos_pl[MLXSW_REG_PAOS_LEN]; 588 589 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 590 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 591 MLXSW_PORT_ADMIN_STATUS_DOWN); 592 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 593 } 594 595 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 596 unsigned char *addr) 597 { 598 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 599 char ppad_pl[MLXSW_REG_PPAD_LEN]; 600 601 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 602 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 603 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 604 } 605 606 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 607 { 608 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 609 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 610 611 ether_addr_copy(addr, mlxsw_sp->base_mac); 612 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 613 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 614 } 615 616 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 617 { 618 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 619 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 620 int max_mtu; 621 int err; 622 623 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 624 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 625 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 626 if (err) 627 return err; 628 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 629 630 if (mtu > max_mtu) 631 return -EINVAL; 632 633 mlxsw_reg_pmtu_pack(pmtu_pl, 
mlxsw_sp_port->local_port, mtu); 634 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 635 } 636 637 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 638 { 639 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 640 char pspa_pl[MLXSW_REG_PSPA_LEN]; 641 642 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 643 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 644 } 645 646 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 647 { 648 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 649 char svpe_pl[MLXSW_REG_SVPE_LEN]; 650 651 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 652 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 653 } 654 655 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 656 bool learn_enable) 657 { 658 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 659 char *spvmlr_pl; 660 int err; 661 662 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 663 if (!spvmlr_pl) 664 return -ENOMEM; 665 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 666 learn_enable); 667 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 668 kfree(spvmlr_pl); 669 return err; 670 } 671 672 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 673 u16 vid) 674 { 675 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 676 char spvid_pl[MLXSW_REG_SPVID_LEN]; 677 678 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 679 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 680 } 681 682 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 683 bool allow) 684 { 685 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 686 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 687 688 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 689 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), 
spaft_pl); 690 } 691 692 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 693 { 694 int err; 695 696 if (!vid) { 697 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 698 if (err) 699 return err; 700 } else { 701 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 702 if (err) 703 return err; 704 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 705 if (err) 706 goto err_port_allow_untagged_set; 707 } 708 709 mlxsw_sp_port->pvid = vid; 710 return 0; 711 712 err_port_allow_untagged_set: 713 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 714 return err; 715 } 716 717 static int 718 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 719 { 720 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 721 char sspr_pl[MLXSW_REG_SSPR_LEN]; 722 723 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 724 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 725 } 726 727 static int 728 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, 729 struct mlxsw_sp_port_mapping *port_mapping) 730 { 731 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 732 bool separate_rxtx; 733 u8 module; 734 u8 width; 735 int err; 736 int i; 737 738 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 739 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 740 if (err) 741 return err; 742 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 743 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 744 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 745 746 if (width && !is_power_of_2(width)) { 747 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n", 748 local_port); 749 return -EINVAL; 750 } 751 752 for (i = 0; i < width; i++) { 753 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 754 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 755 local_port); 756 return -EINVAL; 757 } 758 if (separate_rxtx && 
759 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 760 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 761 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 762 local_port); 763 return -EINVAL; 764 } 765 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) { 766 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 767 local_port); 768 return -EINVAL; 769 } 770 } 771 772 port_mapping->module = module; 773 port_mapping->width = width; 774 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 775 return 0; 776 } 777 778 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) 779 { 780 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; 781 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 782 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 783 int i; 784 785 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 786 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 787 for (i = 0; i < port_mapping->width; i++) { 788 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 789 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 790 } 791 792 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 793 } 794 795 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 796 { 797 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 798 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 799 800 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 801 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 802 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 803 } 804 805 static int mlxsw_sp_port_open(struct net_device *dev) 806 { 807 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 808 int err; 809 810 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 811 if (err) 812 return err; 813 netif_start_queue(dev); 814 return 0; 815 } 816 817 static int 
mlxsw_sp_port_stop(struct net_device *dev) 818 { 819 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 820 821 netif_stop_queue(dev); 822 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 823 } 824 825 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 826 struct net_device *dev) 827 { 828 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 829 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 830 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 831 const struct mlxsw_tx_info tx_info = { 832 .local_port = mlxsw_sp_port->local_port, 833 .is_emad = false, 834 }; 835 u64 len; 836 int err; 837 838 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 839 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 840 dev_kfree_skb_any(skb); 841 return NETDEV_TX_OK; 842 } 843 844 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 845 846 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 847 return NETDEV_TX_BUSY; 848 849 if (eth_skb_pad(skb)) { 850 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 851 return NETDEV_TX_OK; 852 } 853 854 mlxsw_sp_txhdr_construct(skb, &tx_info); 855 /* TX header is consumed by HW on the way so we shouldn't count its 856 * bytes as being sent. 857 */ 858 len = skb->len - MLXSW_TXHDR_LEN; 859 860 /* Due to a race we might fail here because of a full queue. In that 861 * unlikely case we simply drop the packet. 
862 */ 863 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 864 865 if (!err) { 866 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 867 u64_stats_update_begin(&pcpu_stats->syncp); 868 pcpu_stats->tx_packets++; 869 pcpu_stats->tx_bytes += len; 870 u64_stats_update_end(&pcpu_stats->syncp); 871 } else { 872 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 873 dev_kfree_skb_any(skb); 874 } 875 return NETDEV_TX_OK; 876 } 877 878 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 879 { 880 } 881 882 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 883 { 884 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 885 struct sockaddr *addr = p; 886 int err; 887 888 if (!is_valid_ether_addr(addr->sa_data)) 889 return -EADDRNOTAVAIL; 890 891 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 892 if (err) 893 return err; 894 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 895 return 0; 896 } 897 898 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 899 int mtu) 900 { 901 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 902 } 903 904 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 905 906 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 907 u16 delay) 908 { 909 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 910 BITS_PER_BYTE)); 911 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 912 mtu); 913 } 914 915 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 916 * Assumes 100m cable and maximum MTU. 
 */
#define MLXSW_SP_PAUSE_DELAY 58752

/* Return the delay (in cells) to provision in a PG buffer: the PFC-specific
 * delay when PFC is enabled, a fixed worst-case PAUSE delay when global pause
 * is enabled, and zero for a lossy configuration.
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

/* Pack one priority-group buffer into the PBMC payload, choosing the lossy
 * or lossless (with Xoff threshold) record format.
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

/* Configure the port's headroom (per-PG buffers) for the given MTU, priority
 * to traffic-class mapping and pause/PFC state. Only PGs that some priority
 * actually maps to are (re)configured; fails with -ENOBUFS if the total
 * requested headroom exceeds what the ASIC can provide for one port.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	/* Read current PBMC state so untouched PGs keep their settings. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		/* PG i needs configuration only if some priority maps to it;
		 * it is lossless if that priority has PFC enabled.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		total_cells = thres_cells + delay_cells;

		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Headroom configuration using the port's current DCB state (ETS prio->TC
 * mapping and PFC), or defaults when DCB is not in use.
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

/* ndo_change_mtu: update headroom, mirroring (SPAN) buffers and the port MTU
 * in hardware; unwind already-applied steps with the old MTU on failure.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Sum the driver-maintained per-CPU software counters (CPU-hit traffic)
 * into @stats, using the u64_stats seqcount to get consistent snapshots.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* ndo_has_offload_stats: only CPU-hit xstats are supported. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* ndo_get_offload_stats: report CPU-hit traffic from the software counters. */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group/priority for this port into @ppcnt_pl. */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill rtnl_link_stats64 from the IEEE 802.3 PPCNT counter group. */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Fill extended per-port stats (ECN marks, per-TC WRED/backlog/tail-drop,
 * per-priority tx counters) from the relevant PPCNT groups. Individual
 * query failures leave the corresponding cached values untouched.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Delayed work: periodically refresh the cached HW stats while the port's
 * carrier is up, then re-arm itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program one SPVM record covering [vid_begin, vid_end] for this port. */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership for a VID range, split into SPVM-sized batches. */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLANs on the port; the default VID is kept unless
 * @flush_default is set.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the port-VLAN from whatever it is bound to (bridge or router). */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a port-VLAN object and add the VID to the port's HW filter.
 * The default VID is installed as untagged. Returns ERR_PTR on failure
 * (-EEXIST if the VID already exists on this port).
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Tear down a port-VLAN: leave bridge/router, unlink, free, and remove the
 * VID from the port's HW filter.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* ndo_vlan_rx_add_vid handler. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

/* ndo_vlan_rx_kill_vid handler. */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* ndo_setup_tc: dispatch TC offload requests to the per-qdisc handlers. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Toggle NETIF_F_HW_TC: refuse to disable while offloaded filters exist. */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

/* Toggle NETIF_F_LOOPBACK via the PPLR register; the port is administratively
 * taken down around the change when it is running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply one feature bit via its handler if its state actually changed,
 * and mirror the result into dev->features.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

/* ndo_set_features: apply the supported feature toggles; on any failure
 * restore the previously applied feature set.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

/* ndo_get_devlink_port: look up the devlink port backing this netdev. */
static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, 1472 struct ifreq *ifr) 1473 { 1474 struct hwtstamp_config config; 1475 int err; 1476 1477 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1478 return -EFAULT; 1479 1480 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, 1481 &config); 1482 if (err) 1483 return err; 1484 1485 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1486 return -EFAULT; 1487 1488 return 0; 1489 } 1490 1491 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, 1492 struct ifreq *ifr) 1493 { 1494 struct hwtstamp_config config; 1495 int err; 1496 1497 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, 1498 &config); 1499 if (err) 1500 return err; 1501 1502 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1503 return -EFAULT; 1504 1505 return 0; 1506 } 1507 1508 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) 1509 { 1510 struct hwtstamp_config config = {0}; 1511 1512 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1513 } 1514 1515 static int 1516 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1517 { 1518 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1519 1520 switch (cmd) { 1521 case SIOCSHWTSTAMP: 1522 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1523 case SIOCGHWTSTAMP: 1524 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1525 default: 1526 return -EOPNOTSUPP; 1527 } 1528 } 1529 1530 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1531 .ndo_open = mlxsw_sp_port_open, 1532 .ndo_stop = mlxsw_sp_port_stop, 1533 .ndo_start_xmit = mlxsw_sp_port_xmit, 1534 .ndo_setup_tc = mlxsw_sp_setup_tc, 1535 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1536 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1537 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1538 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1539 .ndo_has_offload_stats 
= mlxsw_sp_port_has_offload_stats, 1540 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1541 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1542 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1543 .ndo_set_features = mlxsw_sp_set_features, 1544 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1545 .ndo_do_ioctl = mlxsw_sp_port_ioctl, 1546 }; 1547 1548 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1549 struct ethtool_drvinfo *drvinfo) 1550 { 1551 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1553 1554 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1555 sizeof(drvinfo->driver)); 1556 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1557 sizeof(drvinfo->version)); 1558 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1559 "%d.%d.%d", 1560 mlxsw_sp->bus_info->fw_rev.major, 1561 mlxsw_sp->bus_info->fw_rev.minor, 1562 mlxsw_sp->bus_info->fw_rev.subminor); 1563 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1564 sizeof(drvinfo->bus_info)); 1565 } 1566 1567 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1568 struct ethtool_pauseparam *pause) 1569 { 1570 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1571 1572 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1573 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1574 } 1575 1576 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1577 struct ethtool_pauseparam *pause) 1578 { 1579 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1580 1581 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1582 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1583 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1584 1585 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1586 pfcc_pl); 1587 } 1588 1589 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1590 struct ethtool_pauseparam *pause) 1591 { 1592 struct mlxsw_sp_port 
*mlxsw_sp_port = netdev_priv(dev); 1593 bool pause_en = pause->tx_pause || pause->rx_pause; 1594 int err; 1595 1596 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1597 netdev_err(dev, "PFC already enabled on port\n"); 1598 return -EINVAL; 1599 } 1600 1601 if (pause->autoneg) { 1602 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1603 return -EINVAL; 1604 } 1605 1606 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1607 if (err) { 1608 netdev_err(dev, "Failed to configure port's headroom\n"); 1609 return err; 1610 } 1611 1612 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1613 if (err) { 1614 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1615 goto err_port_pause_configure; 1616 } 1617 1618 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1619 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1620 1621 return 0; 1622 1623 err_port_pause_configure: 1624 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1625 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1626 return err; 1627 } 1628 1629 struct mlxsw_sp_port_hw_stats { 1630 char str[ETH_GSTRING_LEN]; 1631 u64 (*getter)(const char *payload); 1632 bool cells_bytes; 1633 }; 1634 1635 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1636 { 1637 .str = "a_frames_transmitted_ok", 1638 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1639 }, 1640 { 1641 .str = "a_frames_received_ok", 1642 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1643 }, 1644 { 1645 .str = "a_frame_check_sequence_errors", 1646 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1647 }, 1648 { 1649 .str = "a_alignment_errors", 1650 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1651 }, 1652 { 1653 .str = "a_octets_transmitted_ok", 1654 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1655 }, 1656 { 1657 .str = "a_octets_received_ok", 1658 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1659 }, 1660 { 1661 .str = 
"a_multicast_frames_xmitted_ok", 1662 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1663 }, 1664 { 1665 .str = "a_broadcast_frames_xmitted_ok", 1666 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1667 }, 1668 { 1669 .str = "a_multicast_frames_received_ok", 1670 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1671 }, 1672 { 1673 .str = "a_broadcast_frames_received_ok", 1674 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1675 }, 1676 { 1677 .str = "a_in_range_length_errors", 1678 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1679 }, 1680 { 1681 .str = "a_out_of_range_length_field", 1682 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1683 }, 1684 { 1685 .str = "a_frame_too_long_errors", 1686 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1687 }, 1688 { 1689 .str = "a_symbol_error_during_carrier", 1690 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1691 }, 1692 { 1693 .str = "a_mac_control_frames_transmitted", 1694 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 1695 }, 1696 { 1697 .str = "a_mac_control_frames_received", 1698 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 1699 }, 1700 { 1701 .str = "a_unsupported_opcodes_received", 1702 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 1703 }, 1704 { 1705 .str = "a_pause_mac_ctrl_frames_received", 1706 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 1707 }, 1708 { 1709 .str = "a_pause_mac_ctrl_frames_xmitted", 1710 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 1711 }, 1712 }; 1713 1714 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 1715 1716 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { 1717 { 1718 .str = "if_in_discards", 1719 .getter = mlxsw_reg_ppcnt_if_in_discards_get, 1720 }, 1721 { 1722 .str = "if_out_discards", 1723 .getter = mlxsw_reg_ppcnt_if_out_discards_get, 1724 }, 1725 { 1726 
.str = "if_out_errors", 1727 .getter = mlxsw_reg_ppcnt_if_out_errors_get, 1728 }, 1729 }; 1730 1731 #define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ 1732 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) 1733 1734 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { 1735 { 1736 .str = "ether_stats_undersize_pkts", 1737 .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, 1738 }, 1739 { 1740 .str = "ether_stats_oversize_pkts", 1741 .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, 1742 }, 1743 { 1744 .str = "ether_stats_fragments", 1745 .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, 1746 }, 1747 { 1748 .str = "ether_pkts64octets", 1749 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, 1750 }, 1751 { 1752 .str = "ether_pkts65to127octets", 1753 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, 1754 }, 1755 { 1756 .str = "ether_pkts128to255octets", 1757 .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 1758 }, 1759 { 1760 .str = "ether_pkts256to511octets", 1761 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 1762 }, 1763 { 1764 .str = "ether_pkts512to1023octets", 1765 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 1766 }, 1767 { 1768 .str = "ether_pkts1024to1518octets", 1769 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 1770 }, 1771 { 1772 .str = "ether_pkts1519to2047octets", 1773 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 1774 }, 1775 { 1776 .str = "ether_pkts2048to4095octets", 1777 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 1778 }, 1779 { 1780 .str = "ether_pkts4096to8191octets", 1781 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, 1782 }, 1783 { 1784 .str = "ether_pkts8192to10239octets", 1785 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, 1786 }, 1787 }; 1788 1789 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ 1790 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) 1791 1792 static struct 
mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { 1793 { 1794 .str = "dot3stats_fcs_errors", 1795 .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, 1796 }, 1797 { 1798 .str = "dot3stats_symbol_errors", 1799 .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, 1800 }, 1801 { 1802 .str = "dot3control_in_unknown_opcodes", 1803 .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, 1804 }, 1805 { 1806 .str = "dot3in_pause_frames", 1807 .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, 1808 }, 1809 }; 1810 1811 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ 1812 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) 1813 1814 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = { 1815 { 1816 .str = "ecn_marked", 1817 .getter = mlxsw_reg_ppcnt_ecn_marked_get, 1818 }, 1819 }; 1820 1821 #define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats) 1822 1823 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { 1824 { 1825 .str = "discard_ingress_general", 1826 .getter = mlxsw_reg_ppcnt_ingress_general_get, 1827 }, 1828 { 1829 .str = "discard_ingress_policy_engine", 1830 .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, 1831 }, 1832 { 1833 .str = "discard_ingress_vlan_membership", 1834 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, 1835 }, 1836 { 1837 .str = "discard_ingress_tag_frame_type", 1838 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, 1839 }, 1840 { 1841 .str = "discard_egress_vlan_membership", 1842 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, 1843 }, 1844 { 1845 .str = "discard_loopback_filter", 1846 .getter = mlxsw_reg_ppcnt_loopback_filter_get, 1847 }, 1848 { 1849 .str = "discard_egress_general", 1850 .getter = mlxsw_reg_ppcnt_egress_general_get, 1851 }, 1852 { 1853 .str = "discard_egress_hoq", 1854 .getter = mlxsw_reg_ppcnt_egress_hoq_get, 1855 }, 1856 { 1857 .str = "discard_egress_policy_engine", 1858 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, 1859 }, 1860 { 1861 
.str = "discard_ingress_tx_link_down", 1862 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, 1863 }, 1864 { 1865 .str = "discard_egress_stp_filter", 1866 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, 1867 }, 1868 { 1869 .str = "discard_egress_sll", 1870 .getter = mlxsw_reg_ppcnt_egress_sll_get, 1871 }, 1872 }; 1873 1874 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ 1875 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) 1876 1877 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 1878 { 1879 .str = "rx_octets_prio", 1880 .getter = mlxsw_reg_ppcnt_rx_octets_get, 1881 }, 1882 { 1883 .str = "rx_frames_prio", 1884 .getter = mlxsw_reg_ppcnt_rx_frames_get, 1885 }, 1886 { 1887 .str = "tx_octets_prio", 1888 .getter = mlxsw_reg_ppcnt_tx_octets_get, 1889 }, 1890 { 1891 .str = "tx_frames_prio", 1892 .getter = mlxsw_reg_ppcnt_tx_frames_get, 1893 }, 1894 { 1895 .str = "rx_pause_prio", 1896 .getter = mlxsw_reg_ppcnt_rx_pause_get, 1897 }, 1898 { 1899 .str = "rx_pause_duration_prio", 1900 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 1901 }, 1902 { 1903 .str = "tx_pause_prio", 1904 .getter = mlxsw_reg_ppcnt_tx_pause_get, 1905 }, 1906 { 1907 .str = "tx_pause_duration_prio", 1908 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 1909 }, 1910 }; 1911 1912 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 1913 1914 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 1915 { 1916 .str = "tc_transmit_queue_tc", 1917 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 1918 .cells_bytes = true, 1919 }, 1920 { 1921 .str = "tc_no_buffer_discard_uc_tc", 1922 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 1923 }, 1924 }; 1925 1926 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 1927 1928 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 1929 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 1930 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 1931 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 1932 
MLXSW_SP_PORT_HW_EXT_STATS_LEN + \ 1933 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 1934 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 1935 IEEE_8021QAZ_MAX_TCS) + \ 1936 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 1937 TC_MAX_QUEUE)) 1938 1939 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 1940 { 1941 int i; 1942 1943 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 1944 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 1945 mlxsw_sp_port_hw_prio_stats[i].str, prio); 1946 *p += ETH_GSTRING_LEN; 1947 } 1948 } 1949 1950 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 1951 { 1952 int i; 1953 1954 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 1955 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 1956 mlxsw_sp_port_hw_tc_stats[i].str, tc); 1957 *p += ETH_GSTRING_LEN; 1958 } 1959 } 1960 1961 static void mlxsw_sp_port_get_strings(struct net_device *dev, 1962 u32 stringset, u8 *data) 1963 { 1964 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1965 u8 *p = data; 1966 int i; 1967 1968 switch (stringset) { 1969 case ETH_SS_STATS: 1970 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 1971 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 1972 ETH_GSTRING_LEN); 1973 p += ETH_GSTRING_LEN; 1974 } 1975 1976 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 1977 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 1978 ETH_GSTRING_LEN); 1979 p += ETH_GSTRING_LEN; 1980 } 1981 1982 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 1983 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 1984 ETH_GSTRING_LEN); 1985 p += ETH_GSTRING_LEN; 1986 } 1987 1988 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 1989 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 1990 ETH_GSTRING_LEN); 1991 p += ETH_GSTRING_LEN; 1992 } 1993 1994 for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) { 1995 memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str, 1996 ETH_GSTRING_LEN); 1997 p += ETH_GSTRING_LEN; 1998 } 1999 2000 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2001 
memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2002 ETH_GSTRING_LEN); 2003 p += ETH_GSTRING_LEN; 2004 } 2005 2006 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2007 mlxsw_sp_port_get_prio_strings(&p, i); 2008 2009 for (i = 0; i < TC_MAX_QUEUE; i++) 2010 mlxsw_sp_port_get_tc_strings(&p, i); 2011 2012 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p); 2013 break; 2014 } 2015 } 2016 2017 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2018 enum ethtool_phys_id_state state) 2019 { 2020 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2021 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2022 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2023 bool active; 2024 2025 switch (state) { 2026 case ETHTOOL_ID_ACTIVE: 2027 active = true; 2028 break; 2029 case ETHTOOL_ID_INACTIVE: 2030 active = false; 2031 break; 2032 default: 2033 return -EOPNOTSUPP; 2034 } 2035 2036 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2037 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2038 } 2039 2040 static int 2041 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2042 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2043 { 2044 switch (grp) { 2045 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2046 *p_hw_stats = mlxsw_sp_port_hw_stats; 2047 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2048 break; 2049 case MLXSW_REG_PPCNT_RFC_2863_CNT: 2050 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2051 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2052 break; 2053 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2054 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2055 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2056 break; 2057 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2058 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2059 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2060 break; 2061 case MLXSW_REG_PPCNT_EXT_CNT: 2062 *p_hw_stats = mlxsw_sp_port_hw_ext_stats; 2063 *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN; 2064 break; 2065 case MLXSW_REG_PPCNT_DISCARD_CNT: 2066 
*p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2067 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2068 break; 2069 case MLXSW_REG_PPCNT_PRIO_CNT: 2070 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2071 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2072 break; 2073 case MLXSW_REG_PPCNT_TC_CNT: 2074 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2075 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2076 break; 2077 default: 2078 WARN_ON(1); 2079 return -EOPNOTSUPP; 2080 } 2081 return 0; 2082 } 2083 2084 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2085 enum mlxsw_reg_ppcnt_grp grp, int prio, 2086 u64 *data, int data_index) 2087 { 2088 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2089 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2090 struct mlxsw_sp_port_hw_stats *hw_stats; 2091 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2092 int i, len; 2093 int err; 2094 2095 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2096 if (err) 2097 return; 2098 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2099 for (i = 0; i < len; i++) { 2100 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2101 if (!hw_stats[i].cells_bytes) 2102 continue; 2103 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2104 data[data_index + i]); 2105 } 2106 } 2107 2108 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2109 struct ethtool_stats *stats, u64 *data) 2110 { 2111 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2112 int i, data_index = 0; 2113 2114 /* IEEE 802.3 Counters */ 2115 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2116 data, data_index); 2117 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2118 2119 /* RFC 2863 Counters */ 2120 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2121 data, data_index); 2122 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2123 2124 /* RFC 2819 Counters */ 2125 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2126 data, data_index); 2127 data_index += 
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Extended Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}

	/* PTP counters */
	mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
						    data, data_index);
	data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
}

/* Report the number of ethtool stats; must agree with get_strings and
 * get_stats above (fixed counters plus the per-chip PTP counters).
 */
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN +
		       mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
	default:
		return -EOPNOTSUPP;
	}
}

/* One Spectrum-1 PTYS-to-ethtool link mode mapping: a PTYS proto bitmask,
 * the single ethtool link-mode bit it corresponds to, and its speed.
 */
struct mlxsw_sp1_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask =
MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Derive the supported port types (FIBRE/Backplane bits) from the PTYS
 * capability bitmask.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set the ethtool link-mode bits matching the PTYS bits in
 * @ptys_eth_proto. @width is unused on Spectrum-1.
 */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Return the speed of the first table entry whose PTYS bit is set in
 * @ptys_eth_proto, or SPEED_UNKNOWN when none matches.
 */
static u32
mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			return mlxsw_sp1_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill cmd->base speed/duplex from the operational PTYS bits; both stay
 * UNKNOWN while the carrier is down.
 */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Translate the advertised ethtool link modes to a PTYS bitmask. */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect the PTYS bits of all table entries whose speed equals @speed. */
static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
				   u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* Spectrum-1 implementation of the PTYS <-> ethtool translation ops. */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp1_from_ptys_link,
	.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
	.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
};

/* On Spectrum-2 a single extended PTYS bit covers several ethtool link
 * modes, so each mapping entry points at an array of ethtool bits.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_400gaui_8[] = {
	ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)

/* Port-width capability flags; used to filter link modes by the number
 * of lanes a port was split into.
 */
#define MLXSW_SP_PORT_MASK_WIDTH_1X	BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X	BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X	BIT(2)
#define MLXSW_SP_PORT_MASK_WIDTH_8X	BIT(3)

/* Convert a lane count (1/2/4/8) to its width flag; 0 on invalid input. */
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
	switch (width) {
	case 1:
		return MLXSW_SP_PORT_MASK_WIDTH_1X;
	case 2:
		return MLXSW_SP_PORT_MASK_WIDTH_2X;
	case 4:
		return MLXSW_SP_PORT_MASK_WIDTH_4X;
	case 8:
		return MLXSW_SP_PORT_MASK_WIDTH_8X;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

/* One Spectrum-2 mapping: an extended PTYS bit, the set of ethtool link
 * modes it represents, the port widths it applies to, and its speed.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
	u8 mask_width;
};

static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_200000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_400000,
	},
};
#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)

/* Both FIBRE and Backplane are reported regardless of the PTYS
 * capability bits on Spectrum-2.
 */
static void
mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set every ethtool link-mode bit listed in one mapping entry. */
static void
mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			  unsigned long *mode)
{
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++)
		__set_bit(link_mode->mask_ethtool[i], mode);
}

/* Set the ethtool link-mode bits for all PTYS bits in @ptys_eth_proto
 * that are valid for a port of @width lanes.
 */
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
		    (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
			mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
						  mode);
	}
}

/* Return the speed of the first matching table entry, or SPEED_UNKNOWN. */
static u32
mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
			return mlxsw_sp2_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill cmd->base speed/duplex from the operational PTYS bits; both stay
 * UNKNOWN while the carrier is down.
 */
static void
mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* True only if ALL ethtool bits of the mapping entry are set in @mode;
 * a PTYS bit can be advertised only as a whole group.
 */
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			   const unsigned long *mode)
{
	int cnt = 0;
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++) {
		if (test_bit(link_mode->mask_ethtool[i], mode))
			cnt++;
	}

	return cnt == link_mode->m_ethtool_len;
}

/* Translate advertised ethtool link modes to an extended PTYS bitmask,
 * restricted to modes valid for the port's width.
 */
static u32
mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
		    mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
					       cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect PTYS bits of all entries matching @speed and the port width. */
static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
				   u8 width, u32 speed)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((speed == mlxsw_sp2_port_link_mode[i].speed) &&
		    (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Spectrum-2 uses the extended ethernet protocol fields of PTYS. */
static void
mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin,
			    bool autoneg)
{
	mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
				      p_eth_proto_admin, p_eth_proto_oper);
}

/* Spectrum-2 implementation of the PTYS <-> ethtool translation ops. */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp2_port_type_speed_ops = {
	.from_ptys_supported_port =
mlxsw_sp2_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp2_from_ptys_link,
	.from_ptys_speed = mlxsw_sp2_from_ptys_speed,
	.from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
	.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
};

/* Fill the supported link modes (pause, autoneg, port types, speeds)
 * from the PTYS capability bits via the per-ASIC ops.
 */
static void
mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
				 u8 width, struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
	ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width,
			    cmd->link_modes.supported);
}

/* Fill the advertised link modes from the PTYS admin bits; nothing is
 * advertised when autoneg is off.
 */
static void
mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
				 u32 eth_proto_admin, bool autoneg, u8 width,
				 struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width,
			    cmd->link_modes.advertising);
}

/* Map the PTYS connector type to the corresponding ethtool PORT_* value. */
static u8
mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
{
	switch (connector_type) {
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
		return PORT_OTHER;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
		return PORT_NONE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
		return PORT_TP;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
		return PORT_AUI;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
		return PORT_BNC;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
		return PORT_MII;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
		return PORT_FIBRE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
		return PORT_DA;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
		return PORT_OTHER;
	default:
		WARN_ON_ONCE(1);
		return PORT_OTHER;
	}
}

/* ethtool get_link_ksettings handler: query PTYS and report supported,
 * advertised and operational link parameters.
 */
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 connector_type;
	bool autoneg;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	autoneg = mlxsw_sp_port->link.autoneg;
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
					 mlxsw_sp_port->mapping.width, cmd);

	mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
					 mlxsw_sp_port->mapping.width, cmd);

	cmd->base.autoneg = autoneg ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
	connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
	cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
	ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
				    eth_proto_oper, cmd);

	return 0;
}

/* ethtool set_link_ksettings handler: translate the request to a PTYS
 * admin mask (advertised modes with autoneg, a fixed speed without),
 * write it, and toggle the port admin state so the new configuration
 * takes effect on a running interface.
 */
static int
mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap, eth_proto_new;
	bool autoneg;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);

	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
	eth_proto_new = autoneg ?
		ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width,
					 cmd) :
		ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width,
				   cmd->base.speed);

	/* Never enable modes the hardware does not support. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "No supported speed requested\n");
		return -EINVAL;
	}

	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_new, autoneg);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	mlxsw_sp_port->link.autoneg = autoneg;

	if (!netif_running(dev))
		return 0;

	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return 0;
}

/* ethtool get_module_info handler: delegate to the common environment
 * code for the port's transceiver module.
 */
static int mlxsw_sp_get_module_info(struct net_device *netdev,
				    struct ethtool_modinfo *modinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_get_module_info(mlxsw_sp->core,
					mlxsw_sp_port->mapping.module,
					modinfo);

	return err;
}

/* ethtool get_module_eeprom handler: read the module EEPROM through the
 * common environment code.
 */
static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee,
				      u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
					  mlxsw_sp_port->mapping.module, ee,
					  data);

	return err;
}

/* ethtool get_ts_info handler: timestamping capabilities come from the
 * per-chip PTP ops.
 */
static int
mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info);
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo =
mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = mlxsw_sp_port_get_pauseparam,
	.set_pauseparam = mlxsw_sp_port_set_pauseparam,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
	.get_module_info = mlxsw_sp_get_module_info,
	.get_module_eeprom = mlxsw_sp_get_module_eeprom,
	.get_ts_info = mlxsw_sp_get_ts_info,
};

/* During port init, advertise everything the hardware supports: read
 * the PTYS capability bits and write them back as the admin mask.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to supported speeds. */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

/* Query the current operational speed of @mlxsw_sp_port into *speed;
 * *speed is SPEED_UNKNOWN when no operational PTYS bit matches.
 */
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

/* Configure one QEEC ETS element: link it under @next_index at hierarchy
 * level @hr and set its DWRR mode/weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the max shaper (rate and burst size) on one QEEC ETS element. */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index,
				  u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Program the minimum shaper (guaranteed bandwidth) on one ETS element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map one switch priority to a traffic class on the port (QTCT register). */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Build the default ETS hierarchy for a port: one group, one subgroup per
 * TC, and per-TC elements for both unicast (i) and multicast (i + 8)
 * traffic classes; then disable all max shapers, give the multicast TCs a
 * minimum shaper, and map every priority to TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs 8..15 are the multicast companions of TCs 0..7;
		 * they share the same subgroup and use DWRR.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable or disable per-TC multicast-aware mode on the port (QTCTM). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create one front-panel port: allocate its netdev, program all of its
 * hardware state (module mapping, SWID, MAC, MTU, buffers, ETS, DCB, FIDs,
 * qdiscs, VLANs, NVE) and finally register the netdev. A non-zero
 * split_base_local_port marks the port as a split port.
 * On failure, previously-applied steps are unwound in reverse order via
 * the goto ladder at the end (see the err_* labels past this block).
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / port_mapping->width,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Start with VLAN filtering cleared for the whole VID range. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);
	INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
			  mlxsw_sp_span_speed_update_work);

	/* Publish the port before register_netdev() so callbacks that fire
	 * during registration can look it up by local port.
	 */
	mlxsw_sp->ports[local_port] =
mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down one front-panel port: cancel its delayed work first, then
 * undo mlxsw_sp_port_create() steps in reverse order.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* Create the CPU port representation; it has no netdev, only a core-side
 * port object and an entry in the ports array.
 */
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

/* Destroy the CPU port representation created above. */
static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

/* True iff a port object exists at this local port index. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return
mlxsw_sp->ports[local_port] != NULL;
}

/* Remove all front-panel ports (indices start at 1), then the CPU port,
 * then free the ports array itself.
 */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
}

/* Allocate the ports array, create the CPU port and then every front-panel
 * port that has a module mapping. On failure, unwinds in reverse.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	return err;
}

/* Query the module mapping of every possible local port and cache a copy
 * for each mapped port (NULL entry = unmapped / unused port number).
 */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
		if (err)
			goto err_port_module_info_get;
		/* Zero width means the port number has no module behind it. */
		if (!port_mapping.width)
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}

/* Free the cached per-port module mappings and the array holding them. */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
}

/* Return the first local port of the cluster (of size max_width) that
 * contains local_port; local ports are 1-based, hence the -1.
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
{
	u8 offset = (local_port - 1) % max_width;

	return local_port - offset;
}

/* Create the 'count' child ports of a split, spaced 'offset' local ports
 * apart, each taking an equal share of the parent's lanes.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Next child starts on the lane right after this one's. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them.
	 */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}

/* Return how many local port numbers one split child of the given width
 * occupies, or -EINVAL if the width is unsupported or the resource is
 * not exposed by the device.
 */
static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
				       unsigned int count,
				       unsigned int max_width)
{
	enum mlxsw_res_id local_ports_in_x_res_id;
	int split_width = max_width / count;

	if (split_width == 1)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
	else if (split_width == 2)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
	else if (split_width == 4)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
	else
		return -EINVAL;

	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
		return -EINVAL;
	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
}

/* devlink port split handler: validate the request, remove the ports that
 * occupy the target local-port range and create the split children.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* Split ports cannot be split. */
	if (mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max and 1 module width cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	if (count == 1 || !is_power_of_2(count) || count > max_width) {
		netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
		NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Copy the mapping before the port (and its mapping) is removed. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}

/* devlink port unsplit handler: remove the split children and recreate
 * the original unsplit port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	count = max_width / mlxsw_sp_port->mapping.width;

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	/* The split succeeded with this geometry, so failure here is a bug. */
	if (WARN_ON(offset < 0)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	base_port = mlxsw_sp_port->split_base_local_port;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);

	return 0;
}

/* Reset the cached per-queue backlog counters on link down. */
static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < TC_MAX_QUEUE; i++)
		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
}

/* PUDE (port up/down event) handler: propagate the hardware operational
 * status to the netdev carrier state and kick the PTP shaper and SPAN
 * speed-update work on link up.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}

/* Drain one MTPPTR FIFO event: unpack every timestamp record and hand it
 * to the SP1 PTP code for matching against queued packets.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

/* Ingress PTP timestamp FIFO event trampoline. */
static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

/* Egress PTP timestamp FIFO event trampoline. */
static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

/* Base Rx trap handler: attribute the skb to the ingress port's netdev,
 * bump per-CPU Rx stats and inject it into the network stack.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

/* As above, but mark the skb as already L2-forwarded by hardware. */
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* As the base handler, but mark the skb as both L2- and L3-forwarded. */
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* Handler for sampled packets: report them through psample (truncated to
 * trunc_size when requested) and consume the skb.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_sample *sample;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}

	rcu_read_lock();
	sample = rcu_dereference(mlxsw_sp_port->sample);
	if (!sample)
		goto out_unlock;
	size = sample->truncate ? sample->trunc_size : skb->len;
	psample_sample_packet(sample->psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

/* Hand PTP packets to the ASIC-generation-specific PTP receive path. */
static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
				     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

/* Convenience wrappers around MLXSW_RXL for the three Rx handlers above. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), 4106 MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false), 4107 /* PTP traps */ 4108 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU, 4109 false, SP_PTP0, DISCARD), 4110 MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false), 4111 }; 4112 4113 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 4114 /* Events */ 4115 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 4116 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 4117 }; 4118 4119 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 4120 { 4121 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4122 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 4123 enum mlxsw_reg_qpcr_ir_units ir_units; 4124 int max_cpu_policers; 4125 bool is_bytes; 4126 u8 burst_size; 4127 u32 rate; 4128 int i, err; 4129 4130 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 4131 return -EIO; 4132 4133 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4134 4135 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 4136 for (i = 0; i < max_cpu_policers; i++) { 4137 is_bytes = false; 4138 switch (i) { 4139 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4140 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4141 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4142 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4143 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4144 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4145 rate = 128; 4146 burst_size = 7; 4147 break; 4148 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4149 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4150 rate = 16 * 1024; 4151 burst_size = 10; 4152 break; 4153 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4154 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4155 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4156 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4157 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4158 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4159 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
			rate = 360;
			burst_size = 7;
			break;
		default:
			/* Group has no CPU policer - skip it */
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Program each trap group (HTGT register) with its priority, traffic
 * class and policer ID. The policer ID equals the group index, except
 * for the event group which gets no policer. Groups without an explicit
 * case are skipped. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 0;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			/* Events are not rate-limited */
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* Sanity: a real policer ID must fit the device resource */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners with the core; on failure,
 * unregister the ones already registered (rollback in reverse order).
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister every listener in the given array. */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Allocate the trap context (including the policer usage bitmap), set up
 * CPU policers and trap groups, then register the common and per-ASIC
 * listener arrays. Unwinds on any failure.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* Flexible array: one usage bit per possible CPU policer */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	/* Common listeners first, then the per-ASIC ones (if any) */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}

/* Teardown counterpart of mlxsw_sp_traps_init(), in reverse order. */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	kfree(mlxsw_sp->trap);
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure LAG hashing (SLCR register) - the hash covers L2, VLAN, IP
 * addresses and L4 ports, seeded from the switch base MAC so different
 * systems hash differently - and allocate the per-LAG upper tracking
 * array sized by the device's MAX_LAG resource.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Set up the EMAD trap group with default priority/TC and no policer;
 * needed before any other register access goes through EMADs.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* Spectrum-1 PTP implementation, selected in mlxsw_sp1_init(). */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init = mlxsw_sp1_ptp_clock_init,
	.clock_fini = mlxsw_sp1_ptp_clock_fini,
	.init = mlxsw_sp1_ptp_init,
	.fini = mlxsw_sp1_ptp_fini,
	.receive = mlxsw_sp1_ptp_receive,
	.transmitted = mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp1_ptp_shaper_work,
	.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats = mlxsw_sp1_get_stats,
};

/* Spectrum-2/3 PTP implementation (also used by mlxsw_sp3_init()). */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini =
mlxsw_sp2_ptp_fini, 4439 .receive = mlxsw_sp2_ptp_receive, 4440 .transmitted = mlxsw_sp2_ptp_transmitted, 4441 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 4442 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 4443 .shaper_work = mlxsw_sp2_ptp_shaper_work, 4444 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 4445 .get_stats_count = mlxsw_sp2_get_stats_count, 4446 .get_stats_strings = mlxsw_sp2_get_stats_strings, 4447 .get_stats = mlxsw_sp2_get_stats, 4448 }; 4449 4450 static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) 4451 { 4452 return mtu * 5 / 2; 4453 } 4454 4455 static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { 4456 .buffsize_get = mlxsw_sp1_span_buffsize_get, 4457 }; 4458 4459 #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 4460 #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 4461 4462 static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor) 4463 { 4464 return 3 * mtu + buffer_factor * speed / 1000; 4465 } 4466 4467 static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) 4468 { 4469 int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR; 4470 4471 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 4472 } 4473 4474 static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { 4475 .buffsize_get = mlxsw_sp2_span_buffsize_get, 4476 }; 4477 4478 static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) 4479 { 4480 int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; 4481 4482 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 4483 } 4484 4485 static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { 4486 .buffsize_get = mlxsw_sp3_span_buffsize_get, 4487 }; 4488 4489 u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed) 4490 { 4491 u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu); 4492 4493 return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; 4494 } 4495 4496 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 4497 unsigned long event, void *ptr); 4498 4499 static int 
mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
	      const struct mlxsw_bus_info *mlxsw_bus_info,
	      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	/* Common init path for all Spectrum generations; the per-ASIC
	 * mlxsw_spN_init() wrappers fill in the ops pointers first.
	 * Every step below is unwound in reverse order on failure.
	 */
	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 probe entry: select the SP1 ops tables (including the
 * SP1-only listener array) and delegate to the common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-2 probe entry: select the SP2 ops tables and delegate to the
 * common init.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops =
&mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-3 probe entry: reuses the SP2 ops tables except for the SPAN
 * buffer sizing and the lowest shaper burst size. Note no req_rev /
 * fw_filename is set here (no firmware requirement configured for SP3).
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Teardown counterpart of mlxsw_sp_init(), in exact reverse order of
 * initialization.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Spectrum-1 device profile handed to the core at init; includes the
 * static KVD hash single/double split (parts ratio) and linear size.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Spectrum-2 profile: same as SP1 but without the KVD size fields, which
 * are not configured on this generation.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Fill devlink size-param ranges for the KVD and its linear / hash-double
 * / hash-single sub-resources, bounded by the device KVD size and the
 * per-partition minimum sizes.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core
				      *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* Total KVD is fixed; each partition may range from its minimum
	 * up to whatever the other two partitions' minimums leave over.
	 */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the Spectrum-1 KVD devlink resource tree: KVD at the top,
 * with linear, hash-double and hash-single children. Default partition
 * sizes come from the SP1 config profile (linear size and the 59/41
 * single/double parts ratio applied to the remainder).
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the non-linear remainder by the double/single parts ratio,
	 * rounded down to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* Register the Spectrum-2 KVD devlink resource: a single top-level KVD
 * entry with no sub-partitions.
 */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

/* Register the SPAN agents devlink resource, sized by the device's
 * MAX_SPAN resource (fixed size - min == max).
 */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

/* Spectrum-1 devlink resources: KVD tree, SPAN and counters. On failure
 * all already-registered resources are dropped in one call.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Spectrum-2/3 devlink resources: same as SP1 but with the flat SP2 KVD
 * registration.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Resolve the KVD linear/double/single partition sizes: user-provided
 * values are read back via devlink; otherwise the profile defaults (and
 * the parts-ratio split of the remainder) are used. The results are
 * validated against the device minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal.
 */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink param validator: fw_load_policy only accepts 'driver' or
 * 'flash'; anything else is rejected with an extack message.
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

/* devlink params common to all Spectrum generations. */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink params and set fw_load_policy's
 * driverinit default to 'driver'.
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* Runtime getter for the acl_region_rehash_interval devlink param. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* Runtime setter for the acl_region_rehash_interval devlink param. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* Extra devlink params available on Spectrum-2 and later. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Register the common params plus the SP2-specific ones; unwinds the
 * common registration on failure.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{ 5209 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5210 5211 skb_pull(skb, MLXSW_TXHDR_LEN); 5212 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 5213 } 5214 5215 static struct mlxsw_driver mlxsw_sp1_driver = { 5216 .kind = mlxsw_sp1_driver_name, 5217 .priv_size = sizeof(struct mlxsw_sp), 5218 .init = mlxsw_sp1_init, 5219 .fini = mlxsw_sp_fini, 5220 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5221 .port_split = mlxsw_sp_port_split, 5222 .port_unsplit = mlxsw_sp_port_unsplit, 5223 .sb_pool_get = mlxsw_sp_sb_pool_get, 5224 .sb_pool_set = mlxsw_sp_sb_pool_set, 5225 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5226 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5227 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5228 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5229 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5230 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5231 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5232 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5233 .flash_update = mlxsw_sp_flash_update, 5234 .trap_init = mlxsw_sp_trap_init, 5235 .trap_fini = mlxsw_sp_trap_fini, 5236 .trap_action_set = mlxsw_sp_trap_action_set, 5237 .trap_group_init = mlxsw_sp_trap_group_init, 5238 .trap_group_set = mlxsw_sp_trap_group_set, 5239 .trap_policer_init = mlxsw_sp_trap_policer_init, 5240 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 5241 .trap_policer_set = mlxsw_sp_trap_policer_set, 5242 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 5243 .txhdr_construct = mlxsw_sp_txhdr_construct, 5244 .resources_register = mlxsw_sp1_resources_register, 5245 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 5246 .params_register = mlxsw_sp_params_register, 5247 .params_unregister = mlxsw_sp_params_unregister, 5248 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5249 .txhdr_len = MLXSW_TXHDR_LEN, 5250 .profile = &mlxsw_sp1_config_profile, 5251 .res_query_enabled = true, 5252 }; 5253 5254 static 
struct mlxsw_driver mlxsw_sp2_driver = { 5255 .kind = mlxsw_sp2_driver_name, 5256 .priv_size = sizeof(struct mlxsw_sp), 5257 .init = mlxsw_sp2_init, 5258 .fini = mlxsw_sp_fini, 5259 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5260 .port_split = mlxsw_sp_port_split, 5261 .port_unsplit = mlxsw_sp_port_unsplit, 5262 .sb_pool_get = mlxsw_sp_sb_pool_get, 5263 .sb_pool_set = mlxsw_sp_sb_pool_set, 5264 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5265 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5266 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5267 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5268 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5269 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5270 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5271 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5272 .flash_update = mlxsw_sp_flash_update, 5273 .trap_init = mlxsw_sp_trap_init, 5274 .trap_fini = mlxsw_sp_trap_fini, 5275 .trap_action_set = mlxsw_sp_trap_action_set, 5276 .trap_group_init = mlxsw_sp_trap_group_init, 5277 .trap_group_set = mlxsw_sp_trap_group_set, 5278 .trap_policer_init = mlxsw_sp_trap_policer_init, 5279 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 5280 .trap_policer_set = mlxsw_sp_trap_policer_set, 5281 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 5282 .txhdr_construct = mlxsw_sp_txhdr_construct, 5283 .resources_register = mlxsw_sp2_resources_register, 5284 .params_register = mlxsw_sp2_params_register, 5285 .params_unregister = mlxsw_sp2_params_unregister, 5286 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5287 .txhdr_len = MLXSW_TXHDR_LEN, 5288 .profile = &mlxsw_sp2_config_profile, 5289 .res_query_enabled = true, 5290 }; 5291 5292 static struct mlxsw_driver mlxsw_sp3_driver = { 5293 .kind = mlxsw_sp3_driver_name, 5294 .priv_size = sizeof(struct mlxsw_sp), 5295 .init = mlxsw_sp3_init, 5296 .fini = mlxsw_sp_fini, 5297 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5298 
.port_split = mlxsw_sp_port_split, 5299 .port_unsplit = mlxsw_sp_port_unsplit, 5300 .sb_pool_get = mlxsw_sp_sb_pool_get, 5301 .sb_pool_set = mlxsw_sp_sb_pool_set, 5302 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5303 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5304 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5305 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5306 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5307 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5308 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5309 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5310 .flash_update = mlxsw_sp_flash_update, 5311 .trap_init = mlxsw_sp_trap_init, 5312 .trap_fini = mlxsw_sp_trap_fini, 5313 .trap_action_set = mlxsw_sp_trap_action_set, 5314 .trap_group_init = mlxsw_sp_trap_group_init, 5315 .trap_group_set = mlxsw_sp_trap_group_set, 5316 .trap_policer_init = mlxsw_sp_trap_policer_init, 5317 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 5318 .trap_policer_set = mlxsw_sp_trap_policer_set, 5319 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 5320 .txhdr_construct = mlxsw_sp_txhdr_construct, 5321 .resources_register = mlxsw_sp2_resources_register, 5322 .params_register = mlxsw_sp2_params_register, 5323 .params_unregister = mlxsw_sp2_params_unregister, 5324 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5325 .txhdr_len = MLXSW_TXHDR_LEN, 5326 .profile = &mlxsw_sp2_config_profile, 5327 .res_query_enabled = true, 5328 }; 5329 5330 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 5331 { 5332 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 5333 } 5334 5335 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 5336 { 5337 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 5338 int ret = 0; 5339 5340 if (mlxsw_sp_port_dev_check(lower_dev)) { 5341 *p_mlxsw_sp_port = netdev_priv(lower_dev); 5342 ret = 1; 5343 } 5344 5345 return ret; 5346 } 5347 5348 struct mlxsw_sp_port 
*mlxsw_sp_port_dev_lower_find(struct net_device *dev) 5349 { 5350 struct mlxsw_sp_port *mlxsw_sp_port; 5351 5352 if (mlxsw_sp_port_dev_check(dev)) 5353 return netdev_priv(dev); 5354 5355 mlxsw_sp_port = NULL; 5356 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 5357 5358 return mlxsw_sp_port; 5359 } 5360 5361 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 5362 { 5363 struct mlxsw_sp_port *mlxsw_sp_port; 5364 5365 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 5366 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 5367 } 5368 5369 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 5370 { 5371 struct mlxsw_sp_port *mlxsw_sp_port; 5372 5373 if (mlxsw_sp_port_dev_check(dev)) 5374 return netdev_priv(dev); 5375 5376 mlxsw_sp_port = NULL; 5377 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 5378 &mlxsw_sp_port); 5379 5380 return mlxsw_sp_port; 5381 } 5382 5383 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 5384 { 5385 struct mlxsw_sp_port *mlxsw_sp_port; 5386 5387 rcu_read_lock(); 5388 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 5389 if (mlxsw_sp_port) 5390 dev_hold(mlxsw_sp_port->dev); 5391 rcu_read_unlock(); 5392 return mlxsw_sp_port; 5393 } 5394 5395 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 5396 { 5397 dev_put(mlxsw_sp_port->dev); 5398 } 5399 5400 static void 5401 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 5402 struct net_device *lag_dev) 5403 { 5404 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 5405 struct net_device *upper_dev; 5406 struct list_head *iter; 5407 5408 if (netif_is_bridge_port(lag_dev)) 5409 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 5410 5411 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 5412 if (!netif_is_bridge_port(upper_dev)) 5413 continue; 5414 br_dev = netdev_master_upper_dev_get(upper_dev); 5415 
mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 5416 } 5417 } 5418 5419 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5420 { 5421 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5422 5423 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 5424 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5425 } 5426 5427 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5428 { 5429 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5430 5431 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 5432 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5433 } 5434 5435 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5436 u16 lag_id, u8 port_index) 5437 { 5438 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5439 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5440 5441 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 5442 lag_id, port_index); 5443 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5444 } 5445 5446 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5447 u16 lag_id) 5448 { 5449 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5450 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5451 5452 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 5453 lag_id); 5454 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5455 } 5456 5457 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 5458 u16 lag_id) 5459 { 5460 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5461 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5462 5463 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 5464 lag_id); 5465 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5466 } 5467 5468 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 5469 u16 lag_id) 5470 { 5471 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5472 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5473 5474 
mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 5475 lag_id); 5476 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5477 } 5478 5479 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5480 struct net_device *lag_dev, 5481 u16 *p_lag_id) 5482 { 5483 struct mlxsw_sp_upper *lag; 5484 int free_lag_id = -1; 5485 u64 max_lag; 5486 int i; 5487 5488 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 5489 for (i = 0; i < max_lag; i++) { 5490 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 5491 if (lag->ref_count) { 5492 if (lag->dev == lag_dev) { 5493 *p_lag_id = i; 5494 return 0; 5495 } 5496 } else if (free_lag_id < 0) { 5497 free_lag_id = i; 5498 } 5499 } 5500 if (free_lag_id < 0) 5501 return -EBUSY; 5502 *p_lag_id = free_lag_id; 5503 return 0; 5504 } 5505 5506 static bool 5507 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 5508 struct net_device *lag_dev, 5509 struct netdev_lag_upper_info *lag_upper_info, 5510 struct netlink_ext_ack *extack) 5511 { 5512 u16 lag_id; 5513 5514 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 5515 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 5516 return false; 5517 } 5518 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 5519 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 5520 return false; 5521 } 5522 return true; 5523 } 5524 5525 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5526 u16 lag_id, u8 *p_port_index) 5527 { 5528 u64 max_lag_members; 5529 int i; 5530 5531 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 5532 MAX_LAG_MEMBERS); 5533 for (i = 0; i < max_lag_members; i++) { 5534 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 5535 *p_port_index = i; 5536 return 0; 5537 } 5538 } 5539 return -EBUSY; 5540 } 5541 5542 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 5543 struct net_device *lag_dev) 5544 { 5545 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5546 
struct mlxsw_sp_upper *lag; 5547 u16 lag_id; 5548 u8 port_index; 5549 int err; 5550 5551 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 5552 if (err) 5553 return err; 5554 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5555 if (!lag->ref_count) { 5556 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 5557 if (err) 5558 return err; 5559 lag->dev = lag_dev; 5560 } 5561 5562 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 5563 if (err) 5564 return err; 5565 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 5566 if (err) 5567 goto err_col_port_add; 5568 5569 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 5570 mlxsw_sp_port->local_port); 5571 mlxsw_sp_port->lag_id = lag_id; 5572 mlxsw_sp_port->lagged = 1; 5573 lag->ref_count++; 5574 5575 /* Port is no longer usable as a router interface */ 5576 if (mlxsw_sp_port->default_vlan->fid) 5577 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 5578 5579 return 0; 5580 5581 err_col_port_add: 5582 if (!lag->ref_count) 5583 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5584 return err; 5585 } 5586 5587 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 5588 struct net_device *lag_dev) 5589 { 5590 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5591 u16 lag_id = mlxsw_sp_port->lag_id; 5592 struct mlxsw_sp_upper *lag; 5593 5594 if (!mlxsw_sp_port->lagged) 5595 return; 5596 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5597 WARN_ON(lag->ref_count == 0); 5598 5599 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 5600 5601 /* Any VLANs configured on the port are no longer valid */ 5602 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 5603 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 5604 /* Make the LAG and its directly linked uppers leave bridges they 5605 * are memeber in 5606 */ 5607 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 5608 5609 if (lag->ref_count == 1) 5610 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5611 5612 
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 5613 mlxsw_sp_port->local_port); 5614 mlxsw_sp_port->lagged = 0; 5615 lag->ref_count--; 5616 5617 /* Make sure untagged frames are allowed to ingress */ 5618 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 5619 } 5620 5621 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5622 u16 lag_id) 5623 { 5624 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5625 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5626 5627 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 5628 mlxsw_sp_port->local_port); 5629 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5630 } 5631 5632 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5633 u16 lag_id) 5634 { 5635 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5636 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5637 5638 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 5639 mlxsw_sp_port->local_port); 5640 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5641 } 5642 5643 static int 5644 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 5645 { 5646 int err; 5647 5648 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 5649 mlxsw_sp_port->lag_id); 5650 if (err) 5651 return err; 5652 5653 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5654 if (err) 5655 goto err_dist_port_add; 5656 5657 return 0; 5658 5659 err_dist_port_add: 5660 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5661 return err; 5662 } 5663 5664 static int 5665 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 5666 { 5667 int err; 5668 5669 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 5670 mlxsw_sp_port->lag_id); 5671 if (err) 5672 return err; 5673 5674 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 5675 mlxsw_sp_port->lag_id); 5676 if (err) 5677 goto err_col_port_disable; 5678 5679 return 0; 5680 5681 err_col_port_disable: 5682 
mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5683 return err; 5684 } 5685 5686 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 5687 struct netdev_lag_lower_state_info *info) 5688 { 5689 if (info->tx_enabled) 5690 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 5691 else 5692 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 5693 } 5694 5695 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 5696 bool enable) 5697 { 5698 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5699 enum mlxsw_reg_spms_state spms_state; 5700 char *spms_pl; 5701 u16 vid; 5702 int err; 5703 5704 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 5705 MLXSW_REG_SPMS_STATE_DISCARDING; 5706 5707 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 5708 if (!spms_pl) 5709 return -ENOMEM; 5710 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 5711 5712 for (vid = 0; vid < VLAN_N_VID; vid++) 5713 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 5714 5715 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 5716 kfree(spms_pl); 5717 return err; 5718 } 5719 5720 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 5721 { 5722 u16 vid = 1; 5723 int err; 5724 5725 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 5726 if (err) 5727 return err; 5728 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 5729 if (err) 5730 goto err_port_stp_set; 5731 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5732 true, false); 5733 if (err) 5734 goto err_port_vlan_set; 5735 5736 for (; vid <= VLAN_N_VID - 1; vid++) { 5737 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5738 vid, false); 5739 if (err) 5740 goto err_vid_learning_set; 5741 } 5742 5743 return 0; 5744 5745 err_vid_learning_set: 5746 for (vid--; vid >= 1; vid--) 5747 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 5748 err_port_vlan_set: 5749 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5750 
err_port_stp_set: 5751 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5752 return err; 5753 } 5754 5755 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 5756 { 5757 u16 vid; 5758 5759 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 5760 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5761 vid, true); 5762 5763 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5764 false, false); 5765 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5766 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5767 } 5768 5769 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 5770 { 5771 unsigned int num_vxlans = 0; 5772 struct net_device *dev; 5773 struct list_head *iter; 5774 5775 netdev_for_each_lower_dev(br_dev, dev, iter) { 5776 if (netif_is_vxlan(dev)) 5777 num_vxlans++; 5778 } 5779 5780 return num_vxlans > 1; 5781 } 5782 5783 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 5784 { 5785 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 5786 struct net_device *dev; 5787 struct list_head *iter; 5788 5789 netdev_for_each_lower_dev(br_dev, dev, iter) { 5790 u16 pvid; 5791 int err; 5792 5793 if (!netif_is_vxlan(dev)) 5794 continue; 5795 5796 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 5797 if (err || !pvid) 5798 continue; 5799 5800 if (test_and_set_bit(pvid, vlans)) 5801 return false; 5802 } 5803 5804 return true; 5805 } 5806 5807 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 5808 struct netlink_ext_ack *extack) 5809 { 5810 if (br_multicast_enabled(br_dev)) { 5811 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 5812 return false; 5813 } 5814 5815 if (!br_vlan_enabled(br_dev) && 5816 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 5817 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 5818 return false; 5819 } 5820 5821 if (br_vlan_enabled(br_dev) && 5822 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 5823 
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for an mlxsw port (dev).
 * PRECHANGEUPPER vetoes unsupported topologies; CHANGEUPPER applies the
 * join/leave to bridge, LAG, OVS or macvlan uppers. lower_dev is the
 * device the event was originally reported on.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		/* VxLAN sanity checks apply only when joining a bridge that
		 * is not already offloaded.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Handle CHANGELOWERSTATE for an mlxsw port: propagate LAG Tx state to the
 * device. Failure is only logged; the notifier chain is not aborted.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch netdev events on an mlxsw port to the upper/lower handlers. */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replay an event on a LAG device to each mlxsw port member; stop at the
 * first error.
 */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device (vlan_dev) on top of
 * an mlxsw port (dev); only bridge and macvlan uppers are supported.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
struct net_device *upper_dev; 6014 int err = 0; 6015 6016 extack = netdev_notifier_info_to_extack(&info->info); 6017 6018 switch (event) { 6019 case NETDEV_PRECHANGEUPPER: 6020 upper_dev = info->upper_dev; 6021 if (!netif_is_bridge_master(upper_dev) && 6022 !netif_is_macvlan(upper_dev)) { 6023 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6024 return -EINVAL; 6025 } 6026 if (!info->linking) 6027 break; 6028 if (netif_is_bridge_master(upper_dev) && 6029 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 6030 mlxsw_sp_bridge_has_vxlan(upper_dev) && 6031 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6032 return -EOPNOTSUPP; 6033 if (netdev_has_any_upper_dev(upper_dev) && 6034 (!netif_is_bridge_master(upper_dev) || 6035 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 6036 upper_dev))) { 6037 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 6038 return -EINVAL; 6039 } 6040 if (netif_is_macvlan(upper_dev) && 6041 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 6042 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6043 return -EOPNOTSUPP; 6044 } 6045 break; 6046 case NETDEV_CHANGEUPPER: 6047 upper_dev = info->upper_dev; 6048 if (netif_is_bridge_master(upper_dev)) { 6049 if (info->linking) 6050 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 6051 vlan_dev, 6052 upper_dev, 6053 extack); 6054 else 6055 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 6056 vlan_dev, 6057 upper_dev); 6058 } else if (netif_is_macvlan(upper_dev)) { 6059 if (!info->linking) 6060 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6061 } else { 6062 err = -EINVAL; 6063 WARN_ON(1); 6064 } 6065 break; 6066 } 6067 6068 return err; 6069 } 6070 6071 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 6072 struct net_device *lag_dev, 6073 unsigned long event, 6074 void *ptr, u16 vid) 6075 { 6076 struct net_device *dev; 6077 struct list_head *iter; 6078 int ret; 6079 6080 
netdev_for_each_lower_dev(lag_dev, dev, iter) { 6081 if (mlxsw_sp_port_dev_check(dev)) { 6082 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 6083 event, ptr, 6084 vid); 6085 if (ret) 6086 return ret; 6087 } 6088 } 6089 6090 return 0; 6091 } 6092 6093 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 6094 struct net_device *br_dev, 6095 unsigned long event, void *ptr, 6096 u16 vid) 6097 { 6098 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 6099 struct netdev_notifier_changeupper_info *info = ptr; 6100 struct netlink_ext_ack *extack; 6101 struct net_device *upper_dev; 6102 6103 if (!mlxsw_sp) 6104 return 0; 6105 6106 extack = netdev_notifier_info_to_extack(&info->info); 6107 6108 switch (event) { 6109 case NETDEV_PRECHANGEUPPER: 6110 upper_dev = info->upper_dev; 6111 if (!netif_is_macvlan(upper_dev)) { 6112 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6113 return -EOPNOTSUPP; 6114 } 6115 if (!info->linking) 6116 break; 6117 if (netif_is_macvlan(upper_dev) && 6118 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 6119 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6120 return -EOPNOTSUPP; 6121 } 6122 break; 6123 case NETDEV_CHANGEUPPER: 6124 upper_dev = info->upper_dev; 6125 if (info->linking) 6126 break; 6127 if (netif_is_macvlan(upper_dev)) 6128 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6129 break; 6130 } 6131 6132 return 0; 6133 } 6134 6135 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 6136 unsigned long event, void *ptr) 6137 { 6138 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6139 u16 vid = vlan_dev_vlan_id(vlan_dev); 6140 6141 if (mlxsw_sp_port_dev_check(real_dev)) 6142 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 6143 event, ptr, vid); 6144 else if (netif_is_lag_master(real_dev)) 6145 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 6146 real_dev, event, 6147 ptr, vid); 6148 else if 
(netif_is_bridge_master(real_dev)) 6149 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 6150 event, ptr, vid); 6151 6152 return 0; 6153 } 6154 6155 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 6156 unsigned long event, void *ptr) 6157 { 6158 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 6159 struct netdev_notifier_changeupper_info *info = ptr; 6160 struct netlink_ext_ack *extack; 6161 struct net_device *upper_dev; 6162 6163 if (!mlxsw_sp) 6164 return 0; 6165 6166 extack = netdev_notifier_info_to_extack(&info->info); 6167 6168 switch (event) { 6169 case NETDEV_PRECHANGEUPPER: 6170 upper_dev = info->upper_dev; 6171 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 6172 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6173 return -EOPNOTSUPP; 6174 } 6175 if (!info->linking) 6176 break; 6177 if (netif_is_macvlan(upper_dev) && 6178 !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) { 6179 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6180 return -EOPNOTSUPP; 6181 } 6182 break; 6183 case NETDEV_CHANGEUPPER: 6184 upper_dev = info->upper_dev; 6185 if (info->linking) 6186 break; 6187 if (is_vlan_dev(upper_dev)) 6188 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 6189 if (netif_is_macvlan(upper_dev)) 6190 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6191 break; 6192 } 6193 6194 return 0; 6195 } 6196 6197 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 6198 unsigned long event, void *ptr) 6199 { 6200 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 6201 struct netdev_notifier_changeupper_info *info = ptr; 6202 struct netlink_ext_ack *extack; 6203 6204 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 6205 return 0; 6206 6207 extack = netdev_notifier_info_to_extack(&info->info); 6208 6209 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 6210 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6211 6212 return 
-EOPNOTSUPP; 6213 } 6214 6215 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 6216 { 6217 struct netdev_notifier_changeupper_info *info = ptr; 6218 6219 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 6220 return false; 6221 return netif_is_l3_master(info->upper_dev); 6222 } 6223 6224 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 6225 struct net_device *dev, 6226 unsigned long event, void *ptr) 6227 { 6228 struct netdev_notifier_changeupper_info *cu_info; 6229 struct netdev_notifier_info *info = ptr; 6230 struct netlink_ext_ack *extack; 6231 struct net_device *upper_dev; 6232 6233 extack = netdev_notifier_info_to_extack(info); 6234 6235 switch (event) { 6236 case NETDEV_CHANGEUPPER: 6237 cu_info = container_of(info, 6238 struct netdev_notifier_changeupper_info, 6239 info); 6240 upper_dev = cu_info->upper_dev; 6241 if (!netif_is_bridge_master(upper_dev)) 6242 return 0; 6243 if (!mlxsw_sp_lower_get(upper_dev)) 6244 return 0; 6245 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6246 return -EOPNOTSUPP; 6247 if (cu_info->linking) { 6248 if (!netif_running(dev)) 6249 return 0; 6250 /* When the bridge is VLAN-aware, the VNI of the VxLAN 6251 * device needs to be mapped to a VLAN, but at this 6252 * point no VLANs are configured on the VxLAN device 6253 */ 6254 if (br_vlan_enabled(upper_dev)) 6255 return 0; 6256 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 6257 dev, 0, extack); 6258 } else { 6259 /* VLANs were already flushed, which triggered the 6260 * necessary cleanup 6261 */ 6262 if (br_vlan_enabled(upper_dev)) 6263 return 0; 6264 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6265 } 6266 break; 6267 case NETDEV_PRE_UP: 6268 upper_dev = netdev_master_upper_dev_get(dev); 6269 if (!upper_dev) 6270 return 0; 6271 if (!netif_is_bridge_master(upper_dev)) 6272 return 0; 6273 if (!mlxsw_sp_lower_get(upper_dev)) 6274 return 0; 6275 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 6276 
extack); 6277 case NETDEV_DOWN: 6278 upper_dev = netdev_master_upper_dev_get(dev); 6279 if (!upper_dev) 6280 return 0; 6281 if (!netif_is_bridge_master(upper_dev)) 6282 return 0; 6283 if (!mlxsw_sp_lower_get(upper_dev)) 6284 return 0; 6285 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6286 break; 6287 } 6288 6289 return 0; 6290 } 6291 6292 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 6293 unsigned long event, void *ptr) 6294 { 6295 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6296 struct mlxsw_sp_span_entry *span_entry; 6297 struct mlxsw_sp *mlxsw_sp; 6298 int err = 0; 6299 6300 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 6301 if (event == NETDEV_UNREGISTER) { 6302 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 6303 if (span_entry) 6304 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 6305 } 6306 mlxsw_sp_span_respin(mlxsw_sp); 6307 6308 if (netif_is_vxlan(dev)) 6309 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 6310 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 6311 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 6312 event, ptr); 6313 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 6314 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 6315 event, ptr); 6316 else if (event == NETDEV_PRE_CHANGEADDR || 6317 event == NETDEV_CHANGEADDR || 6318 event == NETDEV_CHANGEMTU) 6319 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); 6320 else if (mlxsw_sp_is_vrf_event(event, ptr)) 6321 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 6322 else if (mlxsw_sp_port_dev_check(dev)) 6323 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 6324 else if (netif_is_lag_master(dev)) 6325 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 6326 else if (is_vlan_dev(dev)) 6327 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 6328 else if (netif_is_bridge_master(dev)) 6329 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 6330 else if (netif_is_macvlan(dev)) 
6331 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 6332 6333 return notifier_from_errno(err); 6334 } 6335 6336 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 6337 .notifier_call = mlxsw_sp_inetaddr_valid_event, 6338 }; 6339 6340 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 6341 .notifier_call = mlxsw_sp_inet6addr_valid_event, 6342 }; 6343 6344 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 6345 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 6346 {0, }, 6347 }; 6348 6349 static struct pci_driver mlxsw_sp1_pci_driver = { 6350 .name = mlxsw_sp1_driver_name, 6351 .id_table = mlxsw_sp1_pci_id_table, 6352 }; 6353 6354 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6355 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6356 {0, }, 6357 }; 6358 6359 static struct pci_driver mlxsw_sp2_pci_driver = { 6360 .name = mlxsw_sp2_driver_name, 6361 .id_table = mlxsw_sp2_pci_id_table, 6362 }; 6363 6364 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 6365 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 6366 {0, }, 6367 }; 6368 6369 static struct pci_driver mlxsw_sp3_pci_driver = { 6370 .name = mlxsw_sp3_driver_name, 6371 .id_table = mlxsw_sp3_pci_id_table, 6372 }; 6373 6374 static int __init mlxsw_sp_module_init(void) 6375 { 6376 int err; 6377 6378 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6379 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6380 6381 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6382 if (err) 6383 goto err_sp1_core_driver_register; 6384 6385 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6386 if (err) 6387 goto err_sp2_core_driver_register; 6388 6389 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 6390 if (err) 6391 goto err_sp3_core_driver_register; 6392 6393 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6394 if (err) 6395 goto 
err_sp1_pci_driver_register; 6396 6397 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6398 if (err) 6399 goto err_sp2_pci_driver_register; 6400 6401 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 6402 if (err) 6403 goto err_sp3_pci_driver_register; 6404 6405 return 0; 6406 6407 err_sp3_pci_driver_register: 6408 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6409 err_sp2_pci_driver_register: 6410 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6411 err_sp1_pci_driver_register: 6412 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 6413 err_sp3_core_driver_register: 6414 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6415 err_sp2_core_driver_register: 6416 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6417 err_sp1_core_driver_register: 6418 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6419 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6420 return err; 6421 } 6422 6423 static void __exit mlxsw_sp_module_exit(void) 6424 { 6425 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 6426 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6427 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6428 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 6429 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6430 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6431 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6432 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6433 } 6434 6435 module_init(mlxsw_sp_module_init); 6436 module_exit(mlxsw_sp_module_exit); 6437 6438 MODULE_LICENSE("Dual BSD/GPL"); 6439 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 6440 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 6441 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 6442 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 6443 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 6444 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 6445 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 6446