// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "../mlxfw/mlxfw.h"

/* Firmware branches share the same minor-number "hundreds"; used below to
 * decide whether a running FW and the required FW are on the same branch.
 */
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

/* Firmware version required by this driver for Spectrum-1 ASICs. */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
#define MLXSW_SP1_FWREV_SUBMINOR 2308
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

/* Firmware version required by this driver for Spectrum-2 ASICs. */
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2000
#define MLXSW_SP2_FWREV_SUBMINOR 2308

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Masks of the base-MAC bits that are fixed per ASIC; the remaining low bits
 * are per-port (see mlxsw_sp_port_dev_addr_init() below).
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

/* Context for a single firmware-flash operation: ties the generic mlxfw_dev
 * passed to the mlxfw library back to the owning mlxsw_sp instance
 * (recovered via container_of() in the ops below).
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

/* Virtual table of PTP operations, filled in per ASIC generation. */
struct mlxsw_sp_ptp_ops {
	struct mlxsw_sp_ptp_clock *
		(*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
	void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);

	struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
	void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);

	/* Notify a driver that a packet that might be PTP was received. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			u8 local_port);

	/* Notify a driver that a timestamped packet was transmitted. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			    u8 local_port);

	int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	void (*shaper_work)(struct work_struct *work);
	int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
			   struct ethtool_ts_info *info);
	int (*get_stats_count)(void);
	void (*get_stats_strings)(u8 **p);
	void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  u64 *data, int data_index);
};

/* mlxfw_dev_ops: query a firmware component's maximum size, alignment and
 * maximum write chunk via the MCQI register. The alignment is raised to at
 * least 4 bytes (2 bits) and the write size capped to what one MCDA access
 * can carry.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* mlxfw_dev_ops: acquire the firmware-update handle via the MCC register.
 * Fails with -EBUSY if the management FSM is not idle.
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops: announce the start of an update for one component. */
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops: download one data chunk at 'offset' via the MCDA register. */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

/* mlxfw_dev_ops: ask firmware to verify a fully-downloaded component. */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops: activate the newly flashed firmware image. */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops: read back the management FSM state and error code. The
 * error code is clamped to MLXFW_FSM_STATE_ERR_MAX so unknown values map to
 * a defined enumerator.
 */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* mlxfw_dev_ops: abort an in-progress update. Best-effort - the write's
 * return value is intentionally ignored since this runs on error paths.
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops: release the update handle taken in mlxsw_sp_fsm_lock().
 * Best-effort, like mlxsw_sp_fsm_cancel().
 */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops: forward flash progress to devlink so user space can track
 * the update.
 */
static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
				   const char *msg, const char *comp_name,
				   u32 done_bytes, u32 total_bytes)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;

	devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
					   msg, comp_name,
					   done_bytes, total_bytes);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release,
	.status_notify		= mlxsw_sp_status_notify,
};

/* Flash 'firmware' to the device through the mlxfw library, bracketing the
 * operation with core flash start/end hooks and devlink begin/end
 * notifications. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};
	int err;

	mlxsw_core_fw_flash_start(mlxsw_sp->core);
	devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
				   firmware, extack);
	devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
	mlxsw_core_fw_flash_end(mlxsw_sp->core);

	return err;
}

/* Validate that the running firmware is compatible with this driver and, if
 * not, flash the required firmware file. Returns 0 when compatible (or when
 * validation is disabled via fw_load_policy), -EAGAIN when a new image was
 * flashed and the caller should reset the device, or a negative errno on
 * failure.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
	    mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}

/* devlink flash_update handler: load 'file_name' and flash it. Per-component
 * update is not supported, hence -EOPNOTSUPP when 'component' is given.
 */
static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
				 const char *file_name, const char *component,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	const struct firmware *firmware;
	int err;

	if (component)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, file_name,
				      mlxsw_sp->bus_info->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
	release_firmware(firmware);

	return err;
}

/* Read a flow counter's packet and byte values via the MGPC register.
 * Either output pointer may be NULL if that value is not needed.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

/* Zero a flow counter in hardware via the MGPC clear opcode. */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a counter from the flow sub-pool and clear it so the caller
 * starts from zero. On clear failure the counter is returned to the pool.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

/* Return a flow counter previously taken by mlxsw_sp_flow_counter_alloc(). */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

/* Prepend the mlxsw Tx header to 'skb'. The caller must have reserved
 * MLXSW_TXHDR_LEN bytes of headroom (see mlxsw_sp_port_xmit()). Packets are
 * sent as control packets directed at tx_info->local_port.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

/* Map a bridge STP state to the corresponding SPMS register state.
 * BUG()s on an unknown state, as callers only pass BR_STATE_* values.
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

/* Program the STP state of one VLAN on a port via the SPMS register.
 * The payload is heap-allocated because MLXSW_REG_SPMS_LEN is too large for
 * the stack.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Read the device base MAC (SPAD register) into mlxsw_sp->base_mac. */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

/* Enable/disable packet sampling on a port at the given rate (MPSC). */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

/* Set a port's administrative state (up/down) via the PAOS register. */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

/* Program a port's hardware MAC address via the PPAD register. */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port's MAC from the device base MAC (last byte offset by the
 * local port number) and program both the netdev and the hardware with it.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

/* Set the port MTU (PMTU register). The hardware MTU accounts for the Tx
 * header and Ethernet header on top of the L3 MTU; the maximum supported
 * value is first queried and enforced.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

/* Assign a port to a switch partition (PSPA register). */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

/* Enable/disable virtual-port mode on a port (SVPE register). */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable/disable FDB learning for one VID on a port (SPVMLR register).
 * Heap-allocated payload; MLXSW_REG_SPVMLR_LEN is too large for the stack.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

/* Program a port's PVID in hardware (SPVID register). */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

/* Allow or forbid untagged frames on a port (SPAFT register). */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

/* Set the port's PVID. A VID of 0 means "no PVID": untagged frames are then
 * disallowed rather than mapped. On failure of the untagged-frames step the
 * previous PVID is restored.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

/* Create the system-port to local-port mapping (SSPR register). */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

/* Read a port's module/lane mapping (PMLP register) into 'port_mapping'.
 * Rejects configurations this driver cannot handle: non-power-of-2 widths,
 * multiple modules on one port, split RX/TX lanes, or non-sequential lanes.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

/* Write the port's saved module/lane mapping back to hardware (PMLP). */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* Unmap a port from its module by programming a zero width (PMLP). */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* ndo_open: bring the port administratively up and start the Tx queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop: stop the Tx queue and bring the port administratively down. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

/* ndo_start_xmit: prepend the mlxsw Tx header and hand the skb to the core
 * for transmission, maintaining per-CPU Tx statistics. Reallocates headroom
 * if the skb cannot hold the Tx header, and pads short frames to the
 * Ethernet minimum. On a transmit race losing to a full queue the packet is
 * dropped (counted in tx_dropped) rather than requeued.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: intentionally empty - the switch handles filtering. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

/* ndo_set_mac_address: validate and program a new MAC, updating the netdev
 * copy only after the hardware write succeeds.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* Priority-group buffer threshold for a given MTU, in cells (2 x MTU). */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* PFC delay buffer for a port, in cells: the configured delay (given in
 * bits, hence the BITS_PER_BYTE division) scaled by the cell factor, plus
 * one MTU's worth of cells.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

/* Delay buffer in cells for one priority group: PFC takes precedence over
 * global PAUSE; lossy traffic needs no delay buffer at all.
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

/* Pack one PBMC buffer entry as lossy or lossless (with threshold). */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

/* Size the port's headroom buffers (PBMC register) for the given MTU,
 * priority-to-TC mapping and PAUSE/PFC configuration. Only priority groups
 * that some priority actually maps to are (re)configured; a group is
 * lossless when PFC is enabled for a mapped priority or global pause is on.
 * Fails with -ENOBUFS if the total would exceed the maximum headroom.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		total_cells = thres_cells + delay_cells;

		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Headroom setup using the port's current DCB state (ETS priority map and
 * PFC configuration when DCB is active, defaults otherwise).
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

/* ndo_change_mtu: resize headroom, update SPAN buffers and program the port
 * MTU, unwinding in reverse order (back to dev->mtu) if any step fails.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Aggregate the per-CPU software Tx/Rx statistics into 'stats', using the
 * u64_stats sequence counter for a consistent snapshot per CPU.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* Only CPU-hit (slow path) offload statistics are exposed. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group/priority for the port into @ppcnt_pl. */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill @stats from the IEEE 802.3 PPCNT counter group. rx_errors is
 * derived as the sum of CRC, alignment and length errors.
 */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Gather extended per-TC / per-priority counters. Individual query
 * failures are skipped so the remaining counters are still refreshed.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Periodic worker that refreshes the cached HW statistics and always
 * re-arms itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	/* No point in refreshing counters while the link is down. */
	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Set VLAN membership for a contiguous [vid_begin, vid_end] range via
 * a single SPVM write. The payload is heap-allocated because of its
 * size (MLXSW_REG_SPVM_LEN).
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Same as above, but splits the range into SPVM-sized chunks
 * (MLXSW_REG_SPVM_REC_MAX_COUNT records per write).
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all port VLANs; the default VID is kept unless
 * @flush_default is set.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the port VLAN from whichever entity currently owns it:
 * a bridge port, or the router (FID) otherwise.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a port VLAN: join the VID in hardware (untagged only for the
 * default VID), then track it on the port's VLAN list. Returns
 * ERR_PTR(-EEXIST) if the VID is already configured.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Reverse of mlxsw_sp_port_vlan_create(): detach, untrack, free and
 * leave the VID in hardware.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Look up a matchall entry on the port by its tc cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirred action as a SPAN session towards
 * act->dev; the resulting span_id is recorded in @mirror for teardown.
 */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
					true, &mirror->span_id);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only one active sampling session
 * per port is supported, and the rate is bounded by the MPSC register.
 * The psample group pointer is published under RCU for the RX path.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

/* Install a matchall classifier: exactly one action (mirred or sample)
 * on protocol "all" is supported. On success the entry is tracked on
 * the port's mall_tc_list keyed by the tc cookie.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* Remove a previously installed matchall classifier by cookie and
 * undo its hardware offload.
 */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct
						 tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch flower classifier commands to the ACL/flower layer. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case FLOW_CLS_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case FLOW_CLS_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Block callback handling matchall only; CLSFLOWER is accepted as a
 * no-op here because it is served by the separate flower callback.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* Block callback handling flower only; matchall is accepted as a no-op
 * (mirror image of the matchall callback above).
 */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	mlxsw_sp_acl_block_destroy(acl_block);
}

static LIST_HEAD(mlxsw_sp_block_cb_list);

/* Bind a port to the flower callback of a flow block. The ACL block
 * (and its flow_block_cb) is shared between all ports bound to the
 * same block and is reference-counted; it is created on first bind.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
					       mlxsw_sp, acl_block,
					       mlxsw_sp_tc_block_flower_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_acl_block_destroy(acl_block);
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
		register_block = true;
	} else {
		acl_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress, f->extack);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
	}

	return 0;

err_block_bind:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
err_cb_register:
	return err;
}

/* Undo flower_bind for one port; the shared flow_block_cb is removed
 * only when the last reference is dropped.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = flow_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

/* TC_SETUP_BLOCK entry point: registers both the per-port matchall
 * callback and the shared flower callback for the block, depending on
 * the binder type (clsact ingress/egress).
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	f->driver_block_list = &mlxsw_sp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
					  &mlxsw_sp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
					       mlxsw_sp_port, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
							  ingress);
		if (err) {
			flow_block_cb_free(block_cb);
			return err;
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f, ingress);
		block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_setup_tc: supported offloads are clsact blocks and the RED and
 * PRIO qdiscs.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}


/* Toggle NETIF_F_HW_TC: disabling is refused while any offloaded tc
 * rules (ACL or matchall) are still active on the port; otherwise the
 * port's ACL blocks are marked disabled/enabled accordingly.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* Toggle NETIF_F_LOOPBACK via the PPLR register. The port is taken
 * administratively down around the write when it is running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply one feature bit change through its handler and mirror the
 * result into dev->features. No-op when the bit did not change.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
/* ndo_set_features: handle the supported bits; on any failure restore
 * the previous feature set and report -EINVAL.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: pass the user's config to the per-ASIC PTP ops and
 * copy the possibly-adjusted config back to user space.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Reset the port's hwtstamp configuration to all-zero defaults. */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* ndo_do_ioctl: only hardware timestamping get/set is supported. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Write the port's PAUSE RX/TX enables via the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool set_pauseparam: mutually exclusive with PFC, no autoneg.
 * Headroom is sized for the new pause state before the PFCC write and
 * restored on failure.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en =
mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 2055 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 2056 return err; 2057 } 2058 2059 struct mlxsw_sp_port_hw_stats { 2060 char str[ETH_GSTRING_LEN]; 2061 u64 (*getter)(const char *payload); 2062 bool cells_bytes; 2063 }; 2064 2065 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 2066 { 2067 .str = "a_frames_transmitted_ok", 2068 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 2069 }, 2070 { 2071 .str = "a_frames_received_ok", 2072 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 2073 }, 2074 { 2075 .str = "a_frame_check_sequence_errors", 2076 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 2077 }, 2078 { 2079 .str = "a_alignment_errors", 2080 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 2081 }, 2082 { 2083 .str = "a_octets_transmitted_ok", 2084 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 2085 }, 2086 { 2087 .str = "a_octets_received_ok", 2088 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 2089 }, 2090 { 2091 .str = "a_multicast_frames_xmitted_ok", 2092 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 2093 }, 2094 { 2095 .str = "a_broadcast_frames_xmitted_ok", 2096 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 2097 }, 2098 { 2099 .str = "a_multicast_frames_received_ok", 2100 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 2101 }, 2102 { 2103 .str = "a_broadcast_frames_received_ok", 2104 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 2105 }, 2106 { 2107 .str = "a_in_range_length_errors", 2108 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 2109 }, 2110 { 2111 .str = "a_out_of_range_length_field", 2112 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 2113 }, 2114 { 2115 .str = "a_frame_too_long_errors", 2116 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 2117 }, 2118 { 2119 .str = "a_symbol_error_during_carrier", 2120 .getter = 
mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 (interfaces MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (EtherLike MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Per-reason packet discard counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counter group; instantiated once per 802.1Qaz priority
 * (names get a "_<prio>" suffix, see mlxsw_sp_port_get_prio_strings()).
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-TC counter group; tc_transmit_queue is reported by the device in
 * cells and converted to bytes (cells_bytes).
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total number of fixed ethtool statistics.  PTP counters are not
 * included here; they are added separately via ptp_ops->get_stats_count()
 * in mlxsw_sp_port_get_sset_count().
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit "<stat>_<prio>" strings for one priority, advancing *p. */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Emit "<stat>_<tc>" strings for one traffic class, advancing *p. */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool .get_strings: the string layout must match exactly the value
 * layout produced by mlxsw_sp_port_get_stats().
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p +=
ETH_GSTRING_LEN; 2394 } 2395 2396 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 2397 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 2398 ETH_GSTRING_LEN); 2399 p += ETH_GSTRING_LEN; 2400 } 2401 2402 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2403 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2404 ETH_GSTRING_LEN); 2405 p += ETH_GSTRING_LEN; 2406 } 2407 2408 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2409 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2410 ETH_GSTRING_LEN); 2411 p += ETH_GSTRING_LEN; 2412 } 2413 2414 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2415 memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2416 ETH_GSTRING_LEN); 2417 p += ETH_GSTRING_LEN; 2418 } 2419 2420 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2421 mlxsw_sp_port_get_prio_strings(&p, i); 2422 2423 for (i = 0; i < TC_MAX_QUEUE; i++) 2424 mlxsw_sp_port_get_tc_strings(&p, i); 2425 2426 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p); 2427 break; 2428 } 2429 } 2430 2431 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2432 enum ethtool_phys_id_state state) 2433 { 2434 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2435 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2436 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2437 bool active; 2438 2439 switch (state) { 2440 case ETHTOOL_ID_ACTIVE: 2441 active = true; 2442 break; 2443 case ETHTOOL_ID_INACTIVE: 2444 active = false; 2445 break; 2446 default: 2447 return -EOPNOTSUPP; 2448 } 2449 2450 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2451 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2452 } 2453 2454 static int 2455 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2456 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2457 { 2458 switch (grp) { 2459 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2460 *p_hw_stats = mlxsw_sp_port_hw_stats; 2461 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2462 break; 2463 case 
MLXSW_REG_PPCNT_RFC_2863_CNT: 2464 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2465 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2466 break; 2467 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2468 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2469 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2470 break; 2471 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2472 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2473 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2474 break; 2475 case MLXSW_REG_PPCNT_DISCARD_CNT: 2476 *p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2477 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2478 break; 2479 case MLXSW_REG_PPCNT_PRIO_CNT: 2480 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2481 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2482 break; 2483 case MLXSW_REG_PPCNT_TC_CNT: 2484 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2485 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2486 break; 2487 default: 2488 WARN_ON(1); 2489 return -EOPNOTSUPP; 2490 } 2491 return 0; 2492 } 2493 2494 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2495 enum mlxsw_reg_ppcnt_grp grp, int prio, 2496 u64 *data, int data_index) 2497 { 2498 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2499 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2500 struct mlxsw_sp_port_hw_stats *hw_stats; 2501 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2502 int i, len; 2503 int err; 2504 2505 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2506 if (err) 2507 return; 2508 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2509 for (i = 0; i < len; i++) { 2510 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2511 if (!hw_stats[i].cells_bytes) 2512 continue; 2513 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2514 data[data_index + i]); 2515 } 2516 } 2517 2518 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2519 struct ethtool_stats *stats, u64 *data) 2520 { 2521 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2522 int i, data_index = 0; 2523 2524 /* IEEE 802.3 
Counters */ 2525 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2526 data, data_index); 2527 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2528 2529 /* RFC 2863 Counters */ 2530 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2531 data, data_index); 2532 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2533 2534 /* RFC 2819 Counters */ 2535 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2536 data, data_index); 2537 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2538 2539 /* RFC 3635 Counters */ 2540 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2541 data, data_index); 2542 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2543 2544 /* Discard Counters */ 2545 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2546 data, data_index); 2547 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2548 2549 /* Per-Priority Counters */ 2550 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2551 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2552 data, data_index); 2553 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2554 } 2555 2556 /* Per-TC Counters */ 2557 for (i = 0; i < TC_MAX_QUEUE; i++) { 2558 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2559 data, data_index); 2560 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2561 } 2562 2563 /* PTP counters */ 2564 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port, 2565 data, data_index); 2566 data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2567 } 2568 2569 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2570 { 2571 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2572 2573 switch (sset) { 2574 case ETH_SS_STATS: 2575 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN + 2576 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2577 default: 2578 return -EOPNOTSUPP; 2579 } 2580 } 2581 2582 struct mlxsw_sp1_port_link_mode { 2583 enum ethtool_link_mode_bit_indices mask_ethtool; 2584 u32 
mask;
	u32 speed;
};

/* Spectrum-1 PTYS protocol mask <-> ethtool link mode mapping, ordered
 * by ascending speed; several PTYS bits may share one ethtool bit.
 */
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Derive the supported port types (FIBRE / Backplane) from the PTYS
 * capability mask.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Translate a PTYS protocol mask into ethtool link mode bits.
 * width is unused on Spectrum-1 (kept for interface parity with
 * Spectrum-2's per-width tables).
 */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Return the speed of the first table entry present in the PTYS mask,
 * or SPEED_UNKNOWN if none match.
 */
static u32
mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			return mlxsw_sp1_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill speed/duplex in cmd; both stay unknown unless carrier is up. */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Build a PTYS advertisement mask from ethtool advertised modes. */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build a PTYS mask of all modes with exactly the given speed. */
static u32
mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
			u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build a PTYS mask of all modes up to and including upper_speed. */
static u32
mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Spectrum-1 per-lane base speed is a constant 25G. */
static int
mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32 *base_speed)
{
	*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
	return 0;
}

/* Thin wrappers so Spectrum-1 and Spectrum-2 share one ops interface
 * despite using different PTYS register layouts.
 */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* ASIC-specific link mode operations for Spectrum-1. */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp1_from_ptys_link,
	.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
	.to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed,
	.port_speed_base = mlxsw_sp1_port_speed_base,
	.reg_ptys_eth_pack =
mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
};

/* Spectrum-2 groups several ethtool link mode bits behind each extended
 * PTYS speed capability; one array per capability bit follows.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_400gaui_8[] = {
	ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)

/* Port width (number of lanes) encoded as a bitmask so a link mode can
 * declare which widths it is valid for.
 */
#define MLXSW_SP_PORT_MASK_WIDTH_1X	BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X	BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X	BIT(2)
#define MLXSW_SP_PORT_MASK_WIDTH_8X	BIT(3)

/* Convert a lane count (1/2/4/8) to its width bitmask; 0 on bogus input. */
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
	switch (width) {
	case 1:
		return MLXSW_SP_PORT_MASK_WIDTH_1X;
	case 2:
		return MLXSW_SP_PORT_MASK_WIDTH_2X;
	case 4:
		return MLXSW_SP_PORT_MASK_WIDTH_4X;
	case 8:
		return MLXSW_SP_PORT_MASK_WIDTH_8X;
	default:
WARN_ON_ONCE(1);
		return 0;
	}
}

/* Link mode descriptor for Spectrum-2: an extended PTYS capability bit
 * maps to a set of ethtool link mode bits, a speed, and the port widths
 * (lane counts) it is valid for.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
	u8 mask_width;
};

static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_200000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_400000,
	},
};

#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)

/* On Spectrum-2 the PTYS capability mask does not distinguish media
 * types, so both FIBRE and Backplane are always reported as supported.
 */
static void
mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set every ethtool bit belonging to one link mode descriptor. */
static void
mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			  unsigned long *mode)
{
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++)
		__set_bit(link_mode->mask_ethtool[i], mode);
}

/* Translate a PTYS mask into ethtool bits, restricted to modes valid
 * for the port's width (lane count).
 */
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
		    (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
			mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
						  mode);
	}
}

/* Return the speed of the first table entry present in the PTYS mask,
 * or SPEED_UNKNOWN if none match.
 */
static u32
mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
			return mlxsw_sp2_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill speed/duplex in cmd; both stay unknown unless carrier is up. */
static void
mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* A descriptor matches only when ALL of its ethtool bits are set in the
 * given mode bitmap (its PTYS bit cannot advertise a subset).
 */
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			   const unsigned long *mode)
{
	int cnt = 0;
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++) {
		if (test_bit(link_mode->mask_ethtool[i], mode))
			cnt++;
	}

	return cnt == link_mode->m_ethtool_len;
}

/* Build the PTYS advertisement mask from ethtool advertised modes,
 * restricted to modes valid for the port's width.
 */
static u32
mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
		    mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
					       cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
				   u8 width, u32 speed)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;
3227 3228 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3229 if ((speed == mlxsw_sp2_port_link_mode[i].speed) && 3230 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3231 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3232 } 3233 return ptys_proto; 3234 } 3235 3236 static u32 3237 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 3238 { 3239 u32 ptys_proto = 0; 3240 int i; 3241 3242 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3243 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 3244 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3245 } 3246 return ptys_proto; 3247 } 3248 3249 static int 3250 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3251 u32 *base_speed) 3252 { 3253 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3254 u32 eth_proto_cap; 3255 int err; 3256 3257 /* In Spectrum-2, the speed of 1x can change from port to port, so query 3258 * it from firmware. 3259 */ 3260 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3261 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3262 if (err) 3263 return err; 3264 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3265 3266 if (eth_proto_cap & 3267 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3268 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3269 return 0; 3270 } 3271 3272 if (eth_proto_cap & 3273 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3274 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3275 return 0; 3276 } 3277 3278 return -EIO; 3279 } 3280 3281 static void 3282 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3283 u8 local_port, u32 proto_admin, 3284 bool autoneg) 3285 { 3286 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3287 } 3288 3289 static void 3290 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3291 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3292 u32 *p_eth_proto_oper) 3293 { 3294 
mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3295 p_eth_proto_admin, p_eth_proto_oper); 3296 } 3297 3298 static const struct mlxsw_sp_port_type_speed_ops 3299 mlxsw_sp2_port_type_speed_ops = { 3300 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3301 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3302 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3303 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3304 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3305 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3306 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3307 .port_speed_base = mlxsw_sp2_port_speed_base, 3308 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3309 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3310 }; 3311 3312 static void 3313 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3314 u8 width, struct ethtool_link_ksettings *cmd) 3315 { 3316 const struct mlxsw_sp_port_type_speed_ops *ops; 3317 3318 ops = mlxsw_sp->port_type_speed_ops; 3319 3320 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3321 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3322 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3323 3324 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3325 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, 3326 cmd->link_modes.supported); 3327 } 3328 3329 static void 3330 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3331 u32 eth_proto_admin, bool autoneg, u8 width, 3332 struct ethtool_link_ksettings *cmd) 3333 { 3334 const struct mlxsw_sp_port_type_speed_ops *ops; 3335 3336 ops = mlxsw_sp->port_type_speed_ops; 3337 3338 if (!autoneg) 3339 return; 3340 3341 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3342 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, 3343 cmd->link_modes.advertising); 3344 } 3345 3346 static u8 3347 mlxsw_sp_port_connector_port(enum 
mlxsw_reg_ptys_connector_type connector_type) 3348 { 3349 switch (connector_type) { 3350 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3351 return PORT_OTHER; 3352 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3353 return PORT_NONE; 3354 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3355 return PORT_TP; 3356 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3357 return PORT_AUI; 3358 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3359 return PORT_BNC; 3360 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3361 return PORT_MII; 3362 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3363 return PORT_FIBRE; 3364 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3365 return PORT_DA; 3366 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3367 return PORT_OTHER; 3368 default: 3369 WARN_ON_ONCE(1); 3370 return PORT_OTHER; 3371 } 3372 } 3373 3374 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3375 struct ethtool_link_ksettings *cmd) 3376 { 3377 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3378 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3379 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3380 const struct mlxsw_sp_port_type_speed_ops *ops; 3381 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3382 u8 connector_type; 3383 bool autoneg; 3384 int err; 3385 3386 ops = mlxsw_sp->port_type_speed_ops; 3387 3388 autoneg = mlxsw_sp_port->link.autoneg; 3389 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3390 0, false); 3391 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3392 if (err) 3393 return err; 3394 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3395 ð_proto_admin, ð_proto_oper); 3396 3397 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, 3398 mlxsw_sp_port->mapping.width, cmd); 3399 3400 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3401 mlxsw_sp_port->mapping.width, cmd); 3402 3403 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 3404 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3405 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3406 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3407 eth_proto_oper, cmd); 3408 3409 return 0; 3410 } 3411 3412 static int 3413 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3414 const struct ethtool_link_ksettings *cmd) 3415 { 3416 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3417 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3418 const struct mlxsw_sp_port_type_speed_ops *ops; 3419 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3420 u32 eth_proto_cap, eth_proto_new; 3421 bool autoneg; 3422 int err; 3423 3424 ops = mlxsw_sp->port_type_speed_ops; 3425 3426 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3427 0, false); 3428 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3429 if (err) 3430 return err; 3431 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3432 3433 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3434 eth_proto_new = autoneg ? 
3435 ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, 3436 cmd) : 3437 ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, 3438 cmd->base.speed); 3439 3440 eth_proto_new = eth_proto_new & eth_proto_cap; 3441 if (!eth_proto_new) { 3442 netdev_err(dev, "No supported speed requested\n"); 3443 return -EINVAL; 3444 } 3445 3446 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3447 eth_proto_new, autoneg); 3448 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3449 if (err) 3450 return err; 3451 3452 mlxsw_sp_port->link.autoneg = autoneg; 3453 3454 if (!netif_running(dev)) 3455 return 0; 3456 3457 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3458 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3459 3460 return 0; 3461 } 3462 3463 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3464 struct ethtool_modinfo *modinfo) 3465 { 3466 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3467 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3468 int err; 3469 3470 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3471 mlxsw_sp_port->mapping.module, 3472 modinfo); 3473 3474 return err; 3475 } 3476 3477 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3478 struct ethtool_eeprom *ee, 3479 u8 *data) 3480 { 3481 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3482 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3483 int err; 3484 3485 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3486 mlxsw_sp_port->mapping.module, ee, 3487 data); 3488 3489 return err; 3490 } 3491 3492 static int 3493 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3494 { 3495 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3496 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3497 3498 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3499 } 3500 3501 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3502 .get_drvinfo = 
mlxsw_sp_port_get_drvinfo, 3503 .get_link = ethtool_op_get_link, 3504 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 3505 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3506 .get_strings = mlxsw_sp_port_get_strings, 3507 .set_phys_id = mlxsw_sp_port_set_phys_id, 3508 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3509 .get_sset_count = mlxsw_sp_port_get_sset_count, 3510 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3511 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3512 .get_module_info = mlxsw_sp_get_module_info, 3513 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3514 .get_ts_info = mlxsw_sp_get_ts_info, 3515 }; 3516 3517 static int 3518 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) 3519 { 3520 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3521 const struct mlxsw_sp_port_type_speed_ops *ops; 3522 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3523 u32 eth_proto_admin; 3524 u32 upper_speed; 3525 u32 base_speed; 3526 int err; 3527 3528 ops = mlxsw_sp->port_type_speed_ops; 3529 3530 err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, 3531 &base_speed); 3532 if (err) 3533 return err; 3534 upper_speed = base_speed * mlxsw_sp_port->mapping.width; 3535 3536 eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); 3537 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3538 eth_proto_admin, mlxsw_sp_port->link.autoneg); 3539 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3540 } 3541 3542 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3543 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3544 bool dwrr, u8 dwrr_weight) 3545 { 3546 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3547 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3548 3549 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3550 next_index); 3551 mlxsw_reg_qeec_de_set(qeec_pl, true); 3552 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3553 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, 
dwrr_weight); 3554 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3555 } 3556 3557 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 3558 enum mlxsw_reg_qeec_hr hr, u8 index, 3559 u8 next_index, u32 maxrate) 3560 { 3561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3562 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3563 3564 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3565 next_index); 3566 mlxsw_reg_qeec_mase_set(qeec_pl, true); 3567 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 3568 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3569 } 3570 3571 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 3572 enum mlxsw_reg_qeec_hr hr, u8 index, 3573 u8 next_index, u32 minrate) 3574 { 3575 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3576 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3577 3578 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3579 next_index); 3580 mlxsw_reg_qeec_mise_set(qeec_pl, true); 3581 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 3582 3583 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3584 } 3585 3586 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 3587 u8 switch_prio, u8 tclass) 3588 { 3589 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3590 char qtct_pl[MLXSW_REG_QTCT_LEN]; 3591 3592 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 3593 tclass); 3594 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 3595 } 3596 3597 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 3598 { 3599 int err, i; 3600 3601 /* Setup the elements hierarcy, so that each TC is linked to 3602 * one subgroup, which are all member in the same group. 
3603 */ 3604 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3605 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 3606 0); 3607 if (err) 3608 return err; 3609 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3610 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3611 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 3612 0, false, 0); 3613 if (err) 3614 return err; 3615 } 3616 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3617 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3618 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 3619 false, 0); 3620 if (err) 3621 return err; 3622 3623 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3624 MLXSW_REG_QEEC_HIERARCY_TC, 3625 i + 8, i, 3626 true, 100); 3627 if (err) 3628 return err; 3629 } 3630 3631 /* Make sure the max shaper is disabled in all hierarchies that support 3632 * it. Note that this disables ptps (PTP shaper), but that is intended 3633 * for the initial configuration. 3634 */ 3635 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3636 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 3637 MLXSW_REG_QEEC_MAS_DIS); 3638 if (err) 3639 return err; 3640 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3641 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3642 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 3643 i, 0, 3644 MLXSW_REG_QEEC_MAS_DIS); 3645 if (err) 3646 return err; 3647 } 3648 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3649 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3650 MLXSW_REG_QEEC_HIERARCY_TC, 3651 i, i, 3652 MLXSW_REG_QEEC_MAS_DIS); 3653 if (err) 3654 return err; 3655 3656 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3657 MLXSW_REG_QEEC_HIERARCY_TC, 3658 i + 8, i, 3659 MLXSW_REG_QEEC_MAS_DIS); 3660 if (err) 3661 return err; 3662 } 3663 3664 /* Configure the min shaper for multicast TCs. */ 3665 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3666 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3667 MLXSW_REG_QEEC_HIERARCY_TC, 3668 i + 8, i, 3669 MLXSW_REG_QEEC_MIS_MIN); 3670 if (err) 3671 return err; 3672 } 3673 3674 /* Map all priorities to traffic class 0. 
*/ 3675 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3676 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3677 if (err) 3678 return err; 3679 } 3680 3681 return 0; 3682 } 3683 3684 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3685 bool enable) 3686 { 3687 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3688 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3689 3690 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3691 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3692 } 3693 3694 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3695 u8 split_base_local_port, 3696 struct mlxsw_sp_port_mapping *port_mapping) 3697 { 3698 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3699 bool split = !!split_base_local_port; 3700 struct mlxsw_sp_port *mlxsw_sp_port; 3701 struct net_device *dev; 3702 int err; 3703 3704 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, 3705 port_mapping->module + 1, split, 3706 port_mapping->lane / port_mapping->width, 3707 mlxsw_sp->base_mac, 3708 sizeof(mlxsw_sp->base_mac)); 3709 if (err) { 3710 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3711 local_port); 3712 return err; 3713 } 3714 3715 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3716 if (!dev) { 3717 err = -ENOMEM; 3718 goto err_alloc_etherdev; 3719 } 3720 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3721 dev_net_set(dev, mlxsw_sp_net(mlxsw_sp)); 3722 mlxsw_sp_port = netdev_priv(dev); 3723 mlxsw_sp_port->dev = dev; 3724 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3725 mlxsw_sp_port->local_port = local_port; 3726 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 3727 mlxsw_sp_port->split = split; 3728 mlxsw_sp_port->split_base_local_port = split_base_local_port; 3729 mlxsw_sp_port->mapping = *port_mapping; 3730 mlxsw_sp_port->link.autoneg = 1; 3731 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3732 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3733 3734 mlxsw_sp_port->pcpu_stats 
= 3735 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 3736 if (!mlxsw_sp_port->pcpu_stats) { 3737 err = -ENOMEM; 3738 goto err_alloc_stats; 3739 } 3740 3741 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3742 GFP_KERNEL); 3743 if (!mlxsw_sp_port->sample) { 3744 err = -ENOMEM; 3745 goto err_alloc_sample; 3746 } 3747 3748 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3749 &update_stats_cache); 3750 3751 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3752 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3753 3754 err = mlxsw_sp_port_module_map(mlxsw_sp_port); 3755 if (err) { 3756 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3757 mlxsw_sp_port->local_port); 3758 goto err_port_module_map; 3759 } 3760 3761 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3762 if (err) { 3763 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3764 mlxsw_sp_port->local_port); 3765 goto err_port_swid_set; 3766 } 3767 3768 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3769 if (err) { 3770 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3771 mlxsw_sp_port->local_port); 3772 goto err_dev_addr_init; 3773 } 3774 3775 netif_carrier_off(dev); 3776 3777 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3778 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3779 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3780 3781 dev->min_mtu = 0; 3782 dev->max_mtu = ETH_MAX_MTU; 3783 3784 /* Each packet needs to have a Tx header (metadata) on top all other 3785 * headers. 
3786 */ 3787 dev->needed_headroom = MLXSW_TXHDR_LEN; 3788 3789 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3790 if (err) { 3791 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3792 mlxsw_sp_port->local_port); 3793 goto err_port_system_port_mapping_set; 3794 } 3795 3796 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port); 3797 if (err) { 3798 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3799 mlxsw_sp_port->local_port); 3800 goto err_port_speed_by_width_set; 3801 } 3802 3803 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3804 if (err) { 3805 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3806 mlxsw_sp_port->local_port); 3807 goto err_port_mtu_set; 3808 } 3809 3810 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3811 if (err) 3812 goto err_port_admin_status_set; 3813 3814 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3815 if (err) { 3816 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3817 mlxsw_sp_port->local_port); 3818 goto err_port_buffers_init; 3819 } 3820 3821 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3822 if (err) { 3823 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3824 mlxsw_sp_port->local_port); 3825 goto err_port_ets_init; 3826 } 3827 3828 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3829 if (err) { 3830 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3831 mlxsw_sp_port->local_port); 3832 goto err_port_tc_mc_mode; 3833 } 3834 3835 /* ETS and buffers must be initialized before DCB. 
*/ 3836 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3837 if (err) { 3838 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3839 mlxsw_sp_port->local_port); 3840 goto err_port_dcb_init; 3841 } 3842 3843 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3844 if (err) { 3845 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3846 mlxsw_sp_port->local_port); 3847 goto err_port_fids_init; 3848 } 3849 3850 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3851 if (err) { 3852 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3853 mlxsw_sp_port->local_port); 3854 goto err_port_qdiscs_init; 3855 } 3856 3857 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false, 3858 false); 3859 if (err) { 3860 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n", 3861 mlxsw_sp_port->local_port); 3862 goto err_port_vlan_clear; 3863 } 3864 3865 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3866 if (err) { 3867 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3868 mlxsw_sp_port->local_port); 3869 goto err_port_nve_init; 3870 } 3871 3872 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3873 if (err) { 3874 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3875 mlxsw_sp_port->local_port); 3876 goto err_port_pvid_set; 3877 } 3878 3879 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3880 MLXSW_SP_DEFAULT_VID); 3881 if (IS_ERR(mlxsw_sp_port_vlan)) { 3882 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3883 mlxsw_sp_port->local_port); 3884 err = PTR_ERR(mlxsw_sp_port_vlan); 3885 goto err_port_vlan_create; 3886 } 3887 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3888 3889 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 3890 mlxsw_sp->ptp_ops->shaper_work); 3891 3892 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3893 err = register_netdev(dev); 3894 if (err) { 3895 dev_err(mlxsw_sp->bus_info->dev, "Port 
%d: Failed to register netdev\n", 3896 mlxsw_sp_port->local_port); 3897 goto err_register_netdev; 3898 } 3899 3900 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3901 mlxsw_sp_port, dev); 3902 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3903 return 0; 3904 3905 err_register_netdev: 3906 mlxsw_sp->ports[local_port] = NULL; 3907 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 3908 err_port_vlan_create: 3909 err_port_pvid_set: 3910 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3911 err_port_nve_init: 3912 err_port_vlan_clear: 3913 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3914 err_port_qdiscs_init: 3915 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3916 err_port_fids_init: 3917 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3918 err_port_dcb_init: 3919 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3920 err_port_tc_mc_mode: 3921 err_port_ets_init: 3922 err_port_buffers_init: 3923 err_port_admin_status_set: 3924 err_port_mtu_set: 3925 err_port_speed_by_width_set: 3926 err_port_system_port_mapping_set: 3927 err_dev_addr_init: 3928 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3929 err_port_swid_set: 3930 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3931 err_port_module_map: 3932 kfree(mlxsw_sp_port->sample); 3933 err_alloc_sample: 3934 free_percpu(mlxsw_sp_port->pcpu_stats); 3935 err_alloc_stats: 3936 free_netdev(dev); 3937 err_alloc_etherdev: 3938 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3939 return err; 3940 } 3941 3942 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3943 { 3944 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3945 3946 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3947 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 3948 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 3949 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3950 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3951 
mlxsw_sp->ports[local_port] = NULL; 3952 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 3953 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3954 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3955 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3956 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3957 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3958 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3959 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3960 kfree(mlxsw_sp_port->sample); 3961 free_percpu(mlxsw_sp_port->pcpu_stats); 3962 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3963 free_netdev(mlxsw_sp_port->dev); 3964 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3965 } 3966 3967 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 3968 { 3969 struct mlxsw_sp_port *mlxsw_sp_port; 3970 int err; 3971 3972 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 3973 if (!mlxsw_sp_port) 3974 return -ENOMEM; 3975 3976 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3977 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 3978 3979 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 3980 mlxsw_sp_port, 3981 mlxsw_sp->base_mac, 3982 sizeof(mlxsw_sp->base_mac)); 3983 if (err) { 3984 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 3985 goto err_core_cpu_port_init; 3986 } 3987 3988 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 3989 return 0; 3990 3991 err_core_cpu_port_init: 3992 kfree(mlxsw_sp_port); 3993 return err; 3994 } 3995 3996 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 3997 { 3998 struct mlxsw_sp_port *mlxsw_sp_port = 3999 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 4000 4001 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 4002 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 4003 kfree(mlxsw_sp_port); 4004 } 4005 4006 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 4007 { 4008 return mlxsw_sp->ports[local_port] != NULL; 4009 } 4010 4011 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 
4012 { 4013 int i; 4014 4015 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 4016 if (mlxsw_sp_port_created(mlxsw_sp, i)) 4017 mlxsw_sp_port_remove(mlxsw_sp, i); 4018 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4019 kfree(mlxsw_sp->ports); 4020 } 4021 4022 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 4023 { 4024 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 4025 struct mlxsw_sp_port_mapping *port_mapping; 4026 size_t alloc_size; 4027 int i; 4028 int err; 4029 4030 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 4031 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 4032 if (!mlxsw_sp->ports) 4033 return -ENOMEM; 4034 4035 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 4036 if (err) 4037 goto err_cpu_port_create; 4038 4039 for (i = 1; i < max_ports; i++) { 4040 port_mapping = mlxsw_sp->port_mapping[i]; 4041 if (!port_mapping) 4042 continue; 4043 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); 4044 if (err) 4045 goto err_port_create; 4046 } 4047 return 0; 4048 4049 err_port_create: 4050 for (i--; i >= 1; i--) 4051 if (mlxsw_sp_port_created(mlxsw_sp, i)) 4052 mlxsw_sp_port_remove(mlxsw_sp, i); 4053 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4054 err_cpu_port_create: 4055 kfree(mlxsw_sp->ports); 4056 return err; 4057 } 4058 4059 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 4060 { 4061 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 4062 struct mlxsw_sp_port_mapping port_mapping; 4063 int i; 4064 int err; 4065 4066 mlxsw_sp->port_mapping = kcalloc(max_ports, 4067 sizeof(struct mlxsw_sp_port_mapping *), 4068 GFP_KERNEL); 4069 if (!mlxsw_sp->port_mapping) 4070 return -ENOMEM; 4071 4072 for (i = 1; i < max_ports; i++) { 4073 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 4074 if (err) 4075 goto err_port_module_info_get; 4076 if (!port_mapping.width) 4077 continue; 4078 4079 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, 4080 sizeof(port_mapping), 4081 GFP_KERNEL); 4082 
/* Continuation of mlxsw_sp_port_module_info_init(): duplication of a
 * per-port mapping failed; record -ENOMEM and unwind.
 */
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	/* Free only entries [1, i); local port 0 has no mapping entry. */
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}

/* Free the per-port module mapping entries and the mapping array itself. */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
}

/* Return the first local port of the cluster (group of max_width local
 * ports) containing local_port. Local ports are 1-based.
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
{
	u8 offset = (local_port - 1) % max_width;

	return local_port - offset;
}

/* Create 'count' split ports starting at base_port, 'offset' local ports
 * apart, each taking an equal share of the original port's lanes. On
 * failure, remove the split ports created so far.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Next split port starts on the following lane group. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Recreate the original unsplit ports that occupied the local port range
 * [base_port, base_port + count * offset), using the saved module mapping.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}

/* Return the local port stride between consecutive split ports, taken from
 * the per-split-width resource the device advertises. Returns -EINVAL for
 * unsupported split widths or when the resource is not exposed.
 */
static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
				       unsigned int count,
				       unsigned int max_width)
{
	enum mlxsw_res_id local_ports_in_x_res_id;
	int split_width = max_width / count;

	if (split_width == 1)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
	else if (split_width == 2)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
	else if (split_width == 4)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
	else
		return -EINVAL;

	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
		return -EINVAL;
	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
}

/* devlink port split handler: validate the request, remove the ports in
 * the affected cluster and create 'count' split ports in their place.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* Split ports cannot be split. */
	if (mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max and 1 module width cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	if (count == 1 || !is_power_of_2(count) || count > max_width) {
		netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
		NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		/* Any other occupied slot means the requested layout clashes
		 * with existing ports.
		 */
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Save the mapping before removing the port it belongs to. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports on failure. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}

/* devlink port unsplit handler: remove the split ports and recreate the
 * original unsplit port(s) from the saved module mapping.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}
/* Continuation of mlxsw_sp_port_unsplit(). */
	count = max_width / mlxsw_sp_port->mapping.width;

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	/* WARN_ON: the offset was resolvable at split time, so failure here
	 * indicates an internal inconsistency.
	 */
	if (WARN_ON(offset < 0)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	base_port = mlxsw_sp_port->split_base_local_port;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);

	return 0;
}

/* PUDE (port up/down event) handler: propagate the operational state to
 * the netdev carrier and kick the PTP shaper work on link up.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

/* Unpack every timestamp record from an MTPPTR event and feed it to the
 * Spectrum-1 PTP code. 'ingress' selects RX vs. TX FIFO semantics.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

/* Ingress PTP FIFO event trampoline. */
static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

/* Egress PTP FIFO event trampoline. */
static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

/* Base RX trap handler: account the packet on the port's per-CPU stats
 * and hand it to the network stack without an offload forward mark.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

/* As above, but mark the packet as already L2-forwarded in hardware. */
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* As above, but mark the packet as already L3-forwarded in hardware. */
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* Deliver a sampled packet to the psample group bound to the port, then
 * consume the skb in all cases.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

/* Hand PTP packets to the ASIC-specific PTP receive implementation. */
static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
				     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

/* Listener-table helpers: trap to CPU without/with forward marks. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct
/* Trap/listener table common to all Spectrum generations. */
mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

static const struct
/* Spectrum-1-only listeners (PTP timestamp FIFO events). */
mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Program a QPCR rate-limiting policer for every CPU trap group that uses
 * one. Rates are in packets per second; burst size is log2 of packets.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		default:
			/* Groups without a policer are left unconfigured. */
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Configure every HTGT trap group with its priority, traffic class and
 * policer (policer ID equals the group ID by convention).
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
/* Continuation of mlxsw_sp_trap_groups_set(). */
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			/* Events are not rate limited. */
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* Sanity: the implied policer must exist on this device. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners; on failure, unregister the ones
 * already registered.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister an array of trap listeners. */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Set up CPU policers and trap groups, then register the common listeners
 * followed by the per-ASIC extra listeners.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	return err;
}

/* Tear down listeners in reverse registration order. */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash (seeded from the base MAC for per-device
 * variation) and allocate the LAG upper-device tracking array.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

/* Free the LAG tracking array. */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Configure the EMAD trap group needed before full trap setup runs. */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    /* (cont.) final mlxsw_reg_htgt_pack() argument */
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* Spectrum-1 PTP implementation hooks. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};

/* Spectrum-2 PTP implementation hooks. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common driver init for all Spectrum generations. Sub-systems are
 * brought up in dependency order; the error path unwinds them in exact
 * reverse order.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: select SP1 firmware and operation tables, then run the
 * common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-2 init: select SP2 firmware and operation tables, then run the
 * common init. No extra listeners beyond the common table.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	/* Continuation of mlxsw_sp2_init(). */
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Tear down the driver: exact reverse of mlxsw_sp_init()'s setup order. */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Device configuration profile for Spectrum-1 (includes KVD sizing). */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Device configuration profile for Spectrum-2/3 (no host-managed KVD). */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Fill devlink size-parameter descriptors for the KVD partitions. Each
 * partition's maximum is bounded by total KVD size minus the minima of
 * the other partitions.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the Spectrum-1 KVD devlink resources (KVD, linear, hash
 * partitions). The function continues past this chunk.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the remaining KVD between the double and single hash
	 * partitions according to the profile's part ratios.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size
= kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* SP2 exposes only the flat KVD resource — no linear/hash partitioning */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

/* Register the fixed-size SPAN (mirroring) agents resource with devlink */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

/* SP1 devlink resources: KVD hierarchy + SPAN. On SPAN failure, all
 * resources registered so far are torn down with a single
 * devlink_resources_unregister(NULL) call.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	return 0;

err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* SP2/SP3 devlink resources: flat KVD + SPAN, same unwind as SP1 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	return 0;

err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Compute the actual KVD partition sizes to hand to the firmware, preferring
 * user-configured devlink sizes and falling back to profile defaults.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the
	 * linear part. It is split into the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be multiples of the granularity
	 * from the profile. In case the user provided the
	 * sizes, they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink validator: only the 'driver' and 'flash' firmware load policies
 * are supported by this driver.
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink params and seed fw_load_policy with its
 * 'driver' default.
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* devlink runtime getter for the ACL region rehash interval (SP2 only) */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* devlink runtime setter for the ACL region rehash interval (SP2 only) */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* SP2 params: the common set plus the rehash-interval driver param */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

/* Reverse of mlxsw_sp2_params_register(): SP2-specific params first, then
 * the common set.
 */
static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

/* Core callback for a transmitted PTP packet: strip the Tx header before
 * handing the skb to the per-ASIC PTP handler.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* Driver registration for Spectrum-1. Note: only SP1 supplies
 * .kvd_sizes_get, matching its partitioned KVD profile.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.params_register		= mlxsw_sp_params_register,
	.params_unregister		= mlxsw_sp_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
};

/* Driver registration for Spectrum-2 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};

/* Driver registration for Spectrum-3 — reuses the SP2 callbacks/profile,
 * differing only in the registered kind string.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};

/* Is this netdev one of our physical switch ports? */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* netdev_walk_all_lower_dev() callback: capture the first mlxsw port found
 * and stop the walk (non-zero return terminates it).
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw port underlying 'dev' (dev itself or any lower device),
 * or NULL. Caller must hold RTNL (non-RCU lower-dev walk).
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Resolve the mlxsw_sp instance below 'dev', or NULL if unrelated to us */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(); caller holds rcu_read_lock */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Like the RCU find, but takes a reference on the port's netdev; release
 * with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Detach the port from any bridge reachable through the LAG device: the LAG
 * itself if it is a bridge port, plus every upper of the LAG that is one.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create a LAG record in hardware via the SLDR register */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy a hardware LAG record via the SLDR register */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add the port to the LAG's collector at the given member index (SLCOR) */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port from the LAG's collector (SLCOR) */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable traffic collection on this LAG member port (SLCOR) */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable traffic collection on this LAG member port (SLCOR) */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Look up the LAG ID already used for lag_dev, or hand out the first free
 * one. -EBUSY when all LAG IDs are taken by other devices.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate that a LAG master can be offloaded: a LAG ID must be available
 * and the LAG must use hash Tx; report failures via extack.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member slot within the LAG; -EBUSY when the LAG is full */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Enslave the port to a LAG: create the hardware LAG on first member, add
 * the port to the collector, and record the lag mapping in the core.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member creates the hardware LAG record */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	/* If we created the LAG above (ref_count still 0), tear it down */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Undo mlxsw_sp_port_lag_join(): remove the port from the collector, flush
 * its VLANs/bridge memberships, and destroy the LAG when the last member
 * leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* Add the port to the LAG's distributor (egress hashing) via SLDR */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove the port from the LAG's distributor via SLDR */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable both collection and distribution for the member port; rolls the
 * collector back if the distributor add fails.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Disable distribution then collection; re-adds the distributor port if the
 * collector disable fails.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* React to a bonding lower-state change: mirror tx_enabled into the
 * hardware collector/distributor state.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the STP state (forwarding/discarding) for every VLAN on the port in
 * one SPMS register write.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is too large for the stack, allocate it */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for OVS: virtual-port mode, forwarding STP state, all
 * VLANs allowed with learning disabled. Unwinds fully on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only for the VIDs we managed to disable */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Reverse of mlxsw_sp_port_ovs_join() */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* Does the bridge have more than one VxLAN lower device? */
static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* In a VLAN-aware bridge, no two VxLAN devices may map to the same VLAN */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Can this bridge with VxLAN lowers be offloaded? Rejects multicast-enabled
 * bridges, multiple VxLANs in a VLAN-unaware bridge, and VLAN collisions in
 * a VLAN-aware bridge; reasons are reported via extack.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Netdev notifier handler for upper-device changes on a switch port:
 * PRECHANGEUPPER vetoes unsupported topologies (with extack reasons),
 * CHANGEUPPER performs the actual join/leave.
 * NOTE(review): this function continues past the end of this chunk.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
6270 break; 6271 } 6272 6273 return err; 6274 } 6275 6276 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 6277 unsigned long event, void *ptr) 6278 { 6279 struct netdev_notifier_changelowerstate_info *info; 6280 struct mlxsw_sp_port *mlxsw_sp_port; 6281 int err; 6282 6283 mlxsw_sp_port = netdev_priv(dev); 6284 info = ptr; 6285 6286 switch (event) { 6287 case NETDEV_CHANGELOWERSTATE: 6288 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 6289 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 6290 info->lower_state_info); 6291 if (err) 6292 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 6293 } 6294 break; 6295 } 6296 6297 return 0; 6298 } 6299 6300 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 6301 struct net_device *port_dev, 6302 unsigned long event, void *ptr) 6303 { 6304 switch (event) { 6305 case NETDEV_PRECHANGEUPPER: 6306 case NETDEV_CHANGEUPPER: 6307 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 6308 event, ptr); 6309 case NETDEV_CHANGELOWERSTATE: 6310 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 6311 ptr); 6312 } 6313 6314 return 0; 6315 } 6316 6317 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 6318 unsigned long event, void *ptr) 6319 { 6320 struct net_device *dev; 6321 struct list_head *iter; 6322 int ret; 6323 6324 netdev_for_each_lower_dev(lag_dev, dev, iter) { 6325 if (mlxsw_sp_port_dev_check(dev)) { 6326 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 6327 ptr); 6328 if (ret) 6329 return ret; 6330 } 6331 } 6332 6333 return 0; 6334 } 6335 6336 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 6337 struct net_device *dev, 6338 unsigned long event, void *ptr, 6339 u16 vid) 6340 { 6341 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 6342 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6343 struct netdev_notifier_changeupper_info *info = ptr; 6344 struct netlink_ext_ack *extack; 
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only bridge and macvlan uppers can be offloaded here. */
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is never vetoed. */
		if (!info->linking)
			break;
		/* Validate VxLAN configuration of a not-yet-offloaded
		 * bridge before joining it.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* An upper with its own uppers must be an already-offloaded
		 * bridge.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		/* macvlan offload requires an existing RIF on the VLAN
		 * device.
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* Should be unreachable - PRECHANGEUPPER above vetoes
			 * every other upper type.
			 */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Replicate a VLAN-upper event on a LAG device to each mlxsw port member,
 * stopping at the first error.
 */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle uppers of a VLAN device on top of a bridge. Only macvlan uppers
 * are supported, and only when the VLAN device already has a RIF.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Ignore bridges with no mlxsw lowers. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Route an event on a VLAN device to the handler matching its real
 * device: mlxsw port, LAG or bridge.
 */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if
	(netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle uppers of a bridge device itself. VLAN and macvlan uppers are
 * the only recognized types; the reaction is cleanup of the associated
 * router interfaces on unlink.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Ignore bridges with no mlxsw lowers. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		/* macvlan offload requires an existing RIF on the bridge. */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* A macvlan device over an mlxsw lower may not have uppers of its own;
 * veto every PRECHANGEUPPER on it. VRF enslavement is the exception and
 * is handled separately (see comment below).
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* True when the event is a (PRE)CHANGEUPPER whose upper is an L3 master
 * (VRF) device - such events are routed to the VRF handler instead of the
 * per-device-type handlers.
 */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events on a VxLAN device: join/leave its bridge's offload state
 * on CHANGEUPPER, and on PRE_UP/DOWN when the device is brought up/down
 * while already enslaved to an offloaded bridge.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges with no mlxsw lowers. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* A VxLAN device that is down is joined when it is
			 * brought up (NETDEV_PRE_UP below).
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Top-level netdevice notifier: invalidate SPAN entries for unregistered
 * ports, re-resolve SPAN state, then dispatch the event by device type.
 * The return value is the first matching handler's errno, converted for
 * the notifier chain.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE(review): the VxLAN handler's error is kept in the same 'err'
	 * that the chain below may assign (e.g. a CHANGEUPPER to a VRF also
	 * matches mlxsw_sp_is_vrf_event()) - confirm the overwrite is
	 * intended.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Validator notifiers veto invalid IPv4/IPv6 address additions before they
 * are committed to the kernel.
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

/* Probe/remove callbacks are presumably filled in by
 * mlxsw_pci_driver_register() - not visible in this file.
 */
static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Module entry point: register address validators, then the three core
 * drivers (SP1/SP2/SP3), then their PCI drivers. Unwound in reverse order
 * on any failure.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

	/* Error unwind: each label undoes everything registered before the
	 * failing step, in reverse (LIFO) order.
	 */
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

/* Module teardown - exact reverse of mlxsw_sp_module_init(). */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);