1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <net/switchdev.h> 27 #include <net/pkt_cls.h> 28 #include <net/tc_act/tc_mirred.h> 29 #include <net/netevent.h> 30 #include <net/tc_act/tc_sample.h> 31 #include <net/addrconf.h> 32 33 #include "spectrum.h" 34 #include "pci.h" 35 #include "core.h" 36 #include "core_env.h" 37 #include "reg.h" 38 #include "port.h" 39 #include "trap.h" 40 #include "txheader.h" 41 #include "spectrum_cnt.h" 42 #include "spectrum_dpipe.h" 43 #include "spectrum_acl_flex_actions.h" 44 #include "spectrum_span.h" 45 #include "spectrum_ptp.h" 46 #include "../mlxfw/mlxfw.h" 47 48 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 49 50 #define MLXSW_SP1_FWREV_MAJOR 13 51 #define MLXSW_SP1_FWREV_MINOR 2000 52 #define MLXSW_SP1_FWREV_SUBMINOR 2308 53 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 54 55 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 56 .major = MLXSW_SP1_FWREV_MAJOR, 57 .minor = MLXSW_SP1_FWREV_MINOR, 58 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 59 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 60 }; 61 62 #define MLXSW_SP1_FW_FILENAME \ 63 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 64 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 65 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 66 67 #define MLXSW_SP2_FWREV_MAJOR 29 68 #define MLXSW_SP2_FWREV_MINOR 2000 69 #define MLXSW_SP2_FWREV_SUBMINOR 2308 70 71 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 72 .major = MLXSW_SP2_FWREV_MAJOR, 73 .minor = MLXSW_SP2_FWREV_MINOR, 74 .subminor = MLXSW_SP2_FWREV_SUBMINOR, 75 }; 76 77 #define MLXSW_SP2_FW_FILENAME \ 78 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 79 "." __stringify(MLXSW_SP2_FWREV_MINOR) \ 80 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" 81 82 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 83 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 84 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 85 static const char mlxsw_sp_driver_version[] = "1.0"; 86 87 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 88 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 89 }; 90 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 91 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 92 }; 93 94 /* tx_hdr_version 95 * Tx header version. 96 * Must be set to 1. 97 */ 98 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 99 100 /* tx_hdr_ctl 101 * Packet control type. 102 * 0 - Ethernet control (e.g. EMADs, LACP) 103 * 1 - Ethernet data 104 */ 105 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 106 107 /* tx_hdr_proto 108 * Packet protocol type. Must be set to 1 (Ethernet). 109 */ 110 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 111 112 /* tx_hdr_rx_is_router 113 * Packet is sent from the router. Valid for data packets only. 114 */ 115 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 116 117 /* tx_hdr_fid_valid 118 * Indicates if the 'fid' field is valid and should be used for 119 * forwarding lookup. Valid for data packets only. 120 */ 121 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 122 123 /* tx_hdr_swid 124 * Switch partition ID. Must be set to 0. 
125 */ 126 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 127 128 /* tx_hdr_control_tclass 129 * Indicates if the packet should use the control TClass and not one 130 * of the data TClasses. 131 */ 132 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 133 134 /* tx_hdr_etclass 135 * Egress TClass to be used on the egress device on the egress port. 136 */ 137 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 138 139 /* tx_hdr_port_mid 140 * Destination local port for unicast packets. 141 * Destination multicast ID for multicast packets. 142 * 143 * Control packets are directed to a specific egress port, while data 144 * packets are transmitted through the CPU port (0) into the switch partition, 145 * where forwarding rules are applied. 146 */ 147 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 148 149 /* tx_hdr_fid 150 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 151 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 152 * Valid for data packets only. 153 */ 154 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 155 156 /* tx_hdr_type 157 * 0 - Data packets 158 * 6 - Control packets 159 */ 160 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 161 162 struct mlxsw_sp_mlxfw_dev { 163 struct mlxfw_dev mlxfw_dev; 164 struct mlxsw_sp *mlxsw_sp; 165 }; 166 167 struct mlxsw_sp_ptp_ops { 168 struct mlxsw_sp_ptp_clock * 169 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 170 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 171 172 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 173 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 174 175 /* Notify a driver that a packet that might be PTP was received. Driver 176 * is responsible for freeing the passed-in SKB. 177 */ 178 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 179 u8 local_port); 180 181 /* Notify a driver that a timestamped packet was transmitted. Driver 182 * is responsible for freeing the passed-in SKB. 
183 */ 184 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 185 u8 local_port); 186 187 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 188 struct hwtstamp_config *config); 189 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 190 struct hwtstamp_config *config); 191 void (*shaper_work)(struct work_struct *work); 192 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 193 struct ethtool_ts_info *info); 194 int (*get_stats_count)(void); 195 void (*get_stats_strings)(u8 **p); 196 void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, 197 u64 *data, int data_index); 198 }; 199 200 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 201 u16 component_index, u32 *p_max_size, 202 u8 *p_align_bits, u16 *p_max_write_size) 203 { 204 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 205 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 206 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 207 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 208 int err; 209 210 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 211 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 212 if (err) 213 return err; 214 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 215 p_max_write_size); 216 217 *p_align_bits = max_t(u8, *p_align_bits, 2); 218 *p_max_write_size = min_t(u16, *p_max_write_size, 219 MLXSW_REG_MCDA_MAX_DATA_LEN); 220 return 0; 221 } 222 223 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 224 { 225 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 226 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 227 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 228 char mcc_pl[MLXSW_REG_MCC_LEN]; 229 u8 control_state; 230 int err; 231 232 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 233 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 234 if (err) 235 return err; 236 237 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 238 if (control_state != 
MLXFW_FSM_STATE_IDLE) 239 return -EBUSY; 240 241 mlxsw_reg_mcc_pack(mcc_pl, 242 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 243 0, *fwhandle, 0); 244 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 245 } 246 247 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 248 u32 fwhandle, u16 component_index, 249 u32 component_size) 250 { 251 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 252 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 253 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 254 char mcc_pl[MLXSW_REG_MCC_LEN]; 255 256 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 257 component_index, fwhandle, component_size); 258 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 259 } 260 261 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 262 u32 fwhandle, u8 *data, u16 size, 263 u32 offset) 264 { 265 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 266 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 267 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 268 char mcda_pl[MLXSW_REG_MCDA_LEN]; 269 270 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 271 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 272 } 273 274 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 275 u32 fwhandle, u16 component_index) 276 { 277 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 278 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 279 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 280 char mcc_pl[MLXSW_REG_MCC_LEN]; 281 282 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 283 component_index, fwhandle, 0); 284 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 285 } 286 287 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 288 { 289 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 290 container_of(mlxfw_dev, struct 
mlxsw_sp_mlxfw_dev, mlxfw_dev); 291 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 292 char mcc_pl[MLXSW_REG_MCC_LEN]; 293 294 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 295 fwhandle, 0); 296 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 297 } 298 299 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 300 enum mlxfw_fsm_state *fsm_state, 301 enum mlxfw_fsm_state_err *fsm_state_err) 302 { 303 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 304 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 305 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 306 char mcc_pl[MLXSW_REG_MCC_LEN]; 307 u8 control_state; 308 u8 error_code; 309 int err; 310 311 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 312 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 313 if (err) 314 return err; 315 316 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 317 *fsm_state = control_state; 318 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 319 MLXFW_FSM_STATE_ERR_MAX); 320 return 0; 321 } 322 323 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 324 { 325 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 326 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 327 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 328 char mcc_pl[MLXSW_REG_MCC_LEN]; 329 330 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 331 fwhandle, 0); 332 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 333 } 334 335 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 336 { 337 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 338 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 339 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 340 char mcc_pl[MLXSW_REG_MCC_LEN]; 341 342 mlxsw_reg_mcc_pack(mcc_pl, 343 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 344 fwhandle, 0); 345 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 346 } 347 348 static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev, 349 const char *msg, const char *comp_name, 350 u32 done_bytes, u32 total_bytes) 351 { 352 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 353 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 354 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 355 356 devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core), 357 msg, comp_name, 358 done_bytes, total_bytes); 359 } 360 361 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 362 .component_query = mlxsw_sp_component_query, 363 .fsm_lock = mlxsw_sp_fsm_lock, 364 .fsm_component_update = mlxsw_sp_fsm_component_update, 365 .fsm_block_download = mlxsw_sp_fsm_block_download, 366 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 367 .fsm_activate = mlxsw_sp_fsm_activate, 368 .fsm_query_state = mlxsw_sp_fsm_query_state, 369 .fsm_cancel = mlxsw_sp_fsm_cancel, 370 .fsm_release = mlxsw_sp_fsm_release, 371 .status_notify = mlxsw_sp_status_notify, 372 }; 373 374 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 375 const struct firmware *firmware, 376 struct netlink_ext_ack *extack) 377 { 378 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 379 .mlxfw_dev = { 380 .ops = &mlxsw_sp_mlxfw_dev_ops, 381 .psid = mlxsw_sp->bus_info->psid, 382 .psid_size = strlen(mlxsw_sp->bus_info->psid), 383 }, 384 .mlxsw_sp = mlxsw_sp 385 }; 386 int err; 387 388 mlxsw_core_fw_flash_start(mlxsw_sp->core); 389 devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core)); 390 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 391 firmware, extack); 392 devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core)); 393 mlxsw_core_fw_flash_end(mlxsw_sp->core); 394 395 return err; 396 } 397 398 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 399 { 400 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 401 const struct 
mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 402 const char *fw_filename = mlxsw_sp->fw_filename; 403 union devlink_param_value value; 404 const struct firmware *firmware; 405 int err; 406 407 /* Don't check if driver does not require it */ 408 if (!req_rev || !fw_filename) 409 return 0; 410 411 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 412 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 413 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 414 &value); 415 if (err) 416 return err; 417 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 418 return 0; 419 420 /* Validate driver & FW are compatible */ 421 if (rev->major != req_rev->major) { 422 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 423 rev->major, req_rev->major); 424 return -EINVAL; 425 } 426 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 427 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && 428 mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) 429 return 0; 430 431 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 432 rev->major, rev->minor, rev->subminor); 433 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 434 fw_filename); 435 436 err = request_firmware_direct(&firmware, fw_filename, 437 mlxsw_sp->bus_info->dev); 438 if (err) { 439 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 440 fw_filename); 441 return err; 442 } 443 444 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 445 release_firmware(firmware); 446 if (err) 447 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 448 449 /* On FW flash success, tell the caller FW reset is needed 450 * if current FW supports it. 451 */ 452 if (rev->minor >= req_rev->can_reset_minor) 453 return err ? 
err : -EAGAIN; 454 else 455 return 0; 456 } 457 458 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 459 const char *file_name, const char *component, 460 struct netlink_ext_ack *extack) 461 { 462 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 463 const struct firmware *firmware; 464 int err; 465 466 if (component) 467 return -EOPNOTSUPP; 468 469 err = request_firmware_direct(&firmware, file_name, 470 mlxsw_sp->bus_info->dev); 471 if (err) 472 return err; 473 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 474 release_firmware(firmware); 475 476 return err; 477 } 478 479 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 480 unsigned int counter_index, u64 *packets, 481 u64 *bytes) 482 { 483 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 484 int err; 485 486 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 487 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 488 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 489 if (err) 490 return err; 491 if (packets) 492 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 493 if (bytes) 494 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 495 return 0; 496 } 497 498 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 499 unsigned int counter_index) 500 { 501 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 502 503 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 504 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 505 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 506 } 507 508 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 509 unsigned int *p_counter_index) 510 { 511 int err; 512 513 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 514 p_counter_index); 515 if (err) 516 return err; 517 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 518 if (err) 519 goto err_counter_clear; 520 return 0; 521 522 err_counter_clear: 523 mlxsw_sp_counter_free(mlxsw_sp, 
MLXSW_SP_COUNTER_SUB_POOL_FLOW, 524 *p_counter_index); 525 return err; 526 } 527 528 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 529 unsigned int counter_index) 530 { 531 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 532 counter_index); 533 } 534 535 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 536 const struct mlxsw_tx_info *tx_info) 537 { 538 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 539 540 memset(txhdr, 0, MLXSW_TXHDR_LEN); 541 542 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 543 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 544 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 545 mlxsw_tx_hdr_swid_set(txhdr, 0); 546 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 547 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 548 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 549 } 550 551 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 552 { 553 switch (state) { 554 case BR_STATE_FORWARDING: 555 return MLXSW_REG_SPMS_STATE_FORWARDING; 556 case BR_STATE_LEARNING: 557 return MLXSW_REG_SPMS_STATE_LEARNING; 558 case BR_STATE_LISTENING: /* fall-through */ 559 case BR_STATE_DISABLED: /* fall-through */ 560 case BR_STATE_BLOCKING: 561 return MLXSW_REG_SPMS_STATE_DISCARDING; 562 default: 563 BUG(); 564 } 565 } 566 567 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 568 u8 state) 569 { 570 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 571 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 572 char *spms_pl; 573 int err; 574 575 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 576 if (!spms_pl) 577 return -ENOMEM; 578 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 579 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 580 581 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 582 kfree(spms_pl); 583 return err; 584 } 585 586 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 587 { 588 char 
spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 589 int err; 590 591 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 592 if (err) 593 return err; 594 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 595 return 0; 596 } 597 598 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 599 bool enable, u32 rate) 600 { 601 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 602 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 603 604 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 605 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 606 } 607 608 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 609 bool is_up) 610 { 611 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 612 char paos_pl[MLXSW_REG_PAOS_LEN]; 613 614 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 615 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 616 MLXSW_PORT_ADMIN_STATUS_DOWN); 617 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 618 } 619 620 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 621 unsigned char *addr) 622 { 623 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 624 char ppad_pl[MLXSW_REG_PPAD_LEN]; 625 626 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 627 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 628 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 629 } 630 631 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 632 { 633 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 634 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 635 636 ether_addr_copy(addr, mlxsw_sp->base_mac); 637 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 638 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 639 } 640 641 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 642 { 643 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 644 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 645 int 
max_mtu; 646 int err; 647 648 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 649 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 650 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 651 if (err) 652 return err; 653 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 654 655 if (mtu > max_mtu) 656 return -EINVAL; 657 658 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 659 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 660 } 661 662 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 663 { 664 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 665 char pspa_pl[MLXSW_REG_PSPA_LEN]; 666 667 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 668 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 669 } 670 671 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 672 { 673 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 674 char svpe_pl[MLXSW_REG_SVPE_LEN]; 675 676 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 677 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 678 } 679 680 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 681 bool learn_enable) 682 { 683 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 684 char *spvmlr_pl; 685 int err; 686 687 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 688 if (!spvmlr_pl) 689 return -ENOMEM; 690 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 691 learn_enable); 692 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 693 kfree(spvmlr_pl); 694 return err; 695 } 696 697 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 698 u16 vid) 699 { 700 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 701 char spvid_pl[MLXSW_REG_SPVID_LEN]; 702 703 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 704 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), 
spvid_pl); 705 } 706 707 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 708 bool allow) 709 { 710 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 711 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 712 713 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 714 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 715 } 716 717 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 718 { 719 int err; 720 721 if (!vid) { 722 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 723 if (err) 724 return err; 725 } else { 726 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 727 if (err) 728 return err; 729 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 730 if (err) 731 goto err_port_allow_untagged_set; 732 } 733 734 mlxsw_sp_port->pvid = vid; 735 return 0; 736 737 err_port_allow_untagged_set: 738 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 739 return err; 740 } 741 742 static int 743 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 744 { 745 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 746 char sspr_pl[MLXSW_REG_SSPR_LEN]; 747 748 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 749 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 750 } 751 752 static int 753 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, 754 struct mlxsw_sp_port_mapping *port_mapping) 755 { 756 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 757 bool separate_rxtx; 758 u8 module; 759 u8 width; 760 int err; 761 int i; 762 763 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 764 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 765 if (err) 766 return err; 767 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 768 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 769 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 770 771 if (width && !is_power_of_2(width)) { 772 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported 
module config: width value is not power of 2\n", 773 local_port); 774 return -EINVAL; 775 } 776 777 for (i = 0; i < width; i++) { 778 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 779 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 780 local_port); 781 return -EINVAL; 782 } 783 if (separate_rxtx && 784 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 785 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 786 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 787 local_port); 788 return -EINVAL; 789 } 790 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) { 791 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 792 local_port); 793 return -EINVAL; 794 } 795 } 796 797 port_mapping->module = module; 798 port_mapping->width = width; 799 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 800 return 0; 801 } 802 803 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) 804 { 805 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; 806 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 807 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 808 int i; 809 810 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 811 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 812 for (i = 0; i < port_mapping->width; i++) { 813 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 814 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 815 } 816 817 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 818 } 819 820 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 821 { 822 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 823 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 824 825 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 826 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 827 return mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(pmlp), pmlp_pl); 828 } 829 830 static int mlxsw_sp_port_open(struct net_device *dev) 831 { 832 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 833 int err; 834 835 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 836 if (err) 837 return err; 838 netif_start_queue(dev); 839 return 0; 840 } 841 842 static int mlxsw_sp_port_stop(struct net_device *dev) 843 { 844 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 845 846 netif_stop_queue(dev); 847 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 848 } 849 850 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 851 struct net_device *dev) 852 { 853 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 854 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 855 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 856 const struct mlxsw_tx_info tx_info = { 857 .local_port = mlxsw_sp_port->local_port, 858 .is_emad = false, 859 }; 860 u64 len; 861 int err; 862 863 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 864 865 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 866 return NETDEV_TX_BUSY; 867 868 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 869 struct sk_buff *skb_orig = skb; 870 871 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 872 if (!skb) { 873 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 874 dev_kfree_skb_any(skb_orig); 875 return NETDEV_TX_OK; 876 } 877 dev_consume_skb_any(skb_orig); 878 } 879 880 if (eth_skb_pad(skb)) { 881 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 882 return NETDEV_TX_OK; 883 } 884 885 mlxsw_sp_txhdr_construct(skb, &tx_info); 886 /* TX header is consumed by HW on the way so we shouldn't count its 887 * bytes as being sent. 888 */ 889 len = skb->len - MLXSW_TXHDR_LEN; 890 891 /* Due to a race we might fail here because of a full queue. In that 892 * unlikely case we simply drop the packet. 
893 */ 894 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 895 896 if (!err) { 897 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 898 u64_stats_update_begin(&pcpu_stats->syncp); 899 pcpu_stats->tx_packets++; 900 pcpu_stats->tx_bytes += len; 901 u64_stats_update_end(&pcpu_stats->syncp); 902 } else { 903 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 904 dev_kfree_skb_any(skb); 905 } 906 return NETDEV_TX_OK; 907 } 908 909 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 910 { 911 } 912 913 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 914 { 915 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 916 struct sockaddr *addr = p; 917 int err; 918 919 if (!is_valid_ether_addr(addr->sa_data)) 920 return -EADDRNOTAVAIL; 921 922 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 923 if (err) 924 return err; 925 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 926 return 0; 927 } 928 929 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 930 int mtu) 931 { 932 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 933 } 934 935 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 936 937 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 938 u16 delay) 939 { 940 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 941 BITS_PER_BYTE)); 942 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 943 mtu); 944 } 945 946 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 947 * Assumes 100m cable and maximum MTU. 
948 */ 949 #define MLXSW_SP_PAUSE_DELAY 58752 950 951 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 952 u16 delay, bool pfc, bool pause) 953 { 954 if (pfc) 955 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 956 else if (pause) 957 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 958 else 959 return 0; 960 } 961 962 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 963 bool lossy) 964 { 965 if (lossy) 966 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 967 else 968 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 969 thres); 970 } 971 972 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 973 u8 *prio_tc, bool pause_en, 974 struct ieee_pfc *my_pfc) 975 { 976 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 977 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 978 u16 delay = !!my_pfc ? my_pfc->delay : 0; 979 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 980 u32 taken_headroom_cells = 0; 981 u32 max_headroom_cells; 982 int i, j, err; 983 984 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 985 986 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 987 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 988 if (err) 989 return err; 990 991 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 992 bool configure = false; 993 bool pfc = false; 994 u16 thres_cells; 995 u16 delay_cells; 996 u16 total_cells; 997 bool lossy; 998 999 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 1000 if (prio_tc[j] == i) { 1001 pfc = pfc_en & BIT(j); 1002 configure = true; 1003 break; 1004 } 1005 } 1006 1007 if (!configure) 1008 continue; 1009 1010 lossy = !(pfc || pause_en); 1011 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 1012 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 1013 pfc, pause_en); 1014 total_cells = thres_cells + delay_cells; 1015 1016 taken_headroom_cells += total_cells; 1017 if (taken_headroom_cells > 
max_headroom_cells) 1018 return -ENOBUFS; 1019 1020 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 1021 thres_cells, lossy); 1022 } 1023 1024 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 1025 } 1026 1027 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 1028 int mtu, bool pause_en) 1029 { 1030 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 1031 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 1032 struct ieee_pfc *my_pfc; 1033 u8 *prio_tc; 1034 1035 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 1036 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; 1037 1038 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 1039 pause_en, my_pfc); 1040 } 1041 1042 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 1043 { 1044 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1045 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1046 int err; 1047 1048 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 1049 if (err) 1050 return err; 1051 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 1052 if (err) 1053 goto err_span_port_mtu_update; 1054 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 1055 if (err) 1056 goto err_port_mtu_set; 1057 dev->mtu = mtu; 1058 return 0; 1059 1060 err_port_mtu_set: 1061 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 1062 err_span_port_mtu_update: 1063 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1064 return err; 1065 } 1066 1067 static int 1068 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 1069 struct rtnl_link_stats64 *stats) 1070 { 1071 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1072 struct mlxsw_sp_port_pcpu_stats *p; 1073 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1074 u32 tx_dropped = 0; 1075 unsigned int start; 1076 int i; 1077 1078 for_each_possible_cpu(i) { 1079 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 1080 do { 1081 start = u64_stats_fetch_begin_irq(&p->syncp); 1082 
rx_packets = p->rx_packets; 1083 rx_bytes = p->rx_bytes; 1084 tx_packets = p->tx_packets; 1085 tx_bytes = p->tx_bytes; 1086 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1087 1088 stats->rx_packets += rx_packets; 1089 stats->rx_bytes += rx_bytes; 1090 stats->tx_packets += tx_packets; 1091 stats->tx_bytes += tx_bytes; 1092 /* tx_dropped is u32, updated without syncp protection. */ 1093 tx_dropped += p->tx_dropped; 1094 } 1095 stats->tx_dropped = tx_dropped; 1096 return 0; 1097 } 1098 1099 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 1100 { 1101 switch (attr_id) { 1102 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1103 return true; 1104 } 1105 1106 return false; 1107 } 1108 1109 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 1110 void *sp) 1111 { 1112 switch (attr_id) { 1113 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1114 return mlxsw_sp_port_get_sw_stats64(dev, sp); 1115 } 1116 1117 return -EINVAL; 1118 } 1119 1120 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 1121 int prio, char *ppcnt_pl) 1122 { 1123 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1124 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1125 1126 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 1127 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1128 } 1129 1130 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 1131 struct rtnl_link_stats64 *stats) 1132 { 1133 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1134 int err; 1135 1136 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 1137 0, ppcnt_pl); 1138 if (err) 1139 goto out; 1140 1141 stats->tx_packets = 1142 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 1143 stats->rx_packets = 1144 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 1145 stats->tx_bytes = 1146 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 1147 stats->rx_bytes = 1148 
mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1149 stats->multicast = 1150 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1151 1152 stats->rx_crc_errors = 1153 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1154 stats->rx_frame_errors = 1155 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1156 1157 stats->rx_length_errors = ( 1158 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1159 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 1160 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 1161 1162 stats->rx_errors = (stats->rx_crc_errors + 1163 stats->rx_frame_errors + stats->rx_length_errors); 1164 1165 out: 1166 return err; 1167 } 1168 1169 static void 1170 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1171 struct mlxsw_sp_port_xstats *xstats) 1172 { 1173 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1174 int err, i; 1175 1176 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1177 ppcnt_pl); 1178 if (!err) 1179 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1180 1181 for (i = 0; i < TC_MAX_QUEUE; i++) { 1182 err = mlxsw_sp_port_get_stats_raw(dev, 1183 MLXSW_REG_PPCNT_TC_CONG_TC, 1184 i, ppcnt_pl); 1185 if (!err) 1186 xstats->wred_drop[i] = 1187 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1188 1189 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1190 i, ppcnt_pl); 1191 if (err) 1192 continue; 1193 1194 xstats->backlog[i] = 1195 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1196 xstats->tail_drop[i] = 1197 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1198 } 1199 1200 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1201 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1202 i, ppcnt_pl); 1203 if (err) 1204 continue; 1205 1206 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1207 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1208 } 1209 } 1210 1211 static void update_stats_cache(struct work_struct *work) 1212 { 
1213 struct mlxsw_sp_port *mlxsw_sp_port = 1214 container_of(work, struct mlxsw_sp_port, 1215 periodic_hw_stats.update_dw.work); 1216 1217 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 1218 goto out; 1219 1220 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 1221 &mlxsw_sp_port->periodic_hw_stats.stats); 1222 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 1223 &mlxsw_sp_port->periodic_hw_stats.xstats); 1224 1225 out: 1226 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1227 MLXSW_HW_STATS_UPDATE_TIME); 1228 } 1229 1230 /* Return the stats from a cache that is updated periodically, 1231 * as this function might get called in an atomic context. 1232 */ 1233 static void 1234 mlxsw_sp_port_get_stats64(struct net_device *dev, 1235 struct rtnl_link_stats64 *stats) 1236 { 1237 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1238 1239 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 1240 } 1241 1242 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1243 u16 vid_begin, u16 vid_end, 1244 bool is_member, bool untagged) 1245 { 1246 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1247 char *spvm_pl; 1248 int err; 1249 1250 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1251 if (!spvm_pl) 1252 return -ENOMEM; 1253 1254 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1255 vid_end, is_member, untagged); 1256 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1257 kfree(spvm_pl); 1258 return err; 1259 } 1260 1261 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1262 u16 vid_end, bool is_member, bool untagged) 1263 { 1264 u16 vid, vid_e; 1265 int err; 1266 1267 for (vid = vid_begin; vid <= vid_end; 1268 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1269 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1270 vid_end); 1271 1272 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1273 is_member, untagged); 1274 if (err) 1275 return err; 
1276 } 1277 1278 return 0; 1279 } 1280 1281 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1282 bool flush_default) 1283 { 1284 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1285 1286 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1287 &mlxsw_sp_port->vlans_list, list) { 1288 if (!flush_default && 1289 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 1290 continue; 1291 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1292 } 1293 } 1294 1295 static void 1296 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1297 { 1298 if (mlxsw_sp_port_vlan->bridge_port) 1299 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1300 else if (mlxsw_sp_port_vlan->fid) 1301 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 1302 } 1303 1304 struct mlxsw_sp_port_vlan * 1305 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 1306 { 1307 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1308 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 1309 int err; 1310 1311 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1312 if (mlxsw_sp_port_vlan) 1313 return ERR_PTR(-EEXIST); 1314 1315 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 1316 if (err) 1317 return ERR_PTR(err); 1318 1319 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1320 if (!mlxsw_sp_port_vlan) { 1321 err = -ENOMEM; 1322 goto err_port_vlan_alloc; 1323 } 1324 1325 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1326 mlxsw_sp_port_vlan->vid = vid; 1327 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1328 1329 return mlxsw_sp_port_vlan; 1330 1331 err_port_vlan_alloc: 1332 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1333 return ERR_PTR(err); 1334 } 1335 1336 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1337 { 1338 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1339 u16 vid = 
mlxsw_sp_port_vlan->vid; 1340 1341 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1342 list_del(&mlxsw_sp_port_vlan->list); 1343 kfree(mlxsw_sp_port_vlan); 1344 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1345 } 1346 1347 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1348 __be16 __always_unused proto, u16 vid) 1349 { 1350 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1351 1352 /* VLAN 0 is added to HW filter when device goes up, but it is 1353 * reserved in our case, so simply return. 1354 */ 1355 if (!vid) 1356 return 0; 1357 1358 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1359 } 1360 1361 static int mlxsw_sp_port_kill_vid(struct net_device *dev, 1362 __be16 __always_unused proto, u16 vid) 1363 { 1364 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1365 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1366 1367 /* VLAN 0 is removed from HW filter when device goes down, but 1368 * it is reserved in our case, so simply return. 
1369 */ 1370 if (!vid) 1371 return 0; 1372 1373 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1374 if (!mlxsw_sp_port_vlan) 1375 return 0; 1376 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1377 1378 return 0; 1379 } 1380 1381 static struct mlxsw_sp_port_mall_tc_entry * 1382 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, 1383 unsigned long cookie) { 1384 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1385 1386 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) 1387 if (mall_tc_entry->cookie == cookie) 1388 return mall_tc_entry; 1389 1390 return NULL; 1391 } 1392 1393 static int 1394 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1395 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror, 1396 const struct flow_action_entry *act, 1397 bool ingress) 1398 { 1399 enum mlxsw_sp_span_type span_type; 1400 1401 if (!act->dev) { 1402 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); 1403 return -EINVAL; 1404 } 1405 1406 mirror->ingress = ingress; 1407 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1408 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type, 1409 true, &mirror->span_id); 1410 } 1411 1412 static void 1413 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1414 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) 1415 { 1416 enum mlxsw_sp_span_type span_type; 1417 1418 span_type = mirror->ingress ? 
1419 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1420 mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id, 1421 span_type, true); 1422 } 1423 1424 static int 1425 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, 1426 struct tc_cls_matchall_offload *cls, 1427 const struct flow_action_entry *act, 1428 bool ingress) 1429 { 1430 int err; 1431 1432 if (!mlxsw_sp_port->sample) 1433 return -EOPNOTSUPP; 1434 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { 1435 netdev_err(mlxsw_sp_port->dev, "sample already active\n"); 1436 return -EEXIST; 1437 } 1438 if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) { 1439 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); 1440 return -EOPNOTSUPP; 1441 } 1442 1443 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, 1444 act->sample.psample_group); 1445 mlxsw_sp_port->sample->truncate = act->sample.truncate; 1446 mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size; 1447 mlxsw_sp_port->sample->rate = act->sample.rate; 1448 1449 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate); 1450 if (err) 1451 goto err_port_sample_set; 1452 return 0; 1453 1454 err_port_sample_set: 1455 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1456 return err; 1457 } 1458 1459 static void 1460 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) 1461 { 1462 if (!mlxsw_sp_port->sample) 1463 return; 1464 1465 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); 1466 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1467 } 1468 1469 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1470 struct tc_cls_matchall_offload *f, 1471 bool ingress) 1472 { 1473 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1474 __be16 protocol = f->common.protocol; 1475 struct flow_action_entry *act; 1476 int err; 1477 1478 if (!flow_offload_has_one_action(&f->rule->action)) { 1479 netdev_err(mlxsw_sp_port->dev, "only singular 
actions are supported\n"); 1480 return -EOPNOTSUPP; 1481 } 1482 1483 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); 1484 if (!mall_tc_entry) 1485 return -ENOMEM; 1486 mall_tc_entry->cookie = f->cookie; 1487 1488 act = &f->rule->action.entries[0]; 1489 1490 if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) { 1491 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; 1492 1493 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; 1494 mirror = &mall_tc_entry->mirror; 1495 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, 1496 mirror, act, 1497 ingress); 1498 } else if (act->id == FLOW_ACTION_SAMPLE && 1499 protocol == htons(ETH_P_ALL)) { 1500 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; 1501 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f, 1502 act, ingress); 1503 } else { 1504 err = -EOPNOTSUPP; 1505 } 1506 1507 if (err) 1508 goto err_add_action; 1509 1510 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); 1511 return 0; 1512 1513 err_add_action: 1514 kfree(mall_tc_entry); 1515 return err; 1516 } 1517 1518 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1519 struct tc_cls_matchall_offload *f) 1520 { 1521 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1522 1523 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, 1524 f->cookie); 1525 if (!mall_tc_entry) { 1526 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); 1527 return; 1528 } 1529 list_del(&mall_tc_entry->list); 1530 1531 switch (mall_tc_entry->type) { 1532 case MLXSW_SP_PORT_MALL_MIRROR: 1533 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, 1534 &mall_tc_entry->mirror); 1535 break; 1536 case MLXSW_SP_PORT_MALL_SAMPLE: 1537 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); 1538 break; 1539 default: 1540 WARN_ON(1); 1541 } 1542 1543 kfree(mall_tc_entry); 1544 } 1545 1546 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1547 struct 
tc_cls_matchall_offload *f, 1548 bool ingress) 1549 { 1550 switch (f->command) { 1551 case TC_CLSMATCHALL_REPLACE: 1552 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, 1553 ingress); 1554 case TC_CLSMATCHALL_DESTROY: 1555 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); 1556 return 0; 1557 default: 1558 return -EOPNOTSUPP; 1559 } 1560 } 1561 1562 static int 1563 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, 1564 struct flow_cls_offload *f) 1565 { 1566 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block); 1567 1568 switch (f->command) { 1569 case FLOW_CLS_REPLACE: 1570 return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f); 1571 case FLOW_CLS_DESTROY: 1572 mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f); 1573 return 0; 1574 case FLOW_CLS_STATS: 1575 return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); 1576 case FLOW_CLS_TMPLT_CREATE: 1577 return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f); 1578 case FLOW_CLS_TMPLT_DESTROY: 1579 mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f); 1580 return 0; 1581 default: 1582 return -EOPNOTSUPP; 1583 } 1584 } 1585 1586 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type, 1587 void *type_data, 1588 void *cb_priv, bool ingress) 1589 { 1590 struct mlxsw_sp_port *mlxsw_sp_port = cb_priv; 1591 1592 switch (type) { 1593 case TC_SETUP_CLSMATCHALL: 1594 if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev, 1595 type_data)) 1596 return -EOPNOTSUPP; 1597 1598 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data, 1599 ingress); 1600 case TC_SETUP_CLSFLOWER: 1601 return 0; 1602 default: 1603 return -EOPNOTSUPP; 1604 } 1605 } 1606 1607 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type, 1608 void *type_data, 1609 void *cb_priv) 1610 { 1611 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1612 cb_priv, true); 1613 } 1614 1615 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type, 1616 void *type_data, 
1617 void *cb_priv) 1618 { 1619 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1620 cb_priv, false); 1621 } 1622 1623 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, 1624 void *type_data, void *cb_priv) 1625 { 1626 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1627 1628 switch (type) { 1629 case TC_SETUP_CLSMATCHALL: 1630 return 0; 1631 case TC_SETUP_CLSFLOWER: 1632 if (mlxsw_sp_acl_block_disabled(acl_block)) 1633 return -EOPNOTSUPP; 1634 1635 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data); 1636 default: 1637 return -EOPNOTSUPP; 1638 } 1639 } 1640 1641 static void mlxsw_sp_tc_block_flower_release(void *cb_priv) 1642 { 1643 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1644 1645 mlxsw_sp_acl_block_destroy(acl_block); 1646 } 1647 1648 static LIST_HEAD(mlxsw_sp_block_cb_list); 1649 1650 static int 1651 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, 1652 struct flow_block_offload *f, bool ingress) 1653 { 1654 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1655 struct mlxsw_sp_acl_block *acl_block; 1656 struct flow_block_cb *block_cb; 1657 bool register_block = false; 1658 int err; 1659 1660 block_cb = flow_block_cb_lookup(f->block, 1661 mlxsw_sp_setup_tc_block_cb_flower, 1662 mlxsw_sp); 1663 if (!block_cb) { 1664 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net); 1665 if (!acl_block) 1666 return -ENOMEM; 1667 block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower, 1668 mlxsw_sp, acl_block, 1669 mlxsw_sp_tc_block_flower_release); 1670 if (IS_ERR(block_cb)) { 1671 mlxsw_sp_acl_block_destroy(acl_block); 1672 err = PTR_ERR(block_cb); 1673 goto err_cb_register; 1674 } 1675 register_block = true; 1676 } else { 1677 acl_block = flow_block_cb_priv(block_cb); 1678 } 1679 flow_block_cb_incref(block_cb); 1680 err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block, 1681 mlxsw_sp_port, ingress, f->extack); 1682 if (err) 1683 goto err_block_bind; 1684 1685 if (ingress) 1686 
mlxsw_sp_port->ing_acl_block = acl_block; 1687 else 1688 mlxsw_sp_port->eg_acl_block = acl_block; 1689 1690 if (register_block) { 1691 flow_block_cb_add(block_cb, f); 1692 list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list); 1693 } 1694 1695 return 0; 1696 1697 err_block_bind: 1698 if (!flow_block_cb_decref(block_cb)) 1699 flow_block_cb_free(block_cb); 1700 err_cb_register: 1701 return err; 1702 } 1703 1704 static void 1705 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, 1706 struct flow_block_offload *f, bool ingress) 1707 { 1708 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1709 struct mlxsw_sp_acl_block *acl_block; 1710 struct flow_block_cb *block_cb; 1711 int err; 1712 1713 block_cb = flow_block_cb_lookup(f->block, 1714 mlxsw_sp_setup_tc_block_cb_flower, 1715 mlxsw_sp); 1716 if (!block_cb) 1717 return; 1718 1719 if (ingress) 1720 mlxsw_sp_port->ing_acl_block = NULL; 1721 else 1722 mlxsw_sp_port->eg_acl_block = NULL; 1723 1724 acl_block = flow_block_cb_priv(block_cb); 1725 err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, 1726 mlxsw_sp_port, ingress); 1727 if (!err && !flow_block_cb_decref(block_cb)) { 1728 flow_block_cb_remove(block_cb, f); 1729 list_del(&block_cb->driver_list); 1730 } 1731 } 1732 1733 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1734 struct flow_block_offload *f) 1735 { 1736 struct flow_block_cb *block_cb; 1737 flow_setup_cb_t *cb; 1738 bool ingress; 1739 int err; 1740 1741 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1742 cb = mlxsw_sp_setup_tc_block_cb_matchall_ig; 1743 ingress = true; 1744 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1745 cb = mlxsw_sp_setup_tc_block_cb_matchall_eg; 1746 ingress = false; 1747 } else { 1748 return -EOPNOTSUPP; 1749 } 1750 1751 f->driver_block_list = &mlxsw_sp_block_cb_list; 1752 1753 switch (f->command) { 1754 case FLOW_BLOCK_BIND: 1755 if (flow_block_cb_is_busy(cb, mlxsw_sp_port, 1756 
&mlxsw_sp_block_cb_list)) 1757 return -EBUSY; 1758 1759 block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port, 1760 mlxsw_sp_port, NULL); 1761 if (IS_ERR(block_cb)) 1762 return PTR_ERR(block_cb); 1763 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f, 1764 ingress); 1765 if (err) { 1766 flow_block_cb_free(block_cb); 1767 return err; 1768 } 1769 flow_block_cb_add(block_cb, f); 1770 list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list); 1771 return 0; 1772 case FLOW_BLOCK_UNBIND: 1773 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1774 f, ingress); 1775 block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port); 1776 if (!block_cb) 1777 return -ENOENT; 1778 1779 flow_block_cb_remove(block_cb, f); 1780 list_del(&block_cb->driver_list); 1781 return 0; 1782 default: 1783 return -EOPNOTSUPP; 1784 } 1785 } 1786 1787 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1788 void *type_data) 1789 { 1790 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1791 1792 switch (type) { 1793 case TC_SETUP_BLOCK: 1794 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1795 case TC_SETUP_QDISC_RED: 1796 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1797 case TC_SETUP_QDISC_PRIO: 1798 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1799 case TC_SETUP_QDISC_ETS: 1800 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data); 1801 default: 1802 return -EOPNOTSUPP; 1803 } 1804 } 1805 1806 1807 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1808 { 1809 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1810 1811 if (!enable) { 1812 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || 1813 mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || 1814 !list_empty(&mlxsw_sp_port->mall_tc_list)) { 1815 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1816 return -EINVAL; 1817 } 1818 
mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); 1819 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); 1820 } else { 1821 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); 1822 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); 1823 } 1824 return 0; 1825 } 1826 1827 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable) 1828 { 1829 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1830 char pplr_pl[MLXSW_REG_PPLR_LEN]; 1831 int err; 1832 1833 if (netif_running(dev)) 1834 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1835 1836 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable); 1837 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr), 1838 pplr_pl); 1839 1840 if (netif_running(dev)) 1841 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1842 1843 return err; 1844 } 1845 1846 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1847 1848 static int mlxsw_sp_handle_feature(struct net_device *dev, 1849 netdev_features_t wanted_features, 1850 netdev_features_t feature, 1851 mlxsw_sp_feature_handler feature_handler) 1852 { 1853 netdev_features_t changes = wanted_features ^ dev->features; 1854 bool enable = !!(wanted_features & feature); 1855 int err; 1856 1857 if (!(changes & feature)) 1858 return 0; 1859 1860 err = feature_handler(dev, enable); 1861 if (err) { 1862 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1863 enable ? 
"Enable" : "Disable", &feature, err); 1864 return err; 1865 } 1866 1867 if (enable) 1868 dev->features |= feature; 1869 else 1870 dev->features &= ~feature; 1871 1872 return 0; 1873 } 1874 static int mlxsw_sp_set_features(struct net_device *dev, 1875 netdev_features_t features) 1876 { 1877 netdev_features_t oper_features = dev->features; 1878 int err = 0; 1879 1880 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1881 mlxsw_sp_feature_hw_tc); 1882 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK, 1883 mlxsw_sp_feature_loopback); 1884 1885 if (err) { 1886 dev->features = oper_features; 1887 return -EINVAL; 1888 } 1889 1890 return 0; 1891 } 1892 1893 static struct devlink_port * 1894 mlxsw_sp_port_get_devlink_port(struct net_device *dev) 1895 { 1896 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1897 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1898 1899 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core, 1900 mlxsw_sp_port->local_port); 1901 } 1902 1903 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, 1904 struct ifreq *ifr) 1905 { 1906 struct hwtstamp_config config; 1907 int err; 1908 1909 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1910 return -EFAULT; 1911 1912 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, 1913 &config); 1914 if (err) 1915 return err; 1916 1917 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1918 return -EFAULT; 1919 1920 return 0; 1921 } 1922 1923 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, 1924 struct ifreq *ifr) 1925 { 1926 struct hwtstamp_config config; 1927 int err; 1928 1929 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, 1930 &config); 1931 if (err) 1932 return err; 1933 1934 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1935 return -EFAULT; 1936 1937 return 0; 1938 } 1939 1940 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) 
1941 { 1942 struct hwtstamp_config config = {0}; 1943 1944 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1945 } 1946 1947 static int 1948 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1949 { 1950 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1951 1952 switch (cmd) { 1953 case SIOCSHWTSTAMP: 1954 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1955 case SIOCGHWTSTAMP: 1956 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1957 default: 1958 return -EOPNOTSUPP; 1959 } 1960 } 1961 1962 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1963 .ndo_open = mlxsw_sp_port_open, 1964 .ndo_stop = mlxsw_sp_port_stop, 1965 .ndo_start_xmit = mlxsw_sp_port_xmit, 1966 .ndo_setup_tc = mlxsw_sp_setup_tc, 1967 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1968 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1969 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1970 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1971 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1972 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1973 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1974 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1975 .ndo_set_features = mlxsw_sp_set_features, 1976 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1977 .ndo_do_ioctl = mlxsw_sp_port_ioctl, 1978 }; 1979 1980 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1981 struct ethtool_drvinfo *drvinfo) 1982 { 1983 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1984 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1985 1986 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1987 sizeof(drvinfo->driver)); 1988 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1989 sizeof(drvinfo->version)); 1990 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1991 "%d.%d.%d", 1992 mlxsw_sp->bus_info->fw_rev.major, 1993 mlxsw_sp->bus_info->fw_rev.minor, 1994 
mlxsw_sp->bus_info->fw_rev.subminor); 1995 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1996 sizeof(drvinfo->bus_info)); 1997 } 1998 1999 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 2000 struct ethtool_pauseparam *pause) 2001 { 2002 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2003 2004 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 2005 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 2006 } 2007 2008 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 2009 struct ethtool_pauseparam *pause) 2010 { 2011 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 2012 2013 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 2014 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 2015 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 2016 2017 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 2018 pfcc_pl); 2019 } 2020 2021 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 2022 struct ethtool_pauseparam *pause) 2023 { 2024 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2025 bool pause_en = pause->tx_pause || pause->rx_pause; 2026 int err; 2027 2028 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 2029 netdev_err(dev, "PFC already enabled on port\n"); 2030 return -EINVAL; 2031 } 2032 2033 if (pause->autoneg) { 2034 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 2035 return -EINVAL; 2036 } 2037 2038 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 2039 if (err) { 2040 netdev_err(dev, "Failed to configure port's headroom\n"); 2041 return err; 2042 } 2043 2044 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 2045 if (err) { 2046 netdev_err(dev, "Failed to set PAUSE parameters\n"); 2047 goto err_port_pause_configure; 2048 } 2049 2050 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 2051 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 2052 2053 return 0; 2054 2055 err_port_pause_configure: 2056 pause_en = 
mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 2057 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 2058 return err; 2059 } 2060 2061 struct mlxsw_sp_port_hw_stats { 2062 char str[ETH_GSTRING_LEN]; 2063 u64 (*getter)(const char *payload); 2064 bool cells_bytes; 2065 }; 2066 2067 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 2068 { 2069 .str = "a_frames_transmitted_ok", 2070 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 2071 }, 2072 { 2073 .str = "a_frames_received_ok", 2074 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 2075 }, 2076 { 2077 .str = "a_frame_check_sequence_errors", 2078 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 2079 }, 2080 { 2081 .str = "a_alignment_errors", 2082 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 2083 }, 2084 { 2085 .str = "a_octets_transmitted_ok", 2086 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 2087 }, 2088 { 2089 .str = "a_octets_received_ok", 2090 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 2091 }, 2092 { 2093 .str = "a_multicast_frames_xmitted_ok", 2094 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 2095 }, 2096 { 2097 .str = "a_broadcast_frames_xmitted_ok", 2098 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 2099 }, 2100 { 2101 .str = "a_multicast_frames_received_ok", 2102 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 2103 }, 2104 { 2105 .str = "a_broadcast_frames_received_ok", 2106 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 2107 }, 2108 { 2109 .str = "a_in_range_length_errors", 2110 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 2111 }, 2112 { 2113 .str = "a_out_of_range_length_field", 2114 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 2115 }, 2116 { 2117 .str = "a_frame_too_long_errors", 2118 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 2119 }, 2120 { 2121 .str = "a_symbol_error_during_carrier", 2122 .getter = 
			mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 (interfaces MIB) counter group of the PPCNT register. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group of the PPCNT register. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (EtherLike MIB) counter group of the PPCNT register. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Discard-reason counter group of the PPCNT register. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counter group of the PPCNT register. The "_prio" suffix
 * is completed with the priority number when strings are emitted.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-traffic-class counter group of the PPCNT register. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		/* Reported by the device in cells; converted to bytes. */
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total number of fixed ethtool statistics: all scalar groups, plus the
 * per-priority group replicated for each priority and the per-TC group
 * replicated for each traffic class. PTP statistics are added on top at
 * run time (see mlxsw_sp_port_get_sset_count()).
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Append the per-priority stat names for one priority, advancing *p by
 * ETH_GSTRING_LEN per string.
 */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Append the per-TC stat names for one traffic class, advancing *p by
 * ETH_GSTRING_LEN per string.
 */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool .get_strings: emit stat names in the same order that
 * mlxsw_sp_port_get_stats() emits values.
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		/* PTP stat names come last, matching get_stats(). */
		mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
		break;
	}
}

/* ethtool .set_phys_id: toggle the port LED via the MLCR register so
 * the physical port can be identified.
 */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		/* Blink intervals are not supported. */
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its descriptor table and length. */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2863_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_3635_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_DISCARD_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Read one PPCNT counter group and store its values into data starting
 * at data_index, converting cell-based counters to bytes.
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool .get_ethtool_stats: fill data with all counter groups in the
 * order declared by mlxsw_sp_port_get_strings().
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int i, data_index = 0;

	/* IEEE 802.3
	   Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2863 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}

	/* PTP counters */
	mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
						    data, data_index);
	data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
}

/* ethtool .get_sset_count: fixed stats plus ASIC-specific PTP stats. */
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN +
		       mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
	default:
		return -EOPNOTSUPP;
	}
}

/* Spectrum-1 mapping between a PTYS register speed bit mask, the
 * corresponding ethtool link-mode bit and the speed in Mb/s.
 */
struct mlxsw_sp1_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

/* Spectrum-1 PTYS speed capabilities. Several PTYS bits can share one
 * ethtool link mode (e.g. SGMII and 1000BASE-KX).
 */
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Derive the supported port types (FIBRE/Backplane) from the PTYS
 * capability bits for Spectrum-1.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Translate PTYS capability bits to an ethtool link-mode bitmap.
 * width is unused on Spectrum-1 (kept for ops-interface parity).
 */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Return the speed (Mb/s) of the first link mode whose PTYS bit is set,
 * or SPEED_UNKNOWN if none matches.
 */
static u32
mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			return mlxsw_sp1_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ethtool speed/duplex from the operational PTYS bits. Without
 * carrier both are reported as unknown; all supported modes are full
 * duplex.
 */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Translate an ethtool advertising bitmap to PTYS capability bits. */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect the PTYS bits of every link mode matching a forced speed. */
static u32
mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
			u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect the PTYS bits of every link mode at or below upper_speed. */
static u32
mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Spectrum-1 uses a fixed 25G base speed per lane. */
static int
mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32 *base_speed)
{
	*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
	return 0;
}

/* Pack a PTYS Ethernet query/set for Spectrum-1 (legacy proto fields). */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

/* Unpack capability/admin/operational protocol masks from PTYS. */
static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* Spectrum-1 implementation of the port type/speed ops interface. */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp1_from_ptys_link,
	.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
	.to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed,
	.port_speed_base = mlxsw_sp1_port_speed_base,
	.reg_ptys_eth_pack =
			      mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
};

/* Spectrum-2 speed groups: each extended PTYS speed bit maps to a list
 * of ethtool link modes rather than a single one.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_400gaui_8[] = {
	ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)

/* Bit flags describing which port widths (lane counts) support a given
 * Spectrum-2 link mode.
 */
#define MLXSW_SP_PORT_MASK_WIDTH_1X	BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X	BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X	BIT(2)
#define MLXSW_SP_PORT_MASK_WIDTH_8X	BIT(3)

/* Convert a port width (number of lanes) to its width-mask bit.
 * Returns 0 (matching no link mode) for unexpected widths.
 */
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
	switch (width) {
	case 1:
		return MLXSW_SP_PORT_MASK_WIDTH_1X;
	case 2:
		return MLXSW_SP_PORT_MASK_WIDTH_2X;
	case 4:
		return MLXSW_SP_PORT_MASK_WIDTH_4X;
	case 8:
		return MLXSW_SP_PORT_MASK_WIDTH_8X;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

/* Spectrum-2 mapping between an extended PTYS speed bit, the matching
 * ethtool link-mode list, the supported port widths and the speed.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
	u8 mask_width;
};

/* Spectrum-2 extended PTYS speed capabilities. */
static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len =
				 MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_50000,
	},
	{
		/* 50G over a single lane is only possible on 1x ports. */
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_200000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_400000,
	},
};

#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)

/* On Spectrum-2 all ports are reported as both FIBRE and Backplane
 * capable; the PTYS bits are not used to distinguish media here.
 */
static void
mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set every ethtool link-mode bit associated with one speed group. */
static void
mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			  unsigned long *mode)
{
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++)
		__set_bit(link_mode->mask_ethtool[i], mode);
}

/* Translate extended PTYS bits to an ethtool link-mode bitmap, limited
 * to the modes the port's width can support.
 */
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
		    (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
			mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
						  mode);
	}
}

/* Return the speed (Mb/s) of the first speed group whose extended PTYS
 * bit is set, or SPEED_UNKNOWN if none matches.
 */
static u32
mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
			return mlxsw_sp2_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ethtool speed/duplex from the operational extended PTYS bits;
 * unknown without carrier, always full duplex otherwise.
 */
static void
mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* A speed group is considered advertised only when ALL of its ethtool
 * link-mode bits are set in the user-supplied bitmap.
 */
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			   const unsigned long *mode)
{
	int cnt = 0;
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++) {
		if (test_bit(link_mode->mask_ethtool[i], mode))
			cnt++;
	}

	return cnt == link_mode->m_ethtool_len;
}

/* Translate an ethtool advertising bitmap to extended PTYS bits,
 * restricted to speed groups supported at the port's width.
 */
static u32
mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
		    mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
					       cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
				   u8 width, u32 speed)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;
3229 3230 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3231 if ((speed == mlxsw_sp2_port_link_mode[i].speed) && 3232 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3233 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3234 } 3235 return ptys_proto; 3236 } 3237 3238 static u32 3239 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 3240 { 3241 u32 ptys_proto = 0; 3242 int i; 3243 3244 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3245 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 3246 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3247 } 3248 return ptys_proto; 3249 } 3250 3251 static int 3252 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3253 u32 *base_speed) 3254 { 3255 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3256 u32 eth_proto_cap; 3257 int err; 3258 3259 /* In Spectrum-2, the speed of 1x can change from port to port, so query 3260 * it from firmware. 3261 */ 3262 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3263 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3264 if (err) 3265 return err; 3266 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3267 3268 if (eth_proto_cap & 3269 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3270 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3271 return 0; 3272 } 3273 3274 if (eth_proto_cap & 3275 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3276 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3277 return 0; 3278 } 3279 3280 return -EIO; 3281 } 3282 3283 static void 3284 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3285 u8 local_port, u32 proto_admin, 3286 bool autoneg) 3287 { 3288 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3289 } 3290 3291 static void 3292 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3293 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3294 u32 *p_eth_proto_oper) 3295 { 3296 
mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3297 p_eth_proto_admin, p_eth_proto_oper); 3298 } 3299 3300 static const struct mlxsw_sp_port_type_speed_ops 3301 mlxsw_sp2_port_type_speed_ops = { 3302 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3303 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3304 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3305 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3306 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3307 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3308 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3309 .port_speed_base = mlxsw_sp2_port_speed_base, 3310 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3311 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3312 }; 3313 3314 static void 3315 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3316 u8 width, struct ethtool_link_ksettings *cmd) 3317 { 3318 const struct mlxsw_sp_port_type_speed_ops *ops; 3319 3320 ops = mlxsw_sp->port_type_speed_ops; 3321 3322 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3323 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3324 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3325 3326 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3327 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, 3328 cmd->link_modes.supported); 3329 } 3330 3331 static void 3332 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3333 u32 eth_proto_admin, bool autoneg, u8 width, 3334 struct ethtool_link_ksettings *cmd) 3335 { 3336 const struct mlxsw_sp_port_type_speed_ops *ops; 3337 3338 ops = mlxsw_sp->port_type_speed_ops; 3339 3340 if (!autoneg) 3341 return; 3342 3343 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3344 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, 3345 cmd->link_modes.advertising); 3346 } 3347 3348 static u8 3349 mlxsw_sp_port_connector_port(enum 
mlxsw_reg_ptys_connector_type connector_type) 3350 { 3351 switch (connector_type) { 3352 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3353 return PORT_OTHER; 3354 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3355 return PORT_NONE; 3356 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3357 return PORT_TP; 3358 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3359 return PORT_AUI; 3360 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3361 return PORT_BNC; 3362 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3363 return PORT_MII; 3364 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3365 return PORT_FIBRE; 3366 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3367 return PORT_DA; 3368 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3369 return PORT_OTHER; 3370 default: 3371 WARN_ON_ONCE(1); 3372 return PORT_OTHER; 3373 } 3374 } 3375 3376 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3377 struct ethtool_link_ksettings *cmd) 3378 { 3379 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3380 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3381 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3382 const struct mlxsw_sp_port_type_speed_ops *ops; 3383 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3384 u8 connector_type; 3385 bool autoneg; 3386 int err; 3387 3388 ops = mlxsw_sp->port_type_speed_ops; 3389 3390 autoneg = mlxsw_sp_port->link.autoneg; 3391 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3392 0, false); 3393 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3394 if (err) 3395 return err; 3396 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3397 ð_proto_admin, ð_proto_oper); 3398 3399 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, 3400 mlxsw_sp_port->mapping.width, cmd); 3401 3402 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3403 mlxsw_sp_port->mapping.width, cmd); 3404 3405 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 3406 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3407 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3408 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3409 eth_proto_oper, cmd); 3410 3411 return 0; 3412 } 3413 3414 static int 3415 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3416 const struct ethtool_link_ksettings *cmd) 3417 { 3418 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3419 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3420 const struct mlxsw_sp_port_type_speed_ops *ops; 3421 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3422 u32 eth_proto_cap, eth_proto_new; 3423 bool autoneg; 3424 int err; 3425 3426 ops = mlxsw_sp->port_type_speed_ops; 3427 3428 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3429 0, false); 3430 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3431 if (err) 3432 return err; 3433 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3434 3435 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3436 eth_proto_new = autoneg ? 
		ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width,
					 cmd) :
		ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width,
				   cmd->base.speed);

	/* Only program modes the port is actually capable of. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "No supported speed requested\n");
		return -EINVAL;
	}

	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_new, autoneg);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	mlxsw_sp_port->link.autoneg = autoneg;

	if (!netif_running(dev))
		return 0;

	/* Toggle the port administratively so the new PTYS configuration
	 * takes effect on a running interface.
	 */
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return 0;
}

/* ethtool .get_module_info: delegate to the shared environment helper for
 * the module backing this port.
 */
static int mlxsw_sp_get_module_info(struct net_device *netdev,
				    struct ethtool_modinfo *modinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_get_module_info(mlxsw_sp->core,
					mlxsw_sp_port->mapping.module,
					modinfo);

	return err;
}

/* ethtool .get_module_eeprom: read the module EEPROM via the shared
 * environment helper.
 */
static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee,
				      u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
					  mlxsw_sp_port->mapping.module, ee,
					  data);

	return err;
}

/* ethtool .get_ts_info: timestamping capabilities come from the per-ASIC
 * PTP implementation.
 */
static int
mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info);
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = mlxsw_sp_port_get_pauseparam,
	.set_pauseparam = mlxsw_sp_port_set_pauseparam,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
	.get_module_info = mlxsw_sp_get_module_info,
	.get_module_eeprom = mlxsw_sp_get_module_eeprom,
	.get_ts_info = mlxsw_sp_get_ts_info,
};

/* Enable every speed up to base_speed * width on the port. The base speed
 * is queried per port since it differs between ASIC generations/ports.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;
	u32 upper_speed;
	u32 base_speed;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port,
				   &base_speed);
	if (err)
		return err;
	upper_speed = base_speed * mlxsw_sp_port->mapping.width;

	eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_admin, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

/* Configure one QEEC scheduling element: link it to @next_index in the
 * @hr hierarchy and set its DWRR mode/weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl,
				       dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Configure the max shaper (rate ceiling) of one QEEC scheduling element.
 * Pass MLXSW_REG_QEEC_MAS_DIS as @maxrate to disable shaping.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Configure the min shaper (guaranteed bandwidth) of one QEEC scheduling
 * element.
 */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map one switch priority of the port to a traffic class. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Bring the port's egress scheduling tree to a known initial state:
 * build the group/subgroup/TC hierarchy (TCs i+8 are the multicast
 * counterparts of unicast TCs i), disable all max shapers, give the
 * multicast TCs a minimal guaranteed bandwidth, and map every priority
 * to TC 0. Returns 0 on success or the first register-write error.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable or disable multicast-aware TC mapping (QTCTM) for the port. */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create and register the netdev for one front-panel port. A non-zero
 * @split_base_local_port marks the port as a split port. On failure the
 * goto ladder unwinds every completed step in reverse order.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / port_mapping->width,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats
		= netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Disallow all VLANs initially; membership is added explicitly. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	/* Publish the port before register_netdev() so callbacks that fire
	 * during registration can look it up.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down one front-panel port; mirrors mlxsw_sp_port_create() in
 * reverse order.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* Register the CPU port with the core. Unlike front-panel ports it has no
 * netdev, so only a minimal mlxsw_sp_port structure is allocated.
 */
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

/* Unregister and free the CPU port. */
static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
				mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

/* Return true when a port object exists for @local_port. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Remove all front-panel ports, then the CPU port, and free the ports
 * array. Local port 0 is the CPU port, hence iteration starts at 1.
 */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
}

/* Create the CPU port and every front-panel port that has a module
 * mapping. On failure, already-created ports are torn down in reverse.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	return err;
}

/* Query each local port's module mapping from firmware and cache it.
 * Entries stay NULL for ports with zero width (no module attached).
 */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
		if (err)
			goto err_port_module_info_get;
		if (!port_mapping.width)
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}

/* Free the cached port-to-module mapping table. */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
}

/* Round @local_port down to the first port of its @max_width-wide cluster
 * (local ports are 1-based).
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
{
	u8 offset = (local_port - 1) % max_width;

	return local_port - offset;
}

/* Create @count split ports starting at @base_port, @offset local ports
 * apart, each taking an equal share of the parent's lanes. Unwinds created
 * ports on failure.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Undo a split: recreate the original unsplit ports that previously
 * occupied the [base_port, base_port + count * offset) range.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them.
*/ 4149 for (i = 0; i < count * offset; i++) { 4150 port_mapping = mlxsw_sp->port_mapping[base_port + i]; 4151 if (!port_mapping) 4152 continue; 4153 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping); 4154 } 4155 } 4156 4157 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, 4158 unsigned int count, 4159 unsigned int max_width) 4160 { 4161 enum mlxsw_res_id local_ports_in_x_res_id; 4162 int split_width = max_width / count; 4163 4164 if (split_width == 1) 4165 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X; 4166 else if (split_width == 2) 4167 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X; 4168 else if (split_width == 4) 4169 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X; 4170 else 4171 return -EINVAL; 4172 4173 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id)) 4174 return -EINVAL; 4175 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); 4176 } 4177 4178 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 4179 unsigned int count, 4180 struct netlink_ext_ack *extack) 4181 { 4182 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4183 struct mlxsw_sp_port_mapping port_mapping; 4184 struct mlxsw_sp_port *mlxsw_sp_port; 4185 int max_width; 4186 u8 base_port; 4187 int offset; 4188 int i; 4189 int err; 4190 4191 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4192 if (!mlxsw_sp_port) { 4193 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4194 local_port); 4195 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 4196 return -EINVAL; 4197 } 4198 4199 /* Split ports cannot be split. 
	 */
	if (mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max and 1 module width cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	/* Only power-of-two counts in (1, max_width] are meaningful. */
	if (count == 1 || !is_power_of_2(count) || count > max_width) {
		netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
		NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		/* Any other occupied slot in the range makes the requested
		 * layout impossible.
		 */
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Keep a copy of the mapping; the port is about to be removed. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports that were removed above. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}

/* devlink port unsplit handler: remove the split ports sharing this
 * port's cluster and recreate the original unsplit port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}
4307 4308 count = max_width / mlxsw_sp_port->mapping.width; 4309 4310 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 4311 if (WARN_ON(offset < 0)) { 4312 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 4313 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 4314 return -EINVAL; 4315 } 4316 4317 base_port = mlxsw_sp_port->split_base_local_port; 4318 4319 for (i = 0; i < count; i++) 4320 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4321 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4322 4323 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 4324 4325 return 0; 4326 } 4327 4328 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 4329 char *pude_pl, void *priv) 4330 { 4331 struct mlxsw_sp *mlxsw_sp = priv; 4332 struct mlxsw_sp_port *mlxsw_sp_port; 4333 enum mlxsw_reg_pude_oper_status status; 4334 u8 local_port; 4335 4336 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 4337 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4338 if (!mlxsw_sp_port) 4339 return; 4340 4341 status = mlxsw_reg_pude_oper_status_get(pude_pl); 4342 if (status == MLXSW_PORT_OPER_STATUS_UP) { 4343 netdev_info(mlxsw_sp_port->dev, "link up\n"); 4344 netif_carrier_on(mlxsw_sp_port->dev); 4345 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 4346 } else { 4347 netdev_info(mlxsw_sp_port->dev, "link down\n"); 4348 netif_carrier_off(mlxsw_sp_port->dev); 4349 } 4350 } 4351 4352 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 4353 char *mtpptr_pl, bool ingress) 4354 { 4355 u8 local_port; 4356 u8 num_rec; 4357 int i; 4358 4359 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 4360 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4361 for (i = 0; i < num_rec; i++) { 4362 u8 domain_number; 4363 u8 message_type; 4364 u16 sequence_id; 4365 u64 timestamp; 4366 4367 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4368 &domain_number, &sequence_id, 4369 
×tamp); 4370 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4371 message_type, domain_number, 4372 sequence_id, timestamp); 4373 } 4374 } 4375 4376 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4377 char *mtpptr_pl, void *priv) 4378 { 4379 struct mlxsw_sp *mlxsw_sp = priv; 4380 4381 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4382 } 4383 4384 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4385 char *mtpptr_pl, void *priv) 4386 { 4387 struct mlxsw_sp *mlxsw_sp = priv; 4388 4389 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4390 } 4391 4392 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4393 u8 local_port, void *priv) 4394 { 4395 struct mlxsw_sp *mlxsw_sp = priv; 4396 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4397 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4398 4399 if (unlikely(!mlxsw_sp_port)) { 4400 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4401 local_port); 4402 return; 4403 } 4404 4405 skb->dev = mlxsw_sp_port->dev; 4406 4407 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4408 u64_stats_update_begin(&pcpu_stats->syncp); 4409 pcpu_stats->rx_packets++; 4410 pcpu_stats->rx_bytes += skb->len; 4411 u64_stats_update_end(&pcpu_stats->syncp); 4412 4413 skb->protocol = eth_type_trans(skb, skb->dev); 4414 netif_receive_skb(skb); 4415 } 4416 4417 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4418 void *priv) 4419 { 4420 skb->offload_fwd_mark = 1; 4421 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4422 } 4423 4424 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4425 u8 local_port, void *priv) 4426 { 4427 skb->offload_l3_fwd_mark = 1; 4428 skb->offload_fwd_mark = 1; 4429 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4430 } 4431 4432 static void mlxsw_sp_rx_listener_sample_func(struct 
					     sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	/* Report only the truncated length when truncation is configured. */
	size = mlxsw_sp_port->sample->truncate ?
		       mlxsw_sp_port->sample->trunc_size : skb->len;

	/* psample_group is RCU-protected; it may be cleared concurrently. */
	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	/* Sampled packets are consumed here; they are never forwarded. */
	consume_skb(skb);
}

/* RX listener that hands the packet to the per-ASIC PTP receive hook. */
static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
				     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

/* Convenience wrappers binding a trap ID to the matching RX listener and
 * trap group; the mirror action for disabled traps is always DISCARD.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct
mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP,
			  false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

static const struct
mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Program a CPU policer (QPCR register) for every trap group that needs
 * one, bounding the rate of trapped packets reaching the CPU. Trap
 * groups without a case entry get no policer.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Configure every known trap group (HTGT register) with its priority,
 * CPU traffic class and the policer programmed above; the policer ID
 * equals the trap group ID by convention, except for the EVENT group
 * which is not policed.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A policed group must reference an existing policer. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners with the core, unwinding all
 * previously registered entries on failure.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister an array of trap listeners from the core. */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Set up CPU policers and trap groups, then register the common listener
 * table plus any per-ASIC extra listeners (mlxsw_sp->listeners).
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	return err;
}

/* Tear down traps in reverse registration order. */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash (SLCR register) with a base-MAC-derived seed so
 * different switches hash differently, and allocate the per-LAG upper
 * tracking array sized by the MAX_LAG resource.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

/* Release the LAG tracking array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Configure the EMAD trap group with default priority/TC and no policer,
 * so register access works before the full trap setup runs.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* Spectrum-1 PTP callbacks. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};

/* Spectrum-2 PTP callbacks. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};

/* Forward declaration; the handler is defined later in this file. */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common driver init shared by all Spectrum generations. Brings up every
 * subsystem in dependency order; any failure unwinds via the goto ladder
 * at the end of the function.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* PTP hardware clock is only usable when the bus can read the
	 * free-running counter.
	 */
	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Unwind in strict reverse order of the initialization above. */
err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: bind the SP1-specific operation tables (including the
 * extra PTP FIFO event listeners) and run the common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-2 init: bind the SP2-specific operation tables and run the
 * common init. No extra listeners are set for this generation.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 5124 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 5125 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 5126 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 5127 5128 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 5129 } 5130 5131 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 5132 { 5133 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5134 5135 mlxsw_sp_ports_remove(mlxsw_sp); 5136 mlxsw_sp_port_module_info_fini(mlxsw_sp); 5137 mlxsw_sp_dpipe_fini(mlxsw_sp); 5138 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 5139 &mlxsw_sp->netdevice_nb); 5140 if (mlxsw_sp->clock) { 5141 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 5142 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 5143 } 5144 mlxsw_sp_router_fini(mlxsw_sp); 5145 mlxsw_sp_acl_fini(mlxsw_sp); 5146 mlxsw_sp_nve_fini(mlxsw_sp); 5147 mlxsw_sp_afa_fini(mlxsw_sp); 5148 mlxsw_sp_counter_pool_fini(mlxsw_sp); 5149 mlxsw_sp_switchdev_fini(mlxsw_sp); 5150 mlxsw_sp_span_fini(mlxsw_sp); 5151 mlxsw_sp_lag_fini(mlxsw_sp); 5152 mlxsw_sp_buffers_fini(mlxsw_sp); 5153 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 5154 mlxsw_sp_traps_fini(mlxsw_sp); 5155 mlxsw_sp_fids_fini(mlxsw_sp); 5156 mlxsw_sp_kvdl_fini(mlxsw_sp); 5157 } 5158 5159 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 5160 * 802.1Q FIDs 5161 */ 5162 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 5163 VLAN_VID_MASK - 1) 5164 5165 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 5166 .used_max_mid = 1, 5167 .max_mid = MLXSW_SP_MID_MAX, 5168 .used_flood_tables = 1, 5169 .used_flood_mode = 1, 5170 .flood_mode = 3, 5171 .max_fid_flood_tables = 3, 5172 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5173 .used_max_ib_mc = 1, 5174 .max_ib_mc = 0, 5175 .used_max_pkey = 1, 5176 .max_pkey = 0, 5177 .used_kvd_sizes = 1, 5178 .kvd_hash_single_parts = 59, 5179 .kvd_hash_double_parts = 41, 5180 .kvd_linear_size = 
					 MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Device configuration profile for Spectrum-2 (also reused by the
 * Spectrum-3 driver struct). No KVD sizing fields here; see
 * mlxsw_sp2_resources_kvd_register(), which registers only the
 * top-level KVD resource.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Compute devlink size parameters (min/max/granularity) for the KVD and
 * its three partitions. Each partition may grow up to the total KVD size
 * minus the minimum sizes of the other two partitions.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* Total KVD: fixed size, not resizable. */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the Spectrum-1 KVD resource tree with devlink:
 * kvd -> {linear, hash_double, hash_single}. The hash sizes default to
 * the double:single parts ratio from the config profile, rounded down
 * to the KVD granularity.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the remaining (hash) part per the profile's parts ratio. */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size
		    = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* Spectrum-2 exposes the KVD as a single, fixed-size devlink resource;
 * there are no linear/hash partitions to register.
 */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

/* Register the SPAN agents pool as a fixed-size devlink resource. */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	return 0;

err_resources_span_register:
	/* Roll back the KVD resources registered above. */
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	return 0;

err_resources_span_register:
	/* Roll back the KVD resource registered above. */
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Resolve the actual KVD partition sizes, preferring user-configured
 * devlink resource sizes and falling back to profile-derived defaults.
 * Returns -EIO when the results violate the device minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the
	 * linear part. It is split into the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be multiples of the granularity
	 * from the profile. In case the user provided the
	 * sizes, they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		/* No user-provided size - derive from the parts ratio. */
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		/* Single part takes whatever the other two left over. */
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink 'fw_load_policy' validation: only the 'driver' and 'flash'
 * values are accepted.
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

/* devlink params common to all Spectrum generations. */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink params and default 'fw_load_policy' to
 * 'driver'.
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* Runtime getter for the ACL region rehash interval param. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* Runtime setter for the ACL region rehash interval param. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* Additional devlink params registered only by the Spectrum-2/3 drivers. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Register the common params plus the ACL rehash interval param. */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	/* Driverinit default: rehash interval of 0. */
	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

/* Undo mlxsw_sp2_params_register() in reverse order. */
static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

/* Core callback for a transmitted PTP packet: strip the Tx header the
 * driver prepended and forward the skb to the generation-specific
 * PTP handler.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* mlxsw core driver ops for Spectrum-1. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.params_register = mlxsw_sp_params_register,
	.params_unregister = mlxsw_sp_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
	.res_query_enabled = true,
};

/* mlxsw core driver ops for Spectrum-2. Differs from Spectrum-1 in the
 * init, resources and params callbacks, the profile, and the absence of
 * a kvd_sizes_get callback.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

/* mlxsw core driver ops for Spectrum-3. Reuses every Spectrum-2
 * callback and the Spectrum-2 profile; only the kind string differs.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

/* True iff @dev is an mlxsw_sp front-panel port netdev. */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* netdev_walk_all_lower_dev() callback: report the first lower device
 * that is an mlxsw_sp port via @data and stop the walk (return 1).
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw_sp port underlying @dev - either @dev itself or one of
 * its lower devices. Returns NULL when there is none. See the _rcu
 * variant below for callers running under rcu_read_lock().
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Resolve @dev to its mlxsw_sp instance, or NULL when @dev is not backed
 * by an mlxsw_sp port.
 */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(). */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Like the _rcu lookup above, but also takes a reference on the port
 * netdev; release it with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

/* Drop the reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make @mlxsw_sp_port leave the bridge @lag_dev is enslaved to (if any)
 * and every bridge that a direct upper of @lag_dev is enslaved to.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create LAG @lag_id in the device (SLDR register). */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy LAG @lag_id in the device (SLDR register). */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add the port to LAG @lag_id's collector at @port_index (SLCOR
 * register).
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port from LAG @lag_id's collector (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection for the port within LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable collection for the port within LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Find the LAG ID already assigned to @lag_dev, or the lowest free one.
 * Returns -EBUSY when all LAG IDs are in use by other devices.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate that @lag_dev can be offloaded: a matching or free LAG ID
 * must exist and the LAG must use hash-based Tx. On failure the reason
 * is reported through @extack.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Pick the first free member slot within LAG @lag_id, or -EBUSY when
 * the LAG is full.
 */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Join @mlxsw_sp_port to @lag_dev: create the LAG in hardware for the
 * first member, then add the port to the LAG's collector.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* The first member creates the LAG in the device. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	/* Only destroy the LAG if this port would have been its first
	 * member (ref_count was not incremented yet).
	 */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Undo mlxsw_sp_port_lag_join(): detach the port from the LAG and
 * destroy the LAG in the device when its last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are members of
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* Add the port to LAG @lag_id's distributor (SLDR register). */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove the port from LAG @lag_id's distributor (SLDR register). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable both collection and distribution for the port within its LAG,
 * unwinding the collector on distributor failure.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Disable both distribution and collection for the port within its LAG,
 * re-adding the distributor on collector failure.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* React to a LAG lower-state change: mirror the bond's tx_enabled flag
 * into the device's collector/distributor state for this port.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the STP state of every VID on the port to forwarding (@enable) or
 * discarding (!@enable) via a single SPMS register write.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* NOTE(review): heap allocation suggests MLXSW_REG_SPMS_LEN is too
	 * large for the stack - confirm against the register definition.
	 */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for OVS enslavement: switch to virtual-port mode,
 * force STP forwarding, allow VIDs 1..4094 and disable learning on all
 * of them.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning on the VIDs already disabled above. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* True when more than one VxLAN device is a lower of bridge @br_dev. */
static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* In a VLAN-aware bridge, no two VxLAN lowers may map to the same VID;
 * returns false when such a collision exists. VxLAN devices without a
 * mapped VID are ignored.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Validate the VxLAN-related constraints on bridge @br_dev. On failure,
 * the violated rule is reported through @extack.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
/* NOTE(review): this chunk opens mid-function. The lines below, up to the
 * first closing brace, are the tail of a bridge/VxLAN validation helper
 * whose beginning lies before this chunk; they are reproduced unchanged.
 */
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Validate (NETDEV_PRECHANGEUPPER) and react to (NETDEV_CHANGEUPPER)
 * topology changes that place a front-panel port under an upper device:
 * bridge, LAG, OVS master, macvlan or VLAN device.
 *
 * lower_dev - device whose notifier fired (the port itself or an
 *             intermediate device such as a LAG the port is under).
 * dev       - the mlxsw port netdev.
 *
 * Returns a negative errno to veto the operation, 0 otherwise.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only the upper device types checked below are handled by
		 * this driver; veto anything else before linking happens.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		/* Joining a not-yet-offloaded bridge that already has VxLAN
		 * devices: make sure the VxLAN configuration is acceptable.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* An upper that itself has uppers is only acceptable when it
		 * is a bridge already offloaded by this driver.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		/* macvlan uppers require a router interface (RIF) to exist
		 * for the underlying device.
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				/* Disable collector/distributor before the
				 * port leaves the LAG.
				 */
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only the unlinking of a bridged VLAN upper is acted
			 * on here; other VLAN transitions are presumably
			 * covered by the VLAN device's own notifier path —
			 * verify against mlxsw_sp_netdevice_vlan_event().
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Reflect LAG lower-state changes for a lagged port to the device. */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	/* Errors are only logged; the notifier chain is never aborted. */
	return 0;
}

/* Dispatch a netdev notifier event concerning a front-panel port. */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replicate an event on a LAG device to every mlxsw port member,
 * stopping at the first error.
 */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle upper-device events for a VLAN device on top of an mlxsw port.
 * NOTE(review): the body of this function continues past this point.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	/* NOTE(review): continuation of mlxsw_sp_netdevice_port_vlan_event()
	 * begun above.
	 */
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* A port VLAN device may only be put under a bridge or have
		 * a macvlan on top.
		 */
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		/* Joining a not-yet-offloaded bridge that already has VxLAN
		 * devices: make sure the VxLAN configuration is acceptable.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		/* macvlan uppers require a router interface (RIF) on the
		 * VLAN device.
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed any other type. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Replicate a VLAN-on-LAG upper event to every mlxsw port in the LAG,
 * stopping at the first error.
 */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle upper-device events for a VLAN device on top of a bridge. The only
 * upper type accepted is a macvlan, and only when the VLAN device has a
 * router interface (RIF).
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Ignore VLAN devices unrelated to this driver's ports. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Route a VLAN device event according to what the VLAN sits on top of:
 * a physical port, a LAG or a bridge.
 * NOTE(review): the body of this function continues past this point.
 */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if
	    /* NOTE(review): continuation of mlxsw_sp_netdevice_vlan_event(). */
	    (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle upper-device events for a bridge device itself. Accepted uppers
 * are VLAN devices and macvlans; macvlans additionally require a router
 * interface (RIF) on the bridge.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Ignore bridges unrelated to this driver's ports. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* On unlinking, tear down driver state tied to the upper. */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* macvlan devices are not allowed uppers of their own; veto every
 * PRECHANGEUPPER except VRF enslavement, which is handled separately.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* True iff the event is a (PRE)CHANGEUPPER whose upper is an L3 master
 * (VRF) device.
 */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events on a VxLAN device related to membership in an offloaded
 * bridge: join/leave the bridge-VxLAN mapping as the device is linked,
 * unlinked, brought up or taken down.
 * NOTE(review): the body of this function continues past this point.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges unrelated to this driver's ports. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0,
							  /* NOTE(review): continuation of mlxsw_sp_netdevice_vxlan_event(). */
							  extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Top-level netdev notifier callback for the driver: maintains SPAN
 * (mirroring) state and routes the event to the matching per-device-type
 * handler. Returns the handler's verdict via notifier_from_errno().
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* Invalidate any SPAN entry pointing at an unregistering netdev,
	 * then re-resolve SPAN state for every event.
	 */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE(review): the VxLAN branch is a plain "if", not part of the
	 * "else if" chain below, so a VxLAN error in 'err' could be
	 * overwritten if one of the later conditions also matches —
	 * confirm this is intentional.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Validators for IPv4/IPv6 address addition, registered at module init. */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI IDs and PCI driver stubs for each Spectrum generation. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Module init: register address validators, the three core drivers and the
 * three PCI drivers, unwinding in reverse order on failure.
 * NOTE(review): the body of this function continues past this point.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		/* NOTE(review): continuation of mlxsw_sp_module_init(). */
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

	/* Error unwinding: each label undoes exactly the registrations that
	 * succeeded before the corresponding failure, in reverse order.
	 */
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

/* Module exit: unregister everything registered by mlxsw_sp_module_init(),
 * in reverse registration order.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
/* Declare the Spectrum-2 firmware image associated with this module. */
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);