1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <net/switchdev.h> 27 #include <net/pkt_cls.h> 28 #include <net/tc_act/tc_mirred.h> 29 #include <net/netevent.h> 30 #include <net/tc_act/tc_sample.h> 31 #include <net/addrconf.h> 32 33 #include "spectrum.h" 34 #include "pci.h" 35 #include "core.h" 36 #include "core_env.h" 37 #include "reg.h" 38 #include "port.h" 39 #include "trap.h" 40 #include "txheader.h" 41 #include "spectrum_cnt.h" 42 #include "spectrum_dpipe.h" 43 #include "spectrum_acl_flex_actions.h" 44 #include "spectrum_span.h" 45 #include "spectrum_ptp.h" 46 #include "../mlxfw/mlxfw.h" 47 48 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 49 50 #define MLXSW_SP1_FWREV_MAJOR 13 51 #define MLXSW_SP1_FWREV_MINOR 2000 52 #define MLXSW_SP1_FWREV_SUBMINOR 2308 53 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 54 55 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 56 .major = MLXSW_SP1_FWREV_MAJOR, 57 .minor = MLXSW_SP1_FWREV_MINOR, 58 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 59 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 60 }; 61 62 #define MLXSW_SP1_FW_FILENAME \ 63 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 64 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 65 "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 66 67 #define MLXSW_SP2_FWREV_MAJOR 29 68 #define MLXSW_SP2_FWREV_MINOR 2000 69 #define MLXSW_SP2_FWREV_SUBMINOR 2308 70 71 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 72 .major = MLXSW_SP2_FWREV_MAJOR, 73 .minor = MLXSW_SP2_FWREV_MINOR, 74 .subminor = MLXSW_SP2_FWREV_SUBMINOR, 75 }; 76 77 #define MLXSW_SP2_FW_FILENAME \ 78 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 79 "." __stringify(MLXSW_SP2_FWREV_MINOR) \ 80 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" 81 82 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 83 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 84 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 85 static const char mlxsw_sp_driver_version[] = "1.0"; 86 87 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 88 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 89 }; 90 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 91 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 92 }; 93 94 /* tx_hdr_version 95 * Tx header version. 96 * Must be set to 1. 97 */ 98 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 99 100 /* tx_hdr_ctl 101 * Packet control type. 102 * 0 - Ethernet control (e.g. EMADs, LACP) 103 * 1 - Ethernet data 104 */ 105 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 106 107 /* tx_hdr_proto 108 * Packet protocol type. Must be set to 1 (Ethernet). 109 */ 110 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 111 112 /* tx_hdr_rx_is_router 113 * Packet is sent from the router. Valid for data packets only. 114 */ 115 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 116 117 /* tx_hdr_fid_valid 118 * Indicates if the 'fid' field is valid and should be used for 119 * forwarding lookup. Valid for data packets only. 120 */ 121 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 122 123 /* tx_hdr_swid 124 * Switch partition ID. Must be set to 0. 125 */ 126 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 127 128 /* tx_hdr_control_tclass 129 * Indicates if the packet should use the control TClass and not one 130 * of the data TClasses. 131 */ 132 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 133 134 /* tx_hdr_etclass 135 * Egress TClass to be used on the egress device on the egress port. 136 */ 137 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 138 139 /* tx_hdr_port_mid 140 * Destination local port for unicast packets. 141 * Destination multicast ID for multicast packets. 142 * 143 * Control packets are directed to a specific egress port, while data 144 * packets are transmitted through the CPU port (0) into the switch partition, 145 * where forwarding rules are applied. 146 */ 147 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 148 149 /* tx_hdr_fid 150 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 151 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 152 * Valid for data packets only. 153 */ 154 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 155 156 /* tx_hdr_type 157 * 0 - Data packets 158 * 6 - Control packets 159 */ 160 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 161 162 struct mlxsw_sp_mlxfw_dev { 163 struct mlxfw_dev mlxfw_dev; 164 struct mlxsw_sp *mlxsw_sp; 165 }; 166 167 struct mlxsw_sp_ptp_ops { 168 struct mlxsw_sp_ptp_clock * 169 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 170 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 171 172 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 173 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 174 175 /* Notify a driver that a packet that might be PTP was received. Driver 176 * is responsible for freeing the passed-in SKB. 177 */ 178 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 179 u8 local_port); 180 181 /* Notify a driver that a timestamped packet was transmitted. Driver 182 * is responsible for freeing the passed-in SKB. 183 */ 184 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 185 u8 local_port); 186 187 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 188 struct hwtstamp_config *config); 189 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 190 struct hwtstamp_config *config); 191 void (*shaper_work)(struct work_struct *work); 192 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 193 struct ethtool_ts_info *info); 194 int (*get_stats_count)(void); 195 void (*get_stats_strings)(u8 **p); 196 void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, 197 u64 *data, int data_index); 198 }; 199 200 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 201 u16 component_index, u32 *p_max_size, 202 u8 *p_align_bits, u16 *p_max_write_size) 203 { 204 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 205 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 206 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 207 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 208 int err; 209 210 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 211 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 212 if (err) 213 return err; 214 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 215 p_max_write_size); 216 217 *p_align_bits = max_t(u8, *p_align_bits, 2); 218 *p_max_write_size = min_t(u16, *p_max_write_size, 219 MLXSW_REG_MCDA_MAX_DATA_LEN); 220 return 0; 221 } 222 223 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 224 { 225 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 226 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 227 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 228 char mcc_pl[MLXSW_REG_MCC_LEN]; 229 u8 control_state; 230 int err; 231 232 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 233 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 234 if (err) 235 return err; 236 237 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 238 if (control_state != MLXFW_FSM_STATE_IDLE) 239 return -EBUSY; 240 241 mlxsw_reg_mcc_pack(mcc_pl, 242 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 243 0, *fwhandle, 0); 244 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 245 } 246 247 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 248 u32 fwhandle, u16 component_index, 249 u32 component_size) 250 { 251 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 252 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 253 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 254 char mcc_pl[MLXSW_REG_MCC_LEN]; 255 256 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 257 component_index, fwhandle, component_size); 258 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 259 } 260 261 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 262 u32 fwhandle, u8 *data, u16 size, 263 u32 offset) 264 { 265 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 266 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 267 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 268 char mcda_pl[MLXSW_REG_MCDA_LEN]; 269 270 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 271 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 272 } 273 274 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 275 u32 fwhandle, u16 component_index) 276 { 277 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 278 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 279 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 280 char mcc_pl[MLXSW_REG_MCC_LEN]; 281 282 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 283 component_index, fwhandle, 0); 284 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 285 } 286 287 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 288 { 289 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 290 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 291 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 292 char mcc_pl[MLXSW_REG_MCC_LEN]; 293 294 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 295 fwhandle, 0); 296 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 297 } 298 299 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 300 enum mlxfw_fsm_state *fsm_state, 301 enum mlxfw_fsm_state_err *fsm_state_err) 302 { 303 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 304 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 305 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 306 char mcc_pl[MLXSW_REG_MCC_LEN]; 307 u8 control_state; 308 u8 error_code; 309 int err; 310 311 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 312 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 313 if (err) 314 return err; 315 316 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 317 *fsm_state = control_state; 318 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 319 MLXFW_FSM_STATE_ERR_MAX); 320 return 0; 321 } 322 323 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 324 { 325 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 326 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 327 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 328 char mcc_pl[MLXSW_REG_MCC_LEN]; 329 330 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 331 fwhandle, 0); 332 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 333 } 334 335 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 336 { 337 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 338 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 339 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 340 char mcc_pl[MLXSW_REG_MCC_LEN]; 341 342 mlxsw_reg_mcc_pack(mcc_pl, 343 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 344 fwhandle, 0); 345 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 346 } 347 348 static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev, 349 const char *msg, const char *comp_name, 350 u32 done_bytes, u32 total_bytes) 351 { 352 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 353 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 354 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 355 356 devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core), 357 msg, comp_name, 358 done_bytes, total_bytes); 359 } 360 361 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 362 .component_query = mlxsw_sp_component_query, 363 .fsm_lock = mlxsw_sp_fsm_lock, 364 .fsm_component_update = mlxsw_sp_fsm_component_update, 365 .fsm_block_download = mlxsw_sp_fsm_block_download, 366 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 367 .fsm_activate = mlxsw_sp_fsm_activate, 368 .fsm_query_state = mlxsw_sp_fsm_query_state, 369 .fsm_cancel = mlxsw_sp_fsm_cancel, 370 .fsm_release = mlxsw_sp_fsm_release, 371 .status_notify = mlxsw_sp_status_notify, 372 }; 373 374 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 375 const struct firmware *firmware, 376 struct netlink_ext_ack *extack) 377 { 378 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 379 .mlxfw_dev = { 380 .ops = &mlxsw_sp_mlxfw_dev_ops, 381 .psid = mlxsw_sp->bus_info->psid, 382 .psid_size = strlen(mlxsw_sp->bus_info->psid), 383 }, 384 .mlxsw_sp = mlxsw_sp 385 }; 386 int err; 387 388 mlxsw_core_fw_flash_start(mlxsw_sp->core); 389 devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core)); 390 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 391 firmware, extack); 392 devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core)); 393 mlxsw_core_fw_flash_end(mlxsw_sp->core); 394 395 return err; 396 } 397 398 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 399 { 400 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 401 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 402 const char *fw_filename = mlxsw_sp->fw_filename; 403 union devlink_param_value value; 404 const struct firmware *firmware; 405 int err; 406 407 /* Don't check if driver does not require it */ 408 if (!req_rev || !fw_filename) 409 return 0; 410 411 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 412 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 413 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 414 &value); 415 if (err) 416 return err; 417 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 418 return 0; 419 420 /* Validate driver & FW are compatible */ 421 if (rev->major != req_rev->major) { 422 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 423 rev->major, req_rev->major); 424 return -EINVAL; 425 } 426 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 427 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && 428 mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) 429 return 0; 430 431 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 432 rev->major, rev->minor, rev->subminor); 433 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 434 fw_filename); 435 436 err = request_firmware_direct(&firmware, fw_filename, 437 mlxsw_sp->bus_info->dev); 438 if (err) { 439 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 440 fw_filename); 441 return err; 442 } 443 444 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 445 release_firmware(firmware); 446 if (err) 447 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 448 449 /* On FW flash success, tell the caller FW reset is needed 450 * if current FW supports it. 451 */ 452 if (rev->minor >= req_rev->can_reset_minor) 453 return err ? err : -EAGAIN; 454 else 455 return 0; 456 } 457 458 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 459 const char *file_name, const char *component, 460 struct netlink_ext_ack *extack) 461 { 462 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 463 const struct firmware *firmware; 464 int err; 465 466 if (component) 467 return -EOPNOTSUPP; 468 469 err = request_firmware_direct(&firmware, file_name, 470 mlxsw_sp->bus_info->dev); 471 if (err) 472 return err; 473 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 474 release_firmware(firmware); 475 476 return err; 477 } 478 479 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 480 unsigned int counter_index, u64 *packets, 481 u64 *bytes) 482 { 483 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 484 int err; 485 486 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 487 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 488 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 489 if (err) 490 return err; 491 if (packets) 492 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 493 if (bytes) 494 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 495 return 0; 496 } 497 498 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 499 unsigned int counter_index) 500 { 501 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 502 503 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 504 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 505 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 506 } 507 508 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 509 unsigned int *p_counter_index) 510 { 511 int err; 512 513 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 514 p_counter_index); 515 if (err) 516 return err; 517 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 518 if (err) 519 goto err_counter_clear; 520 return 0; 521 522 err_counter_clear: 523 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 524 *p_counter_index); 525 return err; 526 } 527 528 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 529 unsigned int counter_index) 530 { 531 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 532 counter_index); 533 } 534 535 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 536 const struct mlxsw_tx_info *tx_info) 537 { 538 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 539 540 memset(txhdr, 0, MLXSW_TXHDR_LEN); 541 542 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 543 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 544 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 545 mlxsw_tx_hdr_swid_set(txhdr, 0); 546 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 547 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 548 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 549 } 550 551 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 552 { 553 switch (state) { 554 case BR_STATE_FORWARDING: 555 return MLXSW_REG_SPMS_STATE_FORWARDING; 556 case BR_STATE_LEARNING: 557 return MLXSW_REG_SPMS_STATE_LEARNING; 558 case BR_STATE_LISTENING: /* fall-through */ 559 case BR_STATE_DISABLED: /* fall-through */ 560 case BR_STATE_BLOCKING: 561 return MLXSW_REG_SPMS_STATE_DISCARDING; 562 default: 563 BUG(); 564 } 565 } 566 567 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 568 u8 state) 569 { 570 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 571 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 572 char *spms_pl; 573 int err; 574 575 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 576 if (!spms_pl) 577 return -ENOMEM; 578 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 579 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 580 581 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 582 kfree(spms_pl); 583 return err; 584 } 585 586 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 587 { 588 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 589 int err; 590 591 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 592 if (err) 593 return err; 594 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 595 return 0; 596 } 597 598 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 599 bool enable, u32 rate) 600 { 601 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 602 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 603 604 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 605 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 606 } 607 608 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 609 bool is_up) 610 { 611 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 612 char paos_pl[MLXSW_REG_PAOS_LEN]; 613 614 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 615 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 616 MLXSW_PORT_ADMIN_STATUS_DOWN); 617 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 618 } 619 620 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 621 unsigned char *addr) 622 { 623 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 624 char ppad_pl[MLXSW_REG_PPAD_LEN]; 625 626 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 627 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 628 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 629 } 630 631 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 632 { 633 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 634 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 635 636 ether_addr_copy(addr, mlxsw_sp->base_mac); 637 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 638 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 639 } 640 641 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 642 { 643 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 644 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 645 int max_mtu; 646 int err; 647 648 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 649 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 650 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 651 if (err) 652 return err; 653 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 654 655 if (mtu > max_mtu) 656 return -EINVAL; 657 658 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 659 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 660 } 661 662 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 663 { 664 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 665 char pspa_pl[MLXSW_REG_PSPA_LEN]; 666 667 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 668 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 669 } 670 671 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 672 { 673 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 674 char svpe_pl[MLXSW_REG_SVPE_LEN]; 675 676 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 677 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 678 } 679 680 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 681 bool learn_enable) 682 { 683 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 684 char *spvmlr_pl; 685 int err; 686 687 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 688 if (!spvmlr_pl) 689 return -ENOMEM; 690 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 691 learn_enable); 692 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 693 kfree(spvmlr_pl); 694 return err; 695 } 696 697 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 698 u16 vid) 699 { 700 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 701 char spvid_pl[MLXSW_REG_SPVID_LEN]; 702 703 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 704 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 705 } 706 707 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 708 bool allow) 709 { 710 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 711 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 712 713 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 714 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 715 } 716 717 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 718 { 719 int err; 720 721 if (!vid) { 722 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 723 if (err) 724 return err; 725 } else { 726 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 727 if (err) 728 return err; 729 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 730 if (err) 731 goto err_port_allow_untagged_set; 732 } 733 734 mlxsw_sp_port->pvid = vid; 735 return 0; 736 737 err_port_allow_untagged_set: 738 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 739 return err; 740 } 741 742 static int 743 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 744 { 745 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 746 char sspr_pl[MLXSW_REG_SSPR_LEN]; 747 748 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 749 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 750 } 751 752 static int 753 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, 754 struct mlxsw_sp_port_mapping *port_mapping) 755 { 756 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 757 bool separate_rxtx; 758 u8 module; 759 u8 width; 760 int err; 761 int i; 762 763 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 764 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 765 if (err) 766 return err; 767 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 768 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 769 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 770 771 if (width && !is_power_of_2(width)) { 772 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n", 773 local_port); 774 return -EINVAL; 775 } 776 777 for (i = 0; i < width; i++) { 778 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 779 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 780 local_port); 781 return -EINVAL; 782 } 783 if (separate_rxtx && 784 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 785 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 786 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 787 local_port); 788 return -EINVAL; 789 } 790 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) { 791 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 792 local_port); 793 return -EINVAL; 794 } 795 } 796 797 port_mapping->module = module; 798 port_mapping->width = width; 799 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 800 return 0; 801 } 802 803 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) 804 { 805 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; 806 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 807 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 808 int i; 809 810 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 811 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 812 for (i = 0; i < port_mapping->width; i++) { 813 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 814 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 815 } 816 817 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 818 } 819 820 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 821 { 822 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 823 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 824 825 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 826 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 827 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 828 } 829 830 static int mlxsw_sp_port_open(struct net_device *dev) 831 { 832 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 833 int err; 834 835 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 836 if (err) 837 return err; 838 netif_start_queue(dev); 839 return 0; 840 } 841 842 static int mlxsw_sp_port_stop(struct net_device *dev) 843 { 844 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 845 846 netif_stop_queue(dev); 847 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 848 } 849 850 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 851 struct net_device *dev) 852 { 853 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 854 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 855 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 856 const struct mlxsw_tx_info tx_info = { 857 .local_port = mlxsw_sp_port->local_port, 858 .is_emad = false, 859 }; 860 u64 len; 861 int err; 862 863 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 864 865 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 866 return NETDEV_TX_BUSY; 867 868 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 869 struct sk_buff *skb_orig = skb; 870 871 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 872 if (!skb) { 873 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 874 dev_kfree_skb_any(skb_orig); 875 return NETDEV_TX_OK; 876 } 877 dev_consume_skb_any(skb_orig); 878 } 879 880 if (eth_skb_pad(skb)) { 881 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 882 return NETDEV_TX_OK; 883 } 884 885 mlxsw_sp_txhdr_construct(skb, &tx_info); 886 /* TX header is consumed by HW on the way so we shouldn't count its 887 * bytes as being sent. 888 */ 889 len = skb->len - MLXSW_TXHDR_LEN; 890 891 /* Due to a race we might fail here because of a full queue. In that 892 * unlikely case we simply drop the packet. 893 */ 894 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 895 896 if (!err) { 897 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 898 u64_stats_update_begin(&pcpu_stats->syncp); 899 pcpu_stats->tx_packets++; 900 pcpu_stats->tx_bytes += len; 901 u64_stats_update_end(&pcpu_stats->syncp); 902 } else { 903 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 904 dev_kfree_skb_any(skb); 905 } 906 return NETDEV_TX_OK; 907 } 908 909 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 910 { 911 } 912 913 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 914 { 915 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 916 struct sockaddr *addr = p; 917 int err; 918 919 if (!is_valid_ether_addr(addr->sa_data)) 920 return -EADDRNOTAVAIL; 921 922 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 923 if (err) 924 return err; 925 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 926 return 0; 927 } 928 929 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 930 int mtu) 931 { 932 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 933 } 934 935 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 936 937 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 938 u16 delay) 939 { 940 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 941 BITS_PER_BYTE)); 942 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 943 mtu); 944 } 945 946 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 947 * Assumes 100m cable and maximum MTU. 948 */ 949 #define MLXSW_SP_PAUSE_DELAY 58752 950 951 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 952 u16 delay, bool pfc, bool pause) 953 { 954 if (pfc) 955 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 956 else if (pause) 957 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 958 else 959 return 0; 960 } 961 962 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 963 bool lossy) 964 { 965 if (lossy) 966 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 967 else 968 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 969 thres); 970 } 971 972 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 973 u8 *prio_tc, bool pause_en, 974 struct ieee_pfc *my_pfc) 975 { 976 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 977 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 978 u16 delay = !!my_pfc ? my_pfc->delay : 0; 979 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 980 u32 taken_headroom_cells = 0; 981 u32 max_headroom_cells; 982 int i, j, err; 983 984 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 985 986 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 987 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 988 if (err) 989 return err; 990 991 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 992 bool configure = false; 993 bool pfc = false; 994 u16 thres_cells; 995 u16 delay_cells; 996 u16 total_cells; 997 bool lossy; 998 999 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 1000 if (prio_tc[j] == i) { 1001 pfc = pfc_en & BIT(j); 1002 configure = true; 1003 break; 1004 } 1005 } 1006 1007 if (!configure) 1008 continue; 1009 1010 lossy = !(pfc || pause_en); 1011 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 1012 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 1013 pfc, pause_en); 1014 total_cells = thres_cells + delay_cells; 1015 1016 taken_headroom_cells += total_cells; 1017 if (taken_headroom_cells > max_headroom_cells) 1018 return -ENOBUFS; 1019 1020 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 1021 thres_cells, lossy); 1022 } 1023 1024 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 1025 } 1026 1027 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 1028 int mtu, bool pause_en) 1029 { 1030 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 1031 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 1032 struct ieee_pfc *my_pfc; 1033 u8 *prio_tc; 1034 1035 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 1036 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; 1037 1038 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 1039 pause_en, my_pfc); 1040 } 1041 1042 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 1043 { 1044 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1045 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1046 int err; 1047 1048 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 1049 if (err) 1050 return err; 1051 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 1052 if (err) 1053 goto err_span_port_mtu_update; 1054 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 1055 if (err) 1056 goto err_port_mtu_set; 1057 dev->mtu = mtu; 1058 return 0; 1059 1060 err_port_mtu_set: 1061 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 1062 err_span_port_mtu_update: 1063 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1064 return err; 1065 } 1066 1067 static int 1068 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 1069 struct rtnl_link_stats64 *stats) 1070 { 1071 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1072 struct mlxsw_sp_port_pcpu_stats *p; 1073 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1074 u32 tx_dropped = 0; 1075 unsigned int start; 1076 int i; 1077 1078 for_each_possible_cpu(i) { 1079 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 1080 do { 1081 start = u64_stats_fetch_begin_irq(&p->syncp); 1082 rx_packets = p->rx_packets; 1083 rx_bytes = p->rx_bytes; 1084 tx_packets = p->tx_packets; 1085 tx_bytes = p->tx_bytes; 1086 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1087 1088 stats->rx_packets += rx_packets; 1089 stats->rx_bytes += rx_bytes; 1090 stats->tx_packets += tx_packets; 1091 stats->tx_bytes += tx_bytes; 1092 /* tx_dropped is u32, updated without syncp protection. */ 1093 tx_dropped += p->tx_dropped; 1094 } 1095 stats->tx_dropped = tx_dropped; 1096 return 0; 1097 } 1098 1099 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 1100 { 1101 switch (attr_id) { 1102 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1103 return true; 1104 } 1105 1106 return false; 1107 } 1108 1109 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 1110 void *sp) 1111 { 1112 switch (attr_id) { 1113 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1114 return mlxsw_sp_port_get_sw_stats64(dev, sp); 1115 } 1116 1117 return -EINVAL; 1118 } 1119 1120 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 1121 int prio, char *ppcnt_pl) 1122 { 1123 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1124 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1125 1126 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 1127 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1128 } 1129 1130 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 1131 struct rtnl_link_stats64 *stats) 1132 { 1133 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1134 int err; 1135 1136 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 1137 0, ppcnt_pl); 1138 if (err) 1139 goto out; 1140 1141 stats->tx_packets = 1142 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 1143 stats->rx_packets = 1144 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 1145 stats->tx_bytes = 1146 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 1147 stats->rx_bytes = 1148 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1149 stats->multicast = 1150 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1151 1152 stats->rx_crc_errors = 1153 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1154 stats->rx_frame_errors = 1155 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1156 1157 stats->rx_length_errors = ( 1158 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1159 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 1160 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 1161 1162 stats->rx_errors = (stats->rx_crc_errors + 1163 stats->rx_frame_errors + stats->rx_length_errors); 1164 1165 out: 1166 return err; 1167 } 1168 1169 static void 1170 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1171 struct mlxsw_sp_port_xstats *xstats) 1172 { 1173 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1174 int err, i; 1175 1176 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1177 ppcnt_pl); 1178 if (!err) 1179 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1180 1181 for (i = 0; i < TC_MAX_QUEUE; i++) { 1182 err = mlxsw_sp_port_get_stats_raw(dev, 1183 MLXSW_REG_PPCNT_TC_CONG_TC, 1184 i, ppcnt_pl); 1185 if (!err) 1186 xstats->wred_drop[i] = 1187 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1188 1189 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1190 i, ppcnt_pl); 1191 if (err) 1192 continue; 1193 1194 xstats->backlog[i] = 1195 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1196 xstats->tail_drop[i] = 1197 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1198 } 1199 1200 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1201 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1202 i, ppcnt_pl); 1203 if (err) 1204 continue; 1205 1206 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1207 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1208 } 1209 } 1210 1211 static void update_stats_cache(struct work_struct *work) 1212 { 1213 struct mlxsw_sp_port *mlxsw_sp_port = 1214 container_of(work, struct mlxsw_sp_port, 1215 periodic_hw_stats.update_dw.work); 1216 1217 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 1218 goto out; 1219 1220 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 1221 &mlxsw_sp_port->periodic_hw_stats.stats); 1222 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 1223 &mlxsw_sp_port->periodic_hw_stats.xstats); 1224 1225 out: 1226 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1227 MLXSW_HW_STATS_UPDATE_TIME); 1228 } 1229 1230 /* Return the stats from a cache that is updated periodically, 1231 * as this function might get called in an atomic context. 1232 */ 1233 static void 1234 mlxsw_sp_port_get_stats64(struct net_device *dev, 1235 struct rtnl_link_stats64 *stats) 1236 { 1237 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1238 1239 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 1240 } 1241 1242 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1243 u16 vid_begin, u16 vid_end, 1244 bool is_member, bool untagged) 1245 { 1246 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1247 char *spvm_pl; 1248 int err; 1249 1250 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1251 if (!spvm_pl) 1252 return -ENOMEM; 1253 1254 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1255 vid_end, is_member, untagged); 1256 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1257 kfree(spvm_pl); 1258 return err; 1259 } 1260 1261 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1262 u16 vid_end, bool is_member, bool untagged) 1263 { 1264 u16 vid, vid_e; 1265 int err; 1266 1267 for (vid = vid_begin; vid <= vid_end; 1268 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1269 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1270 vid_end); 1271 1272 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1273 is_member, untagged); 1274 if (err) 1275 return err; 1276 } 1277 1278 return 0; 1279 } 1280 1281 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1282 bool flush_default) 1283 { 1284 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1285 1286 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1287 &mlxsw_sp_port->vlans_list, list) { 1288 if (!flush_default && 1289 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 1290 continue; 1291 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1292 } 1293 } 1294 1295 static void 1296 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1297 { 1298 if (mlxsw_sp_port_vlan->bridge_port) 1299 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1300 else if (mlxsw_sp_port_vlan->fid) 1301 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 1302 } 1303 1304 struct mlxsw_sp_port_vlan * 1305 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 1306 { 1307 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1308 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 1309 int err; 1310 1311 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1312 if (mlxsw_sp_port_vlan) 1313 return ERR_PTR(-EEXIST); 1314 1315 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 1316 if (err) 1317 return ERR_PTR(err); 1318 1319 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1320 if (!mlxsw_sp_port_vlan) { 1321 err = -ENOMEM; 1322 goto err_port_vlan_alloc; 1323 } 1324 1325 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1326 mlxsw_sp_port_vlan->vid = vid; 1327 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1328 1329 return mlxsw_sp_port_vlan; 1330 1331 err_port_vlan_alloc: 1332 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1333 return ERR_PTR(err); 1334 } 1335 1336 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1337 { 1338 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1339 u16 vid = mlxsw_sp_port_vlan->vid; 1340 1341 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1342 list_del(&mlxsw_sp_port_vlan->list); 1343 kfree(mlxsw_sp_port_vlan); 1344 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1345 } 1346 1347 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1348 __be16 __always_unused proto, u16 vid) 1349 { 1350 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1351 1352 /* VLAN 0 is added to HW filter when device goes up, but it is 1353 * reserved in our case, so simply return. 1354 */ 1355 if (!vid) 1356 return 0; 1357 1358 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1359 } 1360 1361 static int mlxsw_sp_port_kill_vid(struct net_device *dev, 1362 __be16 __always_unused proto, u16 vid) 1363 { 1364 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1365 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1366 1367 /* VLAN 0 is removed from HW filter when device goes down, but 1368 * it is reserved in our case, so simply return. 1369 */ 1370 if (!vid) 1371 return 0; 1372 1373 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1374 if (!mlxsw_sp_port_vlan) 1375 return 0; 1376 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1377 1378 return 0; 1379 } 1380 1381 static struct mlxsw_sp_port_mall_tc_entry * 1382 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, 1383 unsigned long cookie) { 1384 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1385 1386 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) 1387 if (mall_tc_entry->cookie == cookie) 1388 return mall_tc_entry; 1389 1390 return NULL; 1391 } 1392 1393 static int 1394 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1395 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror, 1396 const struct flow_action_entry *act, 1397 bool ingress) 1398 { 1399 enum mlxsw_sp_span_type span_type; 1400 1401 if (!act->dev) { 1402 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); 1403 return -EINVAL; 1404 } 1405 1406 mirror->ingress = ingress; 1407 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1408 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type, 1409 true, &mirror->span_id); 1410 } 1411 1412 static void 1413 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1414 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) 1415 { 1416 enum mlxsw_sp_span_type span_type; 1417 1418 span_type = mirror->ingress ? 1419 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1420 mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id, 1421 span_type, true); 1422 } 1423 1424 static int 1425 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, 1426 struct tc_cls_matchall_offload *cls, 1427 const struct flow_action_entry *act, 1428 bool ingress) 1429 { 1430 int err; 1431 1432 if (!mlxsw_sp_port->sample) 1433 return -EOPNOTSUPP; 1434 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { 1435 netdev_err(mlxsw_sp_port->dev, "sample already active\n"); 1436 return -EEXIST; 1437 } 1438 if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) { 1439 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); 1440 return -EOPNOTSUPP; 1441 } 1442 1443 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, 1444 act->sample.psample_group); 1445 mlxsw_sp_port->sample->truncate = act->sample.truncate; 1446 mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size; 1447 mlxsw_sp_port->sample->rate = act->sample.rate; 1448 1449 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate); 1450 if (err) 1451 goto err_port_sample_set; 1452 return 0; 1453 1454 err_port_sample_set: 1455 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1456 return err; 1457 } 1458 1459 static void 1460 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) 1461 { 1462 if (!mlxsw_sp_port->sample) 1463 return; 1464 1465 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); 1466 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1467 } 1468 1469 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1470 struct tc_cls_matchall_offload *f, 1471 bool ingress) 1472 { 1473 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1474 __be16 protocol = f->common.protocol; 1475 struct flow_action_entry *act; 1476 int err; 1477 1478 if (!flow_offload_has_one_action(&f->rule->action)) { 1479 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); 1480 return -EOPNOTSUPP; 1481 } 1482 1483 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); 1484 if (!mall_tc_entry) 1485 return -ENOMEM; 1486 mall_tc_entry->cookie = f->cookie; 1487 1488 act = &f->rule->action.entries[0]; 1489 1490 if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) { 1491 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; 1492 1493 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; 1494 mirror = &mall_tc_entry->mirror; 1495 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, 1496 mirror, act, 1497 ingress); 1498 } else if (act->id == FLOW_ACTION_SAMPLE && 1499 protocol == htons(ETH_P_ALL)) { 1500 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; 1501 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f, 1502 act, ingress); 1503 } else { 1504 err = -EOPNOTSUPP; 1505 } 1506 1507 if (err) 1508 goto err_add_action; 1509 1510 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); 1511 return 0; 1512 1513 err_add_action: 1514 kfree(mall_tc_entry); 1515 return err; 1516 } 1517 1518 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1519 struct tc_cls_matchall_offload *f) 1520 { 1521 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1522 1523 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, 1524 f->cookie); 1525 if (!mall_tc_entry) { 1526 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); 1527 return; 1528 } 1529 list_del(&mall_tc_entry->list); 1530 1531 switch (mall_tc_entry->type) { 1532 case MLXSW_SP_PORT_MALL_MIRROR: 1533 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, 1534 &mall_tc_entry->mirror); 1535 break; 1536 case MLXSW_SP_PORT_MALL_SAMPLE: 1537 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); 1538 break; 1539 default: 1540 WARN_ON(1); 1541 } 1542 1543 kfree(mall_tc_entry); 1544 } 1545 1546 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1547 struct tc_cls_matchall_offload *f, 1548 bool ingress) 1549 { 1550 switch (f->command) { 1551 case TC_CLSMATCHALL_REPLACE: 1552 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, 1553 ingress); 1554 case TC_CLSMATCHALL_DESTROY: 1555 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); 1556 return 0; 1557 default: 1558 return -EOPNOTSUPP; 1559 } 1560 } 1561 1562 static int 1563 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, 1564 struct flow_cls_offload *f) 1565 { 1566 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block); 1567 1568 switch (f->command) { 1569 case FLOW_CLS_REPLACE: 1570 return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f); 1571 case FLOW_CLS_DESTROY: 1572 mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f); 1573 return 0; 1574 case FLOW_CLS_STATS: 1575 return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); 1576 case FLOW_CLS_TMPLT_CREATE: 1577 return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f); 1578 case FLOW_CLS_TMPLT_DESTROY: 1579 mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f); 1580 return 0; 1581 default: 1582 return -EOPNOTSUPP; 1583 } 1584 } 1585 1586 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type, 1587 void *type_data, 1588 void *cb_priv, bool ingress) 1589 { 1590 struct mlxsw_sp_port *mlxsw_sp_port = cb_priv; 1591 1592 switch (type) { 1593 case TC_SETUP_CLSMATCHALL: 1594 if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev, 1595 type_data)) 1596 return -EOPNOTSUPP; 1597 1598 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data, 1599 ingress); 1600 case TC_SETUP_CLSFLOWER: 1601 return 0; 1602 default: 1603 return -EOPNOTSUPP; 1604 } 1605 } 1606 1607 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type, 1608 void *type_data, 1609 void *cb_priv) 1610 { 1611 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1612 cb_priv, true); 1613 } 1614 1615 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type, 1616 void *type_data, 1617 void *cb_priv) 1618 { 1619 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1620 cb_priv, false); 1621 } 1622 1623 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, 1624 void *type_data, void *cb_priv) 1625 { 1626 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1627 1628 switch (type) { 1629 case TC_SETUP_CLSMATCHALL: 1630 return 0; 1631 case TC_SETUP_CLSFLOWER: 1632 if (mlxsw_sp_acl_block_disabled(acl_block)) 1633 return -EOPNOTSUPP; 1634 1635 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data); 1636 default: 1637 return -EOPNOTSUPP; 1638 } 1639 } 1640 1641 static void mlxsw_sp_tc_block_flower_release(void *cb_priv) 1642 { 1643 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1644 1645 mlxsw_sp_acl_block_destroy(acl_block); 1646 } 1647 1648 static LIST_HEAD(mlxsw_sp_block_cb_list); 1649 1650 static int 1651 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, 1652 struct flow_block_offload *f, bool ingress) 1653 { 1654 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1655 struct mlxsw_sp_acl_block *acl_block; 1656 struct flow_block_cb *block_cb; 1657 bool register_block = false; 1658 int err; 1659 1660 block_cb = flow_block_cb_lookup(f->block, 1661 mlxsw_sp_setup_tc_block_cb_flower, 1662 mlxsw_sp); 1663 if (!block_cb) { 1664 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net); 1665 if (!acl_block) 1666 return -ENOMEM; 1667 block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower, 1668 mlxsw_sp, acl_block, 1669 mlxsw_sp_tc_block_flower_release); 1670 if (IS_ERR(block_cb)) { 1671 mlxsw_sp_acl_block_destroy(acl_block); 1672 err = PTR_ERR(block_cb); 1673 goto err_cb_register; 1674 } 1675 register_block = true; 1676 } else { 1677 acl_block = flow_block_cb_priv(block_cb); 1678 } 1679 flow_block_cb_incref(block_cb); 1680 err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block, 1681 mlxsw_sp_port, ingress, f->extack); 1682 if (err) 1683 goto err_block_bind; 1684 1685 if (ingress) 1686 mlxsw_sp_port->ing_acl_block = acl_block; 1687 else 1688 mlxsw_sp_port->eg_acl_block = acl_block; 1689 1690 if (register_block) { 1691 flow_block_cb_add(block_cb, f); 1692 list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list); 1693 } 1694 1695 return 0; 1696 1697 err_block_bind: 1698 if (!flow_block_cb_decref(block_cb)) 1699 flow_block_cb_free(block_cb); 1700 err_cb_register: 1701 return err; 1702 } 1703 1704 static void 1705 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, 1706 struct flow_block_offload *f, bool ingress) 1707 { 1708 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1709 struct mlxsw_sp_acl_block *acl_block; 1710 struct flow_block_cb *block_cb; 1711 int err; 1712 1713 block_cb = flow_block_cb_lookup(f->block, 1714 mlxsw_sp_setup_tc_block_cb_flower, 1715 mlxsw_sp); 1716 if (!block_cb) 1717 return; 1718 1719 if (ingress) 1720 mlxsw_sp_port->ing_acl_block = NULL; 1721 else 1722 mlxsw_sp_port->eg_acl_block = NULL; 1723 1724 acl_block = flow_block_cb_priv(block_cb); 1725 err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, 1726 mlxsw_sp_port, ingress); 1727 if (!err && !flow_block_cb_decref(block_cb)) { 1728 flow_block_cb_remove(block_cb, f); 1729 list_del(&block_cb->driver_list); 1730 } 1731 } 1732 1733 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1734 struct flow_block_offload *f) 1735 { 1736 struct flow_block_cb *block_cb; 1737 flow_setup_cb_t *cb; 1738 bool ingress; 1739 int err; 1740 1741 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1742 cb = mlxsw_sp_setup_tc_block_cb_matchall_ig; 1743 ingress = true; 1744 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1745 cb = mlxsw_sp_setup_tc_block_cb_matchall_eg; 1746 ingress = false; 1747 } else { 1748 return -EOPNOTSUPP; 1749 } 1750 1751 f->driver_block_list = &mlxsw_sp_block_cb_list; 1752 1753 switch (f->command) { 1754 case FLOW_BLOCK_BIND: 1755 if (flow_block_cb_is_busy(cb, mlxsw_sp_port, 1756 &mlxsw_sp_block_cb_list)) 1757 return -EBUSY; 1758 1759 block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port, 1760 mlxsw_sp_port, NULL); 1761 if (IS_ERR(block_cb)) 1762 return PTR_ERR(block_cb); 1763 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f, 1764 ingress); 1765 if (err) { 1766 flow_block_cb_free(block_cb); 1767 return err; 1768 } 1769 flow_block_cb_add(block_cb, f); 1770 list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list); 1771 return 0; 1772 case FLOW_BLOCK_UNBIND: 1773 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1774 f, ingress); 1775 block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port); 1776 if (!block_cb) 1777 return -ENOENT; 1778 1779 flow_block_cb_remove(block_cb, f); 1780 list_del(&block_cb->driver_list); 1781 return 0; 1782 default: 1783 return -EOPNOTSUPP; 1784 } 1785 } 1786 1787 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1788 void *type_data) 1789 { 1790 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1791 1792 switch (type) { 1793 case TC_SETUP_BLOCK: 1794 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1795 case TC_SETUP_QDISC_RED: 1796 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1797 case TC_SETUP_QDISC_PRIO: 1798 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1799 default: 1800 return -EOPNOTSUPP; 1801 } 1802 } 1803 1804 1805 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1806 { 1807 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1808 1809 if (!enable) { 1810 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || 1811 mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || 1812 !list_empty(&mlxsw_sp_port->mall_tc_list)) { 1813 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1814 return -EINVAL; 1815 } 1816 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); 1817 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); 1818 } else { 1819 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); 1820 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); 1821 } 1822 return 0; 1823 } 1824 1825 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable) 1826 { 1827 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1828 char pplr_pl[MLXSW_REG_PPLR_LEN]; 1829 int err; 1830 1831 if (netif_running(dev)) 1832 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1833 1834 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable); 1835 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr), 1836 pplr_pl); 1837 1838 if (netif_running(dev)) 1839 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1840 1841 return err; 1842 } 1843 1844 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1845 1846 static int mlxsw_sp_handle_feature(struct net_device *dev, 1847 netdev_features_t wanted_features, 1848 netdev_features_t feature, 1849 mlxsw_sp_feature_handler feature_handler) 1850 { 1851 netdev_features_t changes = wanted_features ^ dev->features; 1852 bool enable = !!(wanted_features & feature); 1853 int err; 1854 1855 if (!(changes & feature)) 1856 return 0; 1857 1858 err = feature_handler(dev, enable); 1859 if (err) { 1860 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1861 enable ? "Enable" : "Disable", &feature, err); 1862 return err; 1863 } 1864 1865 if (enable) 1866 dev->features |= feature; 1867 else 1868 dev->features &= ~feature; 1869 1870 return 0; 1871 } 1872 static int mlxsw_sp_set_features(struct net_device *dev, 1873 netdev_features_t features) 1874 { 1875 netdev_features_t oper_features = dev->features; 1876 int err = 0; 1877 1878 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1879 mlxsw_sp_feature_hw_tc); 1880 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK, 1881 mlxsw_sp_feature_loopback); 1882 1883 if (err) { 1884 dev->features = oper_features; 1885 return -EINVAL; 1886 } 1887 1888 return 0; 1889 } 1890 1891 static struct devlink_port * 1892 mlxsw_sp_port_get_devlink_port(struct net_device *dev) 1893 { 1894 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1895 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1896 1897 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core, 1898 mlxsw_sp_port->local_port); 1899 } 1900 1901 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, 1902 struct ifreq *ifr) 1903 { 1904 struct hwtstamp_config config; 1905 int err; 1906 1907 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1908 return -EFAULT; 1909 1910 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, 1911 &config); 1912 if (err) 1913 return err; 1914 1915 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1916 return -EFAULT; 1917 1918 return 0; 1919 } 1920 1921 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, 1922 struct ifreq *ifr) 1923 { 1924 struct hwtstamp_config config; 1925 int err; 1926 1927 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, 1928 &config); 1929 if (err) 1930 return err; 1931 1932 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1933 return -EFAULT; 1934 1935 return 0; 1936 } 1937 1938 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) 1939 { 1940 struct hwtstamp_config config = {0}; 1941 1942 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1943 } 1944 1945 static int 1946 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1947 { 1948 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1949 1950 switch (cmd) { 1951 case SIOCSHWTSTAMP: 1952 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1953 case SIOCGHWTSTAMP: 1954 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1955 default: 1956 return -EOPNOTSUPP; 1957 } 1958 } 1959 1960 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1961 .ndo_open = mlxsw_sp_port_open, 1962 .ndo_stop = mlxsw_sp_port_stop, 1963 .ndo_start_xmit = mlxsw_sp_port_xmit, 1964 .ndo_setup_tc = mlxsw_sp_setup_tc, 1965 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1966 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1967 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1968 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1969 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1970 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1971 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1972 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1973 .ndo_set_features = mlxsw_sp_set_features, 1974 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1975 .ndo_do_ioctl = mlxsw_sp_port_ioctl, 1976 }; 1977 1978 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1979 struct ethtool_drvinfo *drvinfo) 1980 { 1981 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1982 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1983 1984 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1985 sizeof(drvinfo->driver)); 1986 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1987 sizeof(drvinfo->version)); 1988 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1989 "%d.%d.%d", 1990 mlxsw_sp->bus_info->fw_rev.major, 1991 mlxsw_sp->bus_info->fw_rev.minor, 1992 mlxsw_sp->bus_info->fw_rev.subminor); 1993 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1994 sizeof(drvinfo->bus_info)); 1995 } 1996 1997 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1998 struct ethtool_pauseparam *pause) 1999 { 2000 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2001 2002 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 2003 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 2004 } 2005 2006 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 2007 struct ethtool_pauseparam *pause) 2008 { 2009 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 2010 2011 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 2012 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 2013 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 2014 2015 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 2016 pfcc_pl); 2017 } 2018 2019 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 2020 struct ethtool_pauseparam *pause) 2021 { 2022 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2023 bool pause_en = pause->tx_pause || pause->rx_pause; 2024 int err; 2025 2026 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 2027 netdev_err(dev, "PFC already enabled on port\n"); 2028 return -EINVAL; 2029 } 2030 2031 if (pause->autoneg) { 2032 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 2033 return -EINVAL; 2034 } 2035 2036 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 2037 if (err) { 2038 netdev_err(dev, "Failed to configure port's headroom\n"); 2039 return err; 2040 } 2041 2042 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 2043 if (err) { 2044 netdev_err(dev, "Failed to set PAUSE parameters\n"); 2045 goto err_port_pause_configure; 2046 } 2047 2048 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 2049 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 2050 2051 return 0; 2052 2053 err_port_pause_configure: 2054 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 2055 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 2056 return err; 2057 } 2058 2059 struct mlxsw_sp_port_hw_stats { 2060 char str[ETH_GSTRING_LEN]; 2061 u64 (*getter)(const char *payload); 2062 bool cells_bytes; 2063 }; 2064 2065 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 2066 { 2067 .str = "a_frames_transmitted_ok", 2068 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 2069 }, 2070 { 2071 .str = "a_frames_received_ok", 2072 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 2073 }, 2074 { 2075 .str = "a_frame_check_sequence_errors", 2076 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 2077 }, 2078 { 2079 .str = "a_alignment_errors", 2080 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 2081 }, 2082 { 2083 .str = "a_octets_transmitted_ok", 2084 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 2085 }, 2086 { 2087 .str = "a_octets_received_ok", 2088 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 2089 }, 2090 { 2091 .str = "a_multicast_frames_xmitted_ok", 2092 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 2093 }, 2094 { 2095 .str = "a_broadcast_frames_xmitted_ok", 2096 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 2097 }, 2098 { 2099 .str = "a_multicast_frames_received_ok", 2100 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 2101 }, 2102 { 2103 .str = "a_broadcast_frames_received_ok", 2104 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 2105 }, 2106 { 2107 .str = "a_in_range_length_errors", 2108 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 2109 }, 2110 { 2111 .str = "a_out_of_range_length_field", 2112 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 2113 }, 2114 { 2115 .str = "a_frame_too_long_errors", 2116 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 2117 }, 2118 { 2119 .str = "a_symbol_error_during_carrier", 2120 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 2121 }, 2122 { 2123 .str = "a_mac_control_frames_transmitted", 2124 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 2125 }, 2126 { 2127 .str = "a_mac_control_frames_received", 2128 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 2129 }, 2130 { 2131 .str = "a_unsupported_opcodes_received", 2132 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 2133 }, 2134 { 2135 .str = "a_pause_mac_ctrl_frames_received", 2136 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 2137 }, 2138 { 2139 .str = "a_pause_mac_ctrl_frames_xmitted", 2140 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 2141 }, 2142 }; 2143 2144 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 2145 2146 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { 2147 { 2148 .str = "if_in_discards", 2149 .getter = mlxsw_reg_ppcnt_if_in_discards_get, 2150 }, 2151 { 2152 .str = "if_out_discards", 2153 .getter = mlxsw_reg_ppcnt_if_out_discards_get, 2154 }, 2155 { 2156 .str = "if_out_errors", 2157 .getter = mlxsw_reg_ppcnt_if_out_errors_get, 2158 }, 2159 }; 2160 2161 #define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ 2162 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) 2163 2164 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { 2165 { 2166 .str = "ether_stats_undersize_pkts", 2167 .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, 2168 }, 2169 { 2170 .str = "ether_stats_oversize_pkts", 2171 .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, 2172 }, 2173 { 2174 .str = "ether_stats_fragments", 2175 .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, 2176 }, 2177 { 2178 .str = "ether_pkts64octets", 2179 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, 2180 }, 2181 { 2182 .str = "ether_pkts65to127octets", 2183 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, 2184 }, 2185 { 2186 .str = "ether_pkts128to255octets", 2187 .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 2188 }, 2189 { 2190 .str = "ether_pkts256to511octets", 2191 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 2192 }, 2193 { 2194 .str = "ether_pkts512to1023octets", 2195 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 2196 }, 2197 { 2198 .str = "ether_pkts1024to1518octets", 2199 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 2200 }, 2201 { 2202 .str = "ether_pkts1519to2047octets", 2203 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 2204 }, 2205 { 2206 .str = "ether_pkts2048to4095octets", 2207 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 2208 }, 2209 { 2210 .str = "ether_pkts4096to8191octets", 2211 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, 2212 }, 2213 { 2214 .str = "ether_pkts8192to10239octets", 2215 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, 2216 }, 2217 }; 2218 2219 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ 2220 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) 2221 2222 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { 2223 { 2224 .str = "dot3stats_fcs_errors", 2225 .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, 2226 }, 2227 { 2228 .str = "dot3stats_symbol_errors", 2229 .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, 2230 }, 2231 { 2232 .str = "dot3control_in_unknown_opcodes", 2233 .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, 2234 }, 2235 { 2236 .str = "dot3in_pause_frames", 2237 .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, 2238 }, 2239 }; 2240 2241 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ 2242 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) 2243 2244 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { 2245 { 2246 .str = "discard_ingress_general", 2247 .getter = mlxsw_reg_ppcnt_ingress_general_get, 2248 }, 2249 { 2250 .str = "discard_ingress_policy_engine", 2251 .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, 2252 }, 2253 { 2254 .str = "discard_ingress_vlan_membership", 2255 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, 2256 }, 2257 { 2258 .str = "discard_ingress_tag_frame_type", 2259 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, 2260 }, 2261 { 2262 .str = "discard_egress_vlan_membership", 2263 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, 2264 }, 2265 { 2266 .str = "discard_loopback_filter", 2267 .getter = mlxsw_reg_ppcnt_loopback_filter_get, 2268 }, 2269 { 2270 .str = "discard_egress_general", 2271 .getter = mlxsw_reg_ppcnt_egress_general_get, 2272 }, 2273 { 2274 .str = "discard_egress_hoq", 2275 .getter = mlxsw_reg_ppcnt_egress_hoq_get, 2276 }, 2277 { 2278 .str = "discard_egress_policy_engine", 2279 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, 2280 }, 2281 { 2282 .str = "discard_ingress_tx_link_down", 2283 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, 2284 }, 2285 { 2286 .str = "discard_egress_stp_filter", 2287 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, 2288 }, 2289 { 2290 .str = "discard_egress_sll", 2291 .getter = mlxsw_reg_ppcnt_egress_sll_get, 2292 }, 2293 }; 2294 2295 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ 2296 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) 2297 2298 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 2299 { 2300 .str = "rx_octets_prio", 2301 .getter = mlxsw_reg_ppcnt_rx_octets_get, 2302 }, 2303 { 2304 .str = "rx_frames_prio", 2305 .getter = mlxsw_reg_ppcnt_rx_frames_get, 2306 }, 2307 { 2308 .str = "tx_octets_prio", 2309 .getter = mlxsw_reg_ppcnt_tx_octets_get, 2310 }, 2311 { 2312 .str = "tx_frames_prio", 2313 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2314 }, 2315 { 2316 .str = "rx_pause_prio", 2317 .getter = mlxsw_reg_ppcnt_rx_pause_get, 2318 }, 2319 { 2320 .str = "rx_pause_duration_prio", 2321 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2322 }, 2323 { 2324 .str = "tx_pause_prio", 2325 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2326 }, 2327 { 2328 .str = "tx_pause_duration_prio", 2329 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2330 }, 2331 }; 2332 2333 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2334 2335 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2336 { 2337 .str = "tc_transmit_queue_tc", 2338 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2339 .cells_bytes = true, 2340 }, 2341 { 2342 .str = "tc_no_buffer_discard_uc_tc", 2343 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2344 }, 2345 }; 2346 2347 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2348 2349 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2350 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 2351 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 2352 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 2353 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 2354 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 2355 IEEE_8021QAZ_MAX_TCS) + \ 2356 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 2357 TC_MAX_QUEUE)) 2358 2359 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2360 { 2361 int i; 2362 2363 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2364 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2365 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2366 *p += ETH_GSTRING_LEN; 2367 } 2368 } 2369 2370 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2371 { 2372 int i; 2373 2374 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2375 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2376 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2377 *p += ETH_GSTRING_LEN; 2378 } 2379 } 2380 2381 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2382 u32 stringset, u8 *data) 2383 { 2384 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2385 u8 *p = data; 2386 int i; 2387 2388 switch (stringset) { 2389 case ETH_SS_STATS: 2390 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2391 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2392 ETH_GSTRING_LEN); 2393 p += ETH_GSTRING_LEN; 2394 } 2395 2396 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 2397 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 2398 ETH_GSTRING_LEN); 2399 p += ETH_GSTRING_LEN; 2400 } 2401 2402 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2403 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2404 ETH_GSTRING_LEN); 2405 p += ETH_GSTRING_LEN; 2406 } 2407 2408 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2409 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2410 ETH_GSTRING_LEN); 2411 p += ETH_GSTRING_LEN; 2412 } 2413 2414 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2415 memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2416 ETH_GSTRING_LEN); 2417 p += ETH_GSTRING_LEN; 2418 } 2419 2420 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2421 mlxsw_sp_port_get_prio_strings(&p, i); 2422 2423 for (i = 0; i < TC_MAX_QUEUE; i++) 2424 mlxsw_sp_port_get_tc_strings(&p, i); 2425 2426 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p); 2427 break; 2428 } 2429 } 2430 2431 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2432 enum ethtool_phys_id_state state) 2433 { 2434 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2435 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2436 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2437 bool active; 2438 2439 switch (state) { 2440 case ETHTOOL_ID_ACTIVE: 2441 active = true; 2442 break; 2443 case ETHTOOL_ID_INACTIVE: 2444 active = false; 2445 break; 2446 default: 2447 return -EOPNOTSUPP; 2448 } 2449 2450 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2451 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2452 } 2453 2454 static int 2455 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2456 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2457 { 2458 switch (grp) { 2459 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2460 *p_hw_stats = mlxsw_sp_port_hw_stats; 2461 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2462 break; 2463 case MLXSW_REG_PPCNT_RFC_2863_CNT: 2464 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2465 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2466 break; 2467 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2468 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2469 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2470 break; 2471 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2472 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2473 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2474 break; 2475 case MLXSW_REG_PPCNT_DISCARD_CNT: 2476 *p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2477 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2478 break; 2479 case MLXSW_REG_PPCNT_PRIO_CNT: 2480 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2481 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2482 break; 2483 case MLXSW_REG_PPCNT_TC_CNT: 2484 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2485 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2486 break; 2487 default: 2488 WARN_ON(1); 2489 return -EOPNOTSUPP; 2490 } 2491 return 0; 2492 } 2493 2494 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2495 enum mlxsw_reg_ppcnt_grp grp, int prio, 2496 u64 *data, int data_index) 2497 { 2498 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2499 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2500 struct mlxsw_sp_port_hw_stats *hw_stats; 2501 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2502 int i, len; 2503 int err; 2504 2505 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2506 if (err) 2507 return; 2508 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2509 for (i = 0; i < len; i++) { 2510 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2511 if (!hw_stats[i].cells_bytes) 2512 continue; 2513 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2514 data[data_index + i]); 2515 } 2516 } 2517 2518 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2519 struct ethtool_stats *stats, u64 *data) 2520 { 2521 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2522 int i, data_index = 0; 2523 2524 /* IEEE 802.3 Counters */ 2525 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2526 data, data_index); 2527 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2528 2529 /* RFC 2863 Counters */ 2530 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2531 data, data_index); 2532 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2533 2534 /* RFC 2819 Counters */ 2535 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2536 data, data_index); 2537 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2538 2539 /* RFC 3635 Counters */ 2540 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2541 data, data_index); 2542 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2543 2544 /* Discard Counters */ 2545 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2546 data, data_index); 2547 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2548 2549 /* Per-Priority Counters */ 2550 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2551 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2552 data, data_index); 2553 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2554 } 2555 2556 /* Per-TC Counters */ 2557 for (i = 0; i < TC_MAX_QUEUE; i++) { 2558 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2559 data, data_index); 2560 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2561 } 2562 2563 /* PTP counters */ 2564 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port, 2565 data, data_index); 2566 data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2567 } 2568 2569 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2570 { 2571 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2572 2573 switch (sset) { 2574 case ETH_SS_STATS: 2575 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN + 2576 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2577 default: 2578 return -EOPNOTSUPP; 2579 } 2580 } 2581 2582 struct mlxsw_sp1_port_link_mode { 2583 enum ethtool_link_mode_bit_indices mask_ethtool; 2584 u32 mask; 2585 u32 speed; 2586 }; 2587 2588 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { 2589 { 2590 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2591 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2592 .speed = SPEED_100, 2593 }, 2594 { 2595 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2596 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2597 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2598 .speed = SPEED_1000, 2599 }, 2600 { 2601 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2602 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2603 .speed = SPEED_10000, 2604 }, 2605 { 2606 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2607 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2608 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2609 .speed = SPEED_10000, 2610 }, 2611 { 2612 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2613 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2614 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2615 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2616 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2617 .speed = SPEED_10000, 2618 }, 2619 { 2620 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2621 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2622 .speed = SPEED_20000, 2623 }, 2624 { 2625 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2626 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2627 .speed = SPEED_40000, 2628 }, 2629 { 2630 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2631 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2632 .speed = SPEED_40000, 2633 }, 2634 { 2635 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2636 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2637 .speed = SPEED_40000, 2638 }, 2639 { 2640 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2641 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2642 .speed = SPEED_40000, 2643 }, 2644 { 2645 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2646 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2647 .speed = SPEED_25000, 2648 }, 2649 { 2650 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2651 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2652 .speed = SPEED_25000, 2653 }, 2654 { 2655 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2656 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2657 .speed = SPEED_25000, 2658 }, 2659 { 2660 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2661 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2662 .speed = SPEED_50000, 2663 }, 2664 { 2665 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2666 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2667 .speed = SPEED_50000, 2668 }, 2669 { 2670 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2671 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2672 .speed = SPEED_50000, 2673 }, 2674 { 2675 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2676 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2677 .speed = SPEED_100000, 2678 }, 2679 { 2680 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2681 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2682 .speed = SPEED_100000, 2683 }, 2684 { 2685 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2686 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2687 .speed = SPEED_100000, 2688 }, 2689 { 2690 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2691 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2692 .speed = SPEED_100000, 2693 }, 2694 }; 2695 2696 #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) 2697 2698 static void 2699 mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2700 u32 ptys_eth_proto, 2701 struct ethtool_link_ksettings *cmd) 2702 { 2703 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2704 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2705 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2706 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2707 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2708 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2709 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2710 2711 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2712 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2713 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2714 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2715 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2716 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2717 } 2718 2719 static void 2720 mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2721 u8 width, unsigned long *mode) 2722 { 2723 int i; 2724 2725 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2726 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2727 __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2728 mode); 2729 } 2730 } 2731 2732 static u32 2733 mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 2734 { 2735 int i; 2736 2737 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2738 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2739 return mlxsw_sp1_port_link_mode[i].speed; 2740 } 2741 2742 return SPEED_UNKNOWN; 2743 } 2744 2745 static void 2746 mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2747 u32 ptys_eth_proto, 2748 struct ethtool_link_ksettings *cmd) 2749 { 2750 cmd->base.speed = SPEED_UNKNOWN; 2751 cmd->base.duplex = DUPLEX_UNKNOWN; 2752 2753 if (!carrier_ok) 2754 return; 2755 2756 cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 2757 if (cmd->base.speed != SPEED_UNKNOWN) 2758 cmd->base.duplex = DUPLEX_FULL; 2759 } 2760 2761 static u32 2762 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, 2763 const struct ethtool_link_ksettings *cmd) 2764 { 2765 u32 ptys_proto = 0; 2766 int i; 2767 2768 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2769 if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2770 cmd->link_modes.advertising)) 2771 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2772 } 2773 return ptys_proto; 2774 } 2775 2776 static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width, 2777 u32 speed) 2778 { 2779 u32 ptys_proto = 0; 2780 int i; 2781 2782 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2783 if (speed == mlxsw_sp1_port_link_mode[i].speed) 2784 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2785 } 2786 return ptys_proto; 2787 } 2788 2789 static u32 2790 mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 2791 { 2792 u32 ptys_proto = 0; 2793 int i; 2794 2795 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2796 if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed) 2797 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2798 } 2799 return ptys_proto; 2800 } 2801 2802 static int 2803 mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2804 u32 *base_speed) 2805 { 2806 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 2807 return 0; 2808 } 2809 2810 static void 2811 mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 2812 u8 local_port, u32 proto_admin, bool autoneg) 2813 { 2814 mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); 2815 } 2816 2817 static void 2818 mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 2819 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 2820 u32 *p_eth_proto_oper) 2821 { 2822 mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin, 2823 p_eth_proto_oper); 2824 } 2825 2826 static const struct mlxsw_sp_port_type_speed_ops 2827 mlxsw_sp1_port_type_speed_ops = { 2828 .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port, 2829 .from_ptys_link = mlxsw_sp1_from_ptys_link, 2830 .from_ptys_speed = mlxsw_sp1_from_ptys_speed, 2831 .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, 2832 .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link, 2833 .to_ptys_speed = mlxsw_sp1_to_ptys_speed, 2834 .to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed, 2835 .port_speed_base = mlxsw_sp1_port_speed_base, 2836 .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack, 2837 .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack, 2838 }; 2839 2840 static const enum ethtool_link_mode_bit_indices 2841 mlxsw_sp2_mask_ethtool_sgmii_100m[] = { 2842 ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2843 }; 2844 2845 #define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \ 2846 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m) 2847 2848 static const enum ethtool_link_mode_bit_indices 2849 mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = { 2850 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 2851 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2852 }; 2853 2854 #define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \ 2855 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii) 2856 2857 static const enum ethtool_link_mode_bit_indices 2858 mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = { 2859 ETHTOOL_LINK_MODE_2500baseX_Full_BIT, 2860 }; 2861 2862 #define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \ 2863 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii) 2864 2865 static const enum ethtool_link_mode_bit_indices 2866 mlxsw_sp2_mask_ethtool_5gbase_r[] = { 2867 ETHTOOL_LINK_MODE_5000baseT_Full_BIT, 2868 }; 2869 2870 #define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \ 2871 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r) 2872 2873 static const enum ethtool_link_mode_bit_indices 2874 mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = { 2875 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2876 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2877 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 2878 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 2879 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 2880 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 2881 ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 2882 }; 2883 2884 #define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \ 2885 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g) 2886 2887 static const enum ethtool_link_mode_bit_indices 2888 mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = { 2889 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2890 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2891 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2892 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2893 }; 2894 2895 #define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ 2896 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) 2897 2898 static const enum ethtool_link_mode_bit_indices 2899 mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { 2900 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2901 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2902 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2903 }; 2904 2905 #define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ 2906 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) 2907 2908 static const enum ethtool_link_mode_bit_indices 2909 mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { 2910 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2911 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2912 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2913 }; 2914 2915 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ 2916 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) 2917 2918 static const enum ethtool_link_mode_bit_indices 2919 mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { 2920 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 2921 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 2922 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 2923 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 2924 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 2925 }; 2926 2927 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ 2928 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) 2929 2930 static const enum ethtool_link_mode_bit_indices 2931 mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { 2932 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2933 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2934 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2935 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2936 }; 2937 2938 #define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ 2939 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) 2940 2941 static const enum ethtool_link_mode_bit_indices 2942 mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { 2943 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 2944 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 2945 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 2946 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 2947 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 2948 }; 2949 2950 #define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ 2951 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) 2952 2953 static const enum ethtool_link_mode_bit_indices 2954 mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { 2955 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, 2956 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, 2957 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, 2958 ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, 2959 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, 2960 }; 2961 2962 #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ 2963 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) 2964 2965 static const enum ethtool_link_mode_bit_indices 2966 mlxsw_sp2_mask_ethtool_400gaui_8[] = { 2967 ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, 2968 ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, 2969 ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, 2970 ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, 2971 ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, 2972 }; 2973 2974 #define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \ 2975 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8) 2976 2977 #define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0) 2978 #define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1) 2979 #define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2) 2980 #define MLXSW_SP_PORT_MASK_WIDTH_8X BIT(3) 2981 2982 static u8 mlxsw_sp_port_mask_width_get(u8 width) 2983 { 2984 switch (width) { 2985 case 1: 2986 return MLXSW_SP_PORT_MASK_WIDTH_1X; 2987 case 2: 2988 return MLXSW_SP_PORT_MASK_WIDTH_2X; 2989 case 4: 2990 return MLXSW_SP_PORT_MASK_WIDTH_4X; 2991 case 8: 2992 return MLXSW_SP_PORT_MASK_WIDTH_8X; 2993 default: 2994 WARN_ON_ONCE(1); 2995 return 0; 2996 } 2997 } 2998 2999 struct mlxsw_sp2_port_link_mode { 3000 const enum ethtool_link_mode_bit_indices *mask_ethtool; 3001 int m_ethtool_len; 3002 u32 mask; 3003 u32 speed; 3004 u8 mask_width; 3005 }; 3006 3007 static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { 3008 { 3009 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, 3010 .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, 3011 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, 3012 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3013 MLXSW_SP_PORT_MASK_WIDTH_2X | 3014 MLXSW_SP_PORT_MASK_WIDTH_4X | 3015 MLXSW_SP_PORT_MASK_WIDTH_8X, 3016 .speed = SPEED_100, 3017 }, 3018 { 3019 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, 3020 .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, 3021 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, 3022 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3023 MLXSW_SP_PORT_MASK_WIDTH_2X | 3024 MLXSW_SP_PORT_MASK_WIDTH_4X | 3025 MLXSW_SP_PORT_MASK_WIDTH_8X, 3026 .speed = SPEED_1000, 3027 }, 3028 { 3029 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, 3030 .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, 3031 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, 3032 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3033 MLXSW_SP_PORT_MASK_WIDTH_2X | 3034 MLXSW_SP_PORT_MASK_WIDTH_4X | 3035 MLXSW_SP_PORT_MASK_WIDTH_8X, 3036 .speed = SPEED_2500, 3037 }, 3038 { 3039 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, 3040 .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, 3041 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, 3042 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3043 MLXSW_SP_PORT_MASK_WIDTH_2X | 3044 MLXSW_SP_PORT_MASK_WIDTH_4X | 3045 MLXSW_SP_PORT_MASK_WIDTH_8X, 3046 .speed = SPEED_5000, 3047 }, 3048 { 3049 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, 3050 .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, 3051 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, 3052 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3053 MLXSW_SP_PORT_MASK_WIDTH_2X | 3054 MLXSW_SP_PORT_MASK_WIDTH_4X | 3055 MLXSW_SP_PORT_MASK_WIDTH_8X, 3056 .speed = SPEED_10000, 3057 }, 3058 { 3059 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, 3060 .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, 3061 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, 3062 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3063 MLXSW_SP_PORT_MASK_WIDTH_8X, 3064 .speed = SPEED_40000, 3065 }, 3066 { 3067 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, 3068 .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, 3069 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, 3070 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3071 MLXSW_SP_PORT_MASK_WIDTH_2X | 3072 MLXSW_SP_PORT_MASK_WIDTH_4X | 3073 MLXSW_SP_PORT_MASK_WIDTH_8X, 3074 .speed = SPEED_25000, 3075 }, 3076 { 3077 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, 3078 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, 3079 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, 3080 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X | 3081 MLXSW_SP_PORT_MASK_WIDTH_4X | 3082 MLXSW_SP_PORT_MASK_WIDTH_8X, 3083 .speed = SPEED_50000, 3084 }, 3085 { 3086 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, 3087 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, 3088 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, 3089 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X, 3090 .speed = SPEED_50000, 3091 }, 3092 { 3093 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, 3094 .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, 3095 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, 3096 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3097 MLXSW_SP_PORT_MASK_WIDTH_8X, 3098 .speed = SPEED_100000, 3099 }, 3100 { 3101 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, 3102 .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, 3103 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, 3104 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X, 3105 .speed = SPEED_100000, 3106 }, 3107 { 3108 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, 3109 .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, 3110 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, 3111 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3112 MLXSW_SP_PORT_MASK_WIDTH_8X, 3113 .speed = SPEED_200000, 3114 }, 3115 { 3116 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8, 3117 .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8, 3118 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN, 3119 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X, 3120 .speed = SPEED_400000, 3121 }, 3122 }; 3123 3124 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 3125 3126 static void 3127 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 3128 u32 ptys_eth_proto, 3129 struct ethtool_link_ksettings *cmd) 3130 { 3131 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 3132 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 3133 } 3134 3135 static void 3136 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3137 unsigned long *mode) 3138 { 3139 int i; 3140 3141 for (i = 0; i < link_mode->m_ethtool_len; i++) 3142 __set_bit(link_mode->mask_ethtool[i], mode); 3143 } 3144 3145 static void 3146 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 3147 u8 width, unsigned long *mode) 3148 { 3149 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3150 int i; 3151 3152 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3153 if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) && 3154 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3155 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3156 mode); 3157 } 3158 } 3159 3160 static u32 3161 mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 3162 { 3163 int i; 3164 3165 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3166 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3167 return mlxsw_sp2_port_link_mode[i].speed; 3168 } 3169 3170 return SPEED_UNKNOWN; 3171 } 3172 3173 static void 3174 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 3175 u32 ptys_eth_proto, 3176 struct ethtool_link_ksettings *cmd) 3177 { 3178 cmd->base.speed = SPEED_UNKNOWN; 3179 cmd->base.duplex = DUPLEX_UNKNOWN; 3180 3181 if (!carrier_ok) 3182 return; 3183 3184 cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 3185 if (cmd->base.speed != SPEED_UNKNOWN) 3186 cmd->base.duplex = DUPLEX_FULL; 3187 } 3188 3189 static bool 3190 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3191 const unsigned long *mode) 3192 { 3193 int cnt = 0; 3194 int i; 3195 3196 for (i = 0; i < link_mode->m_ethtool_len; i++) { 3197 if (test_bit(link_mode->mask_ethtool[i], mode)) 3198 cnt++; 3199 } 3200 3201 return cnt == link_mode->m_ethtool_len; 3202 } 3203 3204 static u32 3205 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, 3206 const struct ethtool_link_ksettings *cmd) 3207 { 3208 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3209 u32 ptys_proto = 0; 3210 int i; 3211 3212 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3213 if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) && 3214 mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3215 cmd->link_modes.advertising)) 3216 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3217 } 3218 return ptys_proto; 3219 } 3220 3221 static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, 3222 u8 width, u32 speed) 3223 { 3224 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3225 u32 ptys_proto = 0; 3226 int i; 3227 3228 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3229 if ((speed == mlxsw_sp2_port_link_mode[i].speed) && 3230 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3231 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3232 } 3233 return ptys_proto; 3234 } 3235 3236 static u32 3237 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 3238 { 3239 u32 ptys_proto = 0; 3240 int i; 3241 3242 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3243 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 3244 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3245 } 3246 return ptys_proto; 3247 } 3248 3249 static int 3250 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3251 u32 *base_speed) 3252 { 3253 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3254 u32 eth_proto_cap; 3255 int err; 3256 3257 /* In Spectrum-2, the speed of 1x can change from port to port, so query 3258 * it from firmware. 3259 */ 3260 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3261 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3262 if (err) 3263 return err; 3264 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3265 3266 if (eth_proto_cap & 3267 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3268 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3269 return 0; 3270 } 3271 3272 if (eth_proto_cap & 3273 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3274 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3275 return 0; 3276 } 3277 3278 return -EIO; 3279 } 3280 3281 static void 3282 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3283 u8 local_port, u32 proto_admin, 3284 bool autoneg) 3285 { 3286 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3287 } 3288 3289 static void 3290 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3291 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3292 u32 *p_eth_proto_oper) 3293 { 3294 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3295 p_eth_proto_admin, p_eth_proto_oper); 3296 } 3297 3298 static const struct mlxsw_sp_port_type_speed_ops 3299 mlxsw_sp2_port_type_speed_ops = { 3300 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3301 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3302 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3303 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3304 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3305 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3306 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3307 .port_speed_base = mlxsw_sp2_port_speed_base, 3308 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3309 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3310 }; 3311 3312 static void 3313 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3314 u8 width, struct ethtool_link_ksettings *cmd) 3315 { 3316 const struct mlxsw_sp_port_type_speed_ops *ops; 3317 3318 ops = mlxsw_sp->port_type_speed_ops; 3319 3320 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3321 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3322 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3323 3324 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3325 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, 3326 cmd->link_modes.supported); 3327 } 3328 3329 static void 3330 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3331 u32 eth_proto_admin, bool autoneg, u8 width, 3332 struct ethtool_link_ksettings *cmd) 3333 { 3334 const struct mlxsw_sp_port_type_speed_ops *ops; 3335 3336 ops = mlxsw_sp->port_type_speed_ops; 3337 3338 if (!autoneg) 3339 return; 3340 3341 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3342 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, 3343 cmd->link_modes.advertising); 3344 } 3345 3346 static u8 3347 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 3348 { 3349 switch (connector_type) { 3350 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3351 return PORT_OTHER; 3352 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3353 return PORT_NONE; 3354 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3355 return PORT_TP; 3356 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3357 return PORT_AUI; 3358 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3359 return PORT_BNC; 3360 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3361 return PORT_MII; 3362 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3363 return PORT_FIBRE; 3364 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3365 return PORT_DA; 3366 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3367 return PORT_OTHER; 3368 default: 3369 WARN_ON_ONCE(1); 3370 return PORT_OTHER; 3371 } 3372 } 3373 3374 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3375 struct ethtool_link_ksettings *cmd) 3376 { 3377 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3378 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3379 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3380 const struct mlxsw_sp_port_type_speed_ops *ops; 3381 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3382 u8 connector_type; 3383 bool autoneg; 3384 int err; 3385 3386 ops = mlxsw_sp->port_type_speed_ops; 3387 3388 autoneg = mlxsw_sp_port->link.autoneg; 3389 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3390 0, false); 3391 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3392 if (err) 3393 return err; 3394 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3395 ð_proto_admin, ð_proto_oper); 3396 3397 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, 3398 mlxsw_sp_port->mapping.width, cmd); 3399 3400 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3401 mlxsw_sp_port->mapping.width, cmd); 3402 3403 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3404 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3405 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3406 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3407 eth_proto_oper, cmd); 3408 3409 return 0; 3410 } 3411 3412 static int 3413 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3414 const struct ethtool_link_ksettings *cmd) 3415 { 3416 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3417 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3418 const struct mlxsw_sp_port_type_speed_ops *ops; 3419 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3420 u32 eth_proto_cap, eth_proto_new; 3421 bool autoneg; 3422 int err; 3423 3424 ops = mlxsw_sp->port_type_speed_ops; 3425 3426 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3427 0, false); 3428 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3429 if (err) 3430 return err; 3431 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3432 3433 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3434 eth_proto_new = autoneg ? 3435 ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, 3436 cmd) : 3437 ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, 3438 cmd->base.speed); 3439 3440 eth_proto_new = eth_proto_new & eth_proto_cap; 3441 if (!eth_proto_new) { 3442 netdev_err(dev, "No supported speed requested\n"); 3443 return -EINVAL; 3444 } 3445 3446 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3447 eth_proto_new, autoneg); 3448 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3449 if (err) 3450 return err; 3451 3452 mlxsw_sp_port->link.autoneg = autoneg; 3453 3454 if (!netif_running(dev)) 3455 return 0; 3456 3457 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3458 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3459 3460 return 0; 3461 } 3462 3463 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3464 struct ethtool_modinfo *modinfo) 3465 { 3466 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3467 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3468 int err; 3469 3470 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3471 mlxsw_sp_port->mapping.module, 3472 modinfo); 3473 3474 return err; 3475 } 3476 3477 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3478 struct ethtool_eeprom *ee, 3479 u8 *data) 3480 { 3481 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3482 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3483 int err; 3484 3485 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3486 mlxsw_sp_port->mapping.module, ee, 3487 data); 3488 3489 return err; 3490 } 3491 3492 static int 3493 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3494 { 3495 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3496 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3497 3498 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3499 } 3500 3501 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3502 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 3503 .get_link = ethtool_op_get_link, 3504 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 3505 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3506 .get_strings = mlxsw_sp_port_get_strings, 3507 .set_phys_id = mlxsw_sp_port_set_phys_id, 3508 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3509 .get_sset_count = mlxsw_sp_port_get_sset_count, 3510 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3511 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3512 .get_module_info = mlxsw_sp_get_module_info, 3513 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3514 .get_ts_info = mlxsw_sp_get_ts_info, 3515 }; 3516 3517 static int 3518 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) 3519 { 3520 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3521 const struct mlxsw_sp_port_type_speed_ops *ops; 3522 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3523 u32 eth_proto_admin; 3524 u32 upper_speed; 3525 u32 base_speed; 3526 int err; 3527 3528 ops = mlxsw_sp->port_type_speed_ops; 3529 3530 err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, 3531 &base_speed); 3532 if (err) 3533 return err; 3534 upper_speed = base_speed * mlxsw_sp_port->mapping.width; 3535 3536 eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); 3537 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3538 eth_proto_admin, mlxsw_sp_port->link.autoneg); 3539 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3540 } 3541 3542 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3543 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3544 bool dwrr, u8 dwrr_weight) 3545 { 3546 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3547 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3548 3549 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3550 next_index); 3551 mlxsw_reg_qeec_de_set(qeec_pl, true); 3552 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3553 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 3554 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3555 } 3556 3557 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 3558 enum mlxsw_reg_qeec_hr hr, u8 index, 3559 u8 next_index, u32 maxrate) 3560 { 3561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3562 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3563 3564 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3565 next_index); 3566 mlxsw_reg_qeec_mase_set(qeec_pl, true); 3567 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 3568 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3569 } 3570 3571 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 3572 enum mlxsw_reg_qeec_hr hr, u8 index, 3573 u8 next_index, u32 minrate) 3574 { 3575 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3576 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3577 3578 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3579 next_index); 3580 mlxsw_reg_qeec_mise_set(qeec_pl, true); 3581 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 3582 3583 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3584 } 3585 3586 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 3587 u8 switch_prio, u8 tclass) 3588 { 3589 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3590 char qtct_pl[MLXSW_REG_QTCT_LEN]; 3591 3592 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 3593 tclass); 3594 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 3595 } 3596 3597 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 3598 { 3599 int err, i; 3600 3601 /* Setup the elements hierarcy, so that each TC is linked to 3602 * one subgroup, which are all member in the same group. 3603 */ 3604 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3605 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 3606 0); 3607 if (err) 3608 return err; 3609 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3610 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3611 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 3612 0, false, 0); 3613 if (err) 3614 return err; 3615 } 3616 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3617 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3618 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 3619 false, 0); 3620 if (err) 3621 return err; 3622 3623 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3624 MLXSW_REG_QEEC_HIERARCY_TC, 3625 i + 8, i, 3626 true, 100); 3627 if (err) 3628 return err; 3629 } 3630 3631 /* Make sure the max shaper is disabled in all hierarchies that support 3632 * it. Note that this disables ptps (PTP shaper), but that is intended 3633 * for the initial configuration. 3634 */ 3635 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3636 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 3637 MLXSW_REG_QEEC_MAS_DIS); 3638 if (err) 3639 return err; 3640 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3641 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3642 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 3643 i, 0, 3644 MLXSW_REG_QEEC_MAS_DIS); 3645 if (err) 3646 return err; 3647 } 3648 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3649 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3650 MLXSW_REG_QEEC_HIERARCY_TC, 3651 i, i, 3652 MLXSW_REG_QEEC_MAS_DIS); 3653 if (err) 3654 return err; 3655 3656 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3657 MLXSW_REG_QEEC_HIERARCY_TC, 3658 i + 8, i, 3659 MLXSW_REG_QEEC_MAS_DIS); 3660 if (err) 3661 return err; 3662 } 3663 3664 /* Configure the min shaper for multicast TCs. */ 3665 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3666 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3667 MLXSW_REG_QEEC_HIERARCY_TC, 3668 i + 8, i, 3669 MLXSW_REG_QEEC_MIS_MIN); 3670 if (err) 3671 return err; 3672 } 3673 3674 /* Map all priorities to traffic class 0. */ 3675 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3676 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3677 if (err) 3678 return err; 3679 } 3680 3681 return 0; 3682 } 3683 3684 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3685 bool enable) 3686 { 3687 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3688 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3689 3690 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3691 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3692 } 3693 3694 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3695 u8 split_base_local_port, 3696 struct mlxsw_sp_port_mapping *port_mapping) 3697 { 3698 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3699 bool split = !!split_base_local_port; 3700 struct mlxsw_sp_port *mlxsw_sp_port; 3701 struct net_device *dev; 3702 int err; 3703 3704 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, 3705 port_mapping->module + 1, split, 3706 port_mapping->lane / port_mapping->width, 3707 mlxsw_sp->base_mac, 3708 sizeof(mlxsw_sp->base_mac)); 3709 if (err) { 3710 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3711 local_port); 3712 return err; 3713 } 3714 3715 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3716 if (!dev) { 3717 err = -ENOMEM; 3718 goto err_alloc_etherdev; 3719 } 3720 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3721 dev_net_set(dev, mlxsw_sp_net(mlxsw_sp)); 3722 mlxsw_sp_port = netdev_priv(dev); 3723 mlxsw_sp_port->dev = dev; 3724 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3725 mlxsw_sp_port->local_port = local_port; 3726 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 3727 mlxsw_sp_port->split = split; 3728 mlxsw_sp_port->split_base_local_port = split_base_local_port; 3729 mlxsw_sp_port->mapping = *port_mapping; 3730 mlxsw_sp_port->link.autoneg = 1; 3731 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3732 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3733 3734 mlxsw_sp_port->pcpu_stats = 3735 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 3736 if (!mlxsw_sp_port->pcpu_stats) { 3737 err = -ENOMEM; 3738 goto err_alloc_stats; 3739 } 3740 3741 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3742 GFP_KERNEL); 3743 if (!mlxsw_sp_port->sample) { 3744 err = -ENOMEM; 3745 goto err_alloc_sample; 3746 } 3747 3748 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3749 &update_stats_cache); 3750 3751 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3752 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3753 3754 err = mlxsw_sp_port_module_map(mlxsw_sp_port); 3755 if (err) { 3756 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3757 mlxsw_sp_port->local_port); 3758 goto err_port_module_map; 3759 } 3760 3761 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3762 if (err) { 3763 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3764 mlxsw_sp_port->local_port); 3765 goto err_port_swid_set; 3766 } 3767 3768 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3769 if (err) { 3770 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3771 mlxsw_sp_port->local_port); 3772 goto err_dev_addr_init; 3773 } 3774 3775 netif_carrier_off(dev); 3776 3777 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3778 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3779 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3780 3781 dev->min_mtu = 0; 3782 dev->max_mtu = ETH_MAX_MTU; 3783 3784 /* Each packet needs to have a Tx header (metadata) on top all other 3785 * headers. 3786 */ 3787 dev->needed_headroom = MLXSW_TXHDR_LEN; 3788 3789 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3790 if (err) { 3791 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3792 mlxsw_sp_port->local_port); 3793 goto err_port_system_port_mapping_set; 3794 } 3795 3796 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port); 3797 if (err) { 3798 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3799 mlxsw_sp_port->local_port); 3800 goto err_port_speed_by_width_set; 3801 } 3802 3803 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3804 if (err) { 3805 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3806 mlxsw_sp_port->local_port); 3807 goto err_port_mtu_set; 3808 } 3809 3810 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3811 if (err) 3812 goto err_port_admin_status_set; 3813 3814 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3815 if (err) { 3816 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3817 mlxsw_sp_port->local_port); 3818 goto err_port_buffers_init; 3819 } 3820 3821 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3822 if (err) { 3823 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3824 mlxsw_sp_port->local_port); 3825 goto err_port_ets_init; 3826 } 3827 3828 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3829 if (err) { 3830 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3831 mlxsw_sp_port->local_port); 3832 goto err_port_tc_mc_mode; 3833 } 3834 3835 /* ETS and buffers must be initialized before DCB. */ 3836 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3837 if (err) { 3838 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3839 mlxsw_sp_port->local_port); 3840 goto err_port_dcb_init; 3841 } 3842 3843 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3844 if (err) { 3845 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3846 mlxsw_sp_port->local_port); 3847 goto err_port_fids_init; 3848 } 3849 3850 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3851 if (err) { 3852 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3853 mlxsw_sp_port->local_port); 3854 goto err_port_qdiscs_init; 3855 } 3856 3857 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false, 3858 false); 3859 if (err) { 3860 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n", 3861 mlxsw_sp_port->local_port); 3862 goto err_port_vlan_clear; 3863 } 3864 3865 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3866 if (err) { 3867 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3868 mlxsw_sp_port->local_port); 3869 goto err_port_nve_init; 3870 } 3871 3872 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3873 if (err) { 3874 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3875 mlxsw_sp_port->local_port); 3876 goto err_port_pvid_set; 3877 } 3878 3879 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3880 MLXSW_SP_DEFAULT_VID); 3881 if (IS_ERR(mlxsw_sp_port_vlan)) { 3882 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3883 mlxsw_sp_port->local_port); 3884 err = PTR_ERR(mlxsw_sp_port_vlan); 3885 goto err_port_vlan_create; 3886 } 3887 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3888 3889 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 3890 mlxsw_sp->ptp_ops->shaper_work); 3891 3892 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3893 err = register_netdev(dev); 3894 if (err) { 3895 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3896 mlxsw_sp_port->local_port); 3897 goto err_register_netdev; 3898 } 3899 3900 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3901 mlxsw_sp_port, dev); 3902 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3903 return 0; 3904 3905 err_register_netdev: 3906 mlxsw_sp->ports[local_port] = NULL; 3907 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 3908 err_port_vlan_create: 3909 err_port_pvid_set: 3910 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3911 err_port_nve_init: 3912 err_port_vlan_clear: 3913 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3914 err_port_qdiscs_init: 3915 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3916 err_port_fids_init: 3917 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3918 err_port_dcb_init: 3919 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3920 err_port_tc_mc_mode: 3921 err_port_ets_init: 3922 err_port_buffers_init: 3923 err_port_admin_status_set: 3924 err_port_mtu_set: 3925 err_port_speed_by_width_set: 3926 err_port_system_port_mapping_set: 3927 err_dev_addr_init: 3928 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3929 err_port_swid_set: 3930 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3931 err_port_module_map: 3932 kfree(mlxsw_sp_port->sample); 3933 err_alloc_sample: 3934 free_percpu(mlxsw_sp_port->pcpu_stats); 3935 err_alloc_stats: 3936 free_netdev(dev); 3937 err_alloc_etherdev: 3938 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3939 return err; 3940 } 3941 3942 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3943 { 3944 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3945 3946 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3947 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 3948 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 3949 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3950 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3951 mlxsw_sp->ports[local_port] = NULL; 3952 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 3953 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3954 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3955 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3956 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3957 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3958 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3959 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3960 kfree(mlxsw_sp_port->sample); 3961 free_percpu(mlxsw_sp_port->pcpu_stats); 3962 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3963 free_netdev(mlxsw_sp_port->dev); 3964 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3965 } 3966 3967 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 3968 { 3969 struct mlxsw_sp_port *mlxsw_sp_port; 3970 int err; 3971 3972 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 3973 if (!mlxsw_sp_port) 3974 return -ENOMEM; 3975 3976 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3977 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 3978 3979 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 3980 mlxsw_sp_port, 3981 mlxsw_sp->base_mac, 3982 sizeof(mlxsw_sp->base_mac)); 3983 if (err) { 3984 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 3985 goto err_core_cpu_port_init; 3986 } 3987 3988 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 3989 return 0; 3990 3991 err_core_cpu_port_init: 3992 kfree(mlxsw_sp_port); 3993 return err; 3994 } 3995 3996 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 3997 { 3998 struct mlxsw_sp_port *mlxsw_sp_port = 3999 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 4000 4001 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 4002 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 4003 kfree(mlxsw_sp_port); 4004 } 4005 4006 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 4007 { 4008 return mlxsw_sp->ports[local_port] != NULL; 4009 } 4010 4011 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 4012 { 4013 int i; 4014 4015 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 4016 if (mlxsw_sp_port_created(mlxsw_sp, i)) 4017 mlxsw_sp_port_remove(mlxsw_sp, i); 4018 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4019 kfree(mlxsw_sp->ports); 4020 } 4021 4022 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 4023 { 4024 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 4025 struct mlxsw_sp_port_mapping *port_mapping; 4026 size_t alloc_size; 4027 int i; 4028 int err; 4029 4030 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 4031 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 4032 if (!mlxsw_sp->ports) 4033 return -ENOMEM; 4034 4035 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 4036 if (err) 4037 goto err_cpu_port_create; 4038 4039 for (i = 1; i < max_ports; i++) { 4040 port_mapping = mlxsw_sp->port_mapping[i]; 4041 if (!port_mapping) 4042 continue; 4043 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); 4044 if (err) 4045 goto err_port_create; 4046 } 4047 return 0; 4048 4049 err_port_create: 4050 for (i--; i >= 1; i--) 4051 if (mlxsw_sp_port_created(mlxsw_sp, i)) 4052 mlxsw_sp_port_remove(mlxsw_sp, i); 4053 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4054 err_cpu_port_create: 4055 kfree(mlxsw_sp->ports); 4056 return err; 4057 } 4058 4059 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 4060 { 4061 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 4062 struct mlxsw_sp_port_mapping port_mapping; 4063 int i; 4064 int err; 4065 4066 mlxsw_sp->port_mapping = kcalloc(max_ports, 4067 sizeof(struct mlxsw_sp_port_mapping *), 4068 GFP_KERNEL); 4069 if (!mlxsw_sp->port_mapping) 4070 return -ENOMEM; 4071 4072 for (i = 1; i < max_ports; i++) { 4073 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 4074 if (err) 4075 goto err_port_module_info_get; 4076 if (!port_mapping.width) 4077 continue; 4078 4079 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, 4080 sizeof(port_mapping), 4081 GFP_KERNEL); 4082 if (!mlxsw_sp->port_mapping[i]) { 4083 err = -ENOMEM; 4084 goto err_port_module_info_dup; 4085 } 4086 } 4087 return 0; 4088 4089 err_port_module_info_get: 4090 err_port_module_info_dup: 4091 for (i--; i >= 1; i--) 4092 kfree(mlxsw_sp->port_mapping[i]); 4093 kfree(mlxsw_sp->port_mapping); 4094 return err; 4095 } 4096 4097 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 4098 { 4099 int i; 4100 4101 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 4102 kfree(mlxsw_sp->port_mapping[i]); 4103 kfree(mlxsw_sp->port_mapping); 4104 } 4105 4106 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width) 4107 { 4108 u8 offset = (local_port - 1) % max_width; 4109 4110 return local_port - offset; 4111 } 4112 4113 static int 4114 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 4115 struct mlxsw_sp_port_mapping *port_mapping, 4116 unsigned int count, u8 offset) 4117 { 4118 struct mlxsw_sp_port_mapping split_port_mapping; 4119 int err, i; 4120 4121 split_port_mapping = *port_mapping; 4122 split_port_mapping.width /= count; 4123 for (i = 0; i < count; i++) { 4124 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 4125 base_port, &split_port_mapping); 4126 if (err) 4127 goto err_port_create; 4128 split_port_mapping.lane += split_port_mapping.width; 4129 } 4130 4131 return 0; 4132 4133 err_port_create: 4134 for (i--; i >= 0; i--) 4135 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4136 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4137 return err; 4138 } 4139 4140 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 4141 u8 base_port, 4142 unsigned int count, u8 offset) 4143 { 4144 struct mlxsw_sp_port_mapping *port_mapping; 4145 int i; 4146 4147 /* Go over original unsplit ports in the gap and recreate them. */ 4148 for (i = 0; i < count * offset; i++) { 4149 port_mapping = mlxsw_sp->port_mapping[base_port + i]; 4150 if (!port_mapping) 4151 continue; 4152 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping); 4153 } 4154 } 4155 4156 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, 4157 unsigned int count, 4158 unsigned int max_width) 4159 { 4160 enum mlxsw_res_id local_ports_in_x_res_id; 4161 int split_width = max_width / count; 4162 4163 if (split_width == 1) 4164 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X; 4165 else if (split_width == 2) 4166 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X; 4167 else if (split_width == 4) 4168 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X; 4169 else 4170 return -EINVAL; 4171 4172 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id)) 4173 return -EINVAL; 4174 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); 4175 } 4176 4177 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 4178 unsigned int count, 4179 struct netlink_ext_ack *extack) 4180 { 4181 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4182 struct mlxsw_sp_port_mapping port_mapping; 4183 struct mlxsw_sp_port *mlxsw_sp_port; 4184 int max_width; 4185 u8 base_port; 4186 int offset; 4187 int i; 4188 int err; 4189 4190 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4191 if (!mlxsw_sp_port) { 4192 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4193 local_port); 4194 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 4195 return -EINVAL; 4196 } 4197 4198 /* Split ports cannot be split. */ 4199 if (mlxsw_sp_port->split) { 4200 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 4201 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); 4202 return -EINVAL; 4203 } 4204 4205 max_width = mlxsw_core_module_max_width(mlxsw_core, 4206 mlxsw_sp_port->mapping.module); 4207 if (max_width < 0) { 4208 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 4209 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 4210 return max_width; 4211 } 4212 4213 /* Split port with non-max and 1 module width cannot be split. */ 4214 if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) { 4215 netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n"); 4216 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); 4217 return -EINVAL; 4218 } 4219 4220 if (count == 1 || !is_power_of_2(count) || count > max_width) { 4221 netdev_err(mlxsw_sp_port->dev, "Invalid split count\n"); 4222 NL_SET_ERR_MSG_MOD(extack, "Invalid split count"); 4223 return -EINVAL; 4224 } 4225 4226 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 4227 if (offset < 0) { 4228 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 4229 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 4230 return -EINVAL; 4231 } 4232 4233 /* Only in case max split is being done, the local port and 4234 * base port may differ. 4235 */ 4236 base_port = count == max_width ? 4237 mlxsw_sp_cluster_base_port_get(local_port, max_width) : 4238 local_port; 4239 4240 for (i = 0; i < count * offset; i++) { 4241 /* Expect base port to exist and also the one in the middle in 4242 * case of maximal split count. 4243 */ 4244 if (i == 0 || (count == max_width && i == count / 2)) 4245 continue; 4246 4247 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { 4248 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 4249 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 4250 return -EINVAL; 4251 } 4252 } 4253 4254 port_mapping = mlxsw_sp_port->mapping; 4255 4256 for (i = 0; i < count; i++) 4257 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4258 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4259 4260 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, 4261 count, offset); 4262 if (err) { 4263 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 4264 goto err_port_split_create; 4265 } 4266 4267 return 0; 4268 4269 err_port_split_create: 4270 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 4271 return err; 4272 } 4273 4274 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 4275 struct netlink_ext_ack *extack) 4276 { 4277 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4278 struct mlxsw_sp_port *mlxsw_sp_port; 4279 unsigned int count; 4280 int max_width; 4281 u8 base_port; 4282 int offset; 4283 int i; 4284 4285 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4286 if (!mlxsw_sp_port) { 4287 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4288 local_port); 4289 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 4290 return -EINVAL; 4291 } 4292 4293 if (!mlxsw_sp_port->split) { 4294 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 4295 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 4296 return -EINVAL; 4297 } 4298 4299 max_width = mlxsw_core_module_max_width(mlxsw_core, 4300 mlxsw_sp_port->mapping.module); 4301 if (max_width < 0) { 4302 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 4303 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 4304 return max_width; 4305 } 4306 4307 count = max_width / mlxsw_sp_port->mapping.width; 4308 4309 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 4310 if (WARN_ON(offset < 0)) { 4311 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 4312 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 4313 return -EINVAL; 4314 } 4315 4316 base_port = mlxsw_sp_port->split_base_local_port; 4317 4318 for (i = 0; i < count; i++) 4319 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4320 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4321 4322 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 4323 4324 return 0; 4325 } 4326 4327 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 4328 char *pude_pl, void *priv) 4329 { 4330 struct mlxsw_sp *mlxsw_sp = priv; 4331 struct mlxsw_sp_port *mlxsw_sp_port; 4332 enum mlxsw_reg_pude_oper_status status; 4333 u8 local_port; 4334 4335 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 4336 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4337 if (!mlxsw_sp_port) 4338 return; 4339 4340 status = mlxsw_reg_pude_oper_status_get(pude_pl); 4341 if (status == MLXSW_PORT_OPER_STATUS_UP) { 4342 netdev_info(mlxsw_sp_port->dev, "link up\n"); 4343 netif_carrier_on(mlxsw_sp_port->dev); 4344 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 4345 } else { 4346 netdev_info(mlxsw_sp_port->dev, "link down\n"); 4347 netif_carrier_off(mlxsw_sp_port->dev); 4348 } 4349 } 4350 4351 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 4352 char *mtpptr_pl, bool ingress) 4353 { 4354 u8 local_port; 4355 u8 num_rec; 4356 int i; 4357 4358 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 4359 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4360 for (i = 0; i < num_rec; i++) { 4361 u8 domain_number; 4362 u8 message_type; 4363 u16 sequence_id; 4364 u64 timestamp; 4365 4366 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4367 &domain_number, &sequence_id, 4368 ×tamp); 4369 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4370 message_type, domain_number, 4371 sequence_id, timestamp); 4372 } 4373 } 4374 4375 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4376 char *mtpptr_pl, void *priv) 4377 { 4378 struct mlxsw_sp *mlxsw_sp = priv; 4379 4380 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4381 } 4382 4383 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4384 char *mtpptr_pl, void *priv) 4385 { 4386 struct mlxsw_sp *mlxsw_sp = priv; 4387 4388 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4389 } 4390 4391 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4392 u8 local_port, void *priv) 4393 { 4394 struct mlxsw_sp *mlxsw_sp = priv; 4395 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4396 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4397 4398 if (unlikely(!mlxsw_sp_port)) { 4399 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4400 local_port); 4401 return; 4402 } 4403 4404 skb->dev = mlxsw_sp_port->dev; 4405 4406 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4407 u64_stats_update_begin(&pcpu_stats->syncp); 4408 pcpu_stats->rx_packets++; 4409 pcpu_stats->rx_bytes += skb->len; 4410 u64_stats_update_end(&pcpu_stats->syncp); 4411 4412 skb->protocol = eth_type_trans(skb, skb->dev); 4413 netif_receive_skb(skb); 4414 } 4415 4416 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4417 void *priv) 4418 { 4419 skb->offload_fwd_mark = 1; 4420 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4421 } 4422 4423 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4424 u8 local_port, void *priv) 4425 { 4426 skb->offload_l3_fwd_mark = 1; 4427 skb->offload_fwd_mark = 1; 4428 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4429 } 4430 4431 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 4432 void *priv) 4433 { 4434 struct mlxsw_sp *mlxsw_sp = priv; 4435 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4436 struct psample_group *psample_group; 4437 u32 size; 4438 4439 if (unlikely(!mlxsw_sp_port)) { 4440 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4441 local_port); 4442 goto out; 4443 } 4444 if (unlikely(!mlxsw_sp_port->sample)) { 4445 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 4446 local_port); 4447 goto out; 4448 } 4449 4450 size = mlxsw_sp_port->sample->truncate ? 4451 mlxsw_sp_port->sample->trunc_size : skb->len; 4452 4453 rcu_read_lock(); 4454 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 4455 if (!psample_group) 4456 goto out_unlock; 4457 psample_sample_packet(psample_group, skb, size, 4458 mlxsw_sp_port->dev->ifindex, 0, 4459 mlxsw_sp_port->sample->rate); 4460 out_unlock: 4461 rcu_read_unlock(); 4462 out: 4463 consume_skb(skb); 4464 } 4465 4466 static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port, 4467 void *priv) 4468 { 4469 struct mlxsw_sp *mlxsw_sp = priv; 4470 4471 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 4472 } 4473 4474 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4475 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4476 _is_ctrl, SP_##_trap_group, DISCARD) 4477 4478 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4479 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4480 _is_ctrl, SP_##_trap_group, DISCARD) 4481 4482 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4483 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 4484 _is_ctrl, SP_##_trap_group, DISCARD) 4485 4486 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 4487 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 4488 4489 static const struct mlxsw_listener mlxsw_sp_listener[] = { 4490 /* Events */ 4491 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 4492 /* L2 traps */ 4493 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 4494 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 4495 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU, 4496 false, SP_LLDP, DISCARD), 4497 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 4498 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 4499 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 4500 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 4501 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 4502 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 4503 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 4504 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 4505 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 4506 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 4507 false), 4508 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 4509 false), 4510 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 4511 false), 4512 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 4513 false), 4514 /* L3 traps */ 4515 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 4516 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 4517 MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false), 4518 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 4519 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 4520 false), 4521 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 4522 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 4523 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 4524 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 4525 false), 4526 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 4527 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 4528 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 4529 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 4530 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 4531 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 4532 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 4533 false), 4534 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 4535 false), 4536 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 4537 false), 4538 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 4539 false), 4540 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 4541 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 4542 false), 4543 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), 4544 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), 4545 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 4546 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 4547 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), 4548 MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), 4549 MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), 4550 MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), 4551 /* PKT Sample trap */ 4552 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 4553 false, SP_IP2ME, DISCARD), 4554 /* ACL trap */ 4555 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 4556 /* Multicast Router Traps */ 4557 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 4558 MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), 4559 MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), 4560 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 4561 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 4562 /* NVE traps */ 4563 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), 4564 MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false), 4565 /* PTP traps */ 4566 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU, 4567 false, SP_PTP0, DISCARD), 4568 MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false), 4569 }; 4570 4571 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 4572 /* Events */ 4573 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 4574 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 4575 }; 4576 4577 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 4578 { 4579 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 4580 enum mlxsw_reg_qpcr_ir_units ir_units; 4581 int max_cpu_policers; 4582 bool is_bytes; 4583 u8 burst_size; 4584 u32 rate; 4585 int i, err; 4586 4587 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 4588 return -EIO; 4589 4590 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4591 4592 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 4593 for (i = 0; i < max_cpu_policers; i++) { 4594 is_bytes = false; 4595 switch (i) { 4596 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4597 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4598 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4599 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4600 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4601 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 4602 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4603 rate = 128; 4604 burst_size = 7; 4605 break; 4606 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4607 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4608 rate = 16 * 1024; 4609 burst_size = 10; 4610 break; 4611 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4612 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4613 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4614 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 4615 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4616 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4617 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4618 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4619 rate = 1024; 4620 burst_size = 7; 4621 break; 4622 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4623 rate = 1024; 4624 burst_size = 7; 4625 break; 4626 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4627 rate = 24 * 1024; 4628 burst_size = 12; 4629 break; 4630 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: 4631 rate = 19 * 1024; 4632 burst_size = 12; 4633 break; 4634 default: 4635 continue; 4636 } 4637 4638 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 4639 burst_size); 4640 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 4641 if (err) 4642 return err; 4643 } 4644 4645 return 0; 4646 } 4647 4648 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 4649 { 4650 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4651 enum mlxsw_reg_htgt_trap_group i; 4652 int max_cpu_policers; 4653 int max_trap_groups; 4654 u8 priority, tc; 4655 u16 policer_id; 4656 int err; 4657 4658 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 4659 return -EIO; 4660 4661 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 4662 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4663 4664 for (i = 0; i < max_trap_groups; i++) { 4665 policer_id = i; 4666 switch (i) { 4667 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4668 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4669 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4670 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4671 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4672 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4673 priority = 5; 4674 tc = 5; 4675 break; 4676 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4677 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4678 priority = 4; 4679 tc = 4; 4680 break; 4681 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4682 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4683 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4684 priority = 3; 4685 tc = 3; 4686 break; 4687 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4688 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4689 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 4690 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: 4691 priority = 2; 4692 tc = 2; 4693 break; 4694 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 4695 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4696 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4697 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4698 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4699 priority = 1; 4700 tc = 1; 4701 break; 4702 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 4703 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 4704 tc = MLXSW_REG_HTGT_DEFAULT_TC; 4705 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 4706 break; 4707 default: 4708 continue; 4709 } 4710 4711 if (max_cpu_policers <= policer_id && 4712 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 4713 return -EIO; 4714 4715 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 4716 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 4717 if (err) 4718 return err; 4719 } 4720 4721 return 0; 4722 } 4723 4724 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp, 4725 const struct mlxsw_listener listeners[], 4726 size_t listeners_count) 4727 { 4728 int i; 4729 int err; 4730 4731 for (i = 0; i < listeners_count; i++) { 4732 err = mlxsw_core_trap_register(mlxsw_sp->core, 4733 &listeners[i], 4734 mlxsw_sp); 4735 if (err) 4736 goto err_listener_register; 4737 4738 } 4739 return 0; 4740 4741 err_listener_register: 4742 for (i--; i >= 0; i--) { 4743 mlxsw_core_trap_unregister(mlxsw_sp->core, 4744 &listeners[i], 4745 mlxsw_sp); 4746 } 4747 return err; 4748 } 4749 4750 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp, 4751 const struct mlxsw_listener listeners[], 4752 size_t listeners_count) 4753 { 4754 int i; 4755 4756 for (i = 0; i < listeners_count; i++) { 4757 mlxsw_core_trap_unregister(mlxsw_sp->core, 4758 &listeners[i], 4759 mlxsw_sp); 4760 } 4761 } 4762 4763 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 4764 { 4765 int err; 4766 4767 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 4768 if (err) 4769 return err; 4770 4771 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 4772 if (err) 4773 return err; 4774 4775 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener, 4776 ARRAY_SIZE(mlxsw_sp_listener)); 4777 if (err) 4778 return err; 4779 4780 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners, 4781 mlxsw_sp->listeners_count); 4782 if (err) 4783 goto err_extra_traps_init; 4784 4785 return 0; 4786 4787 err_extra_traps_init: 4788 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 4789 ARRAY_SIZE(mlxsw_sp_listener)); 4790 return err; 4791 } 4792 4793 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 4794 { 4795 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners, 4796 mlxsw_sp->listeners_count); 4797 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 4798 ARRAY_SIZE(mlxsw_sp_listener)); 4799 } 4800 4801 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 4802 4803 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 4804 { 4805 char slcr_pl[MLXSW_REG_SLCR_LEN]; 4806 u32 seed; 4807 int err; 4808 4809 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 4810 MLXSW_SP_LAG_SEED_INIT); 4811 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 4812 MLXSW_REG_SLCR_LAG_HASH_DMAC | 4813 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 4814 MLXSW_REG_SLCR_LAG_HASH_VLANID | 4815 MLXSW_REG_SLCR_LAG_HASH_SIP | 4816 MLXSW_REG_SLCR_LAG_HASH_DIP | 4817 MLXSW_REG_SLCR_LAG_HASH_SPORT | 4818 MLXSW_REG_SLCR_LAG_HASH_DPORT | 4819 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 4820 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 4821 if (err) 4822 return err; 4823 4824 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 4825 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 4826 return -EIO; 4827 4828 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 4829 sizeof(struct mlxsw_sp_upper), 4830 GFP_KERNEL); 4831 if (!mlxsw_sp->lags) 4832 return -ENOMEM; 4833 4834 return 0; 4835 } 4836 4837 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 4838 { 4839 kfree(mlxsw_sp->lags); 4840 } 4841 4842 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 4843 { 4844 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4845 4846 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 4847 MLXSW_REG_HTGT_INVALID_POLICER, 4848 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 4849 MLXSW_REG_HTGT_DEFAULT_TC); 4850 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 4851 } 4852 4853 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 4854 .clock_init = mlxsw_sp1_ptp_clock_init, 4855 .clock_fini = mlxsw_sp1_ptp_clock_fini, 4856 .init = mlxsw_sp1_ptp_init, 4857 .fini = mlxsw_sp1_ptp_fini, 4858 .receive = mlxsw_sp1_ptp_receive, 4859 .transmitted = mlxsw_sp1_ptp_transmitted, 4860 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 4861 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 4862 .shaper_work = mlxsw_sp1_ptp_shaper_work, 4863 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 4864 .get_stats_count = mlxsw_sp1_get_stats_count, 4865 .get_stats_strings = mlxsw_sp1_get_stats_strings, 4866 .get_stats = mlxsw_sp1_get_stats, 4867 }; 4868 4869 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 4870 .clock_init = mlxsw_sp2_ptp_clock_init, 4871 .clock_fini = mlxsw_sp2_ptp_clock_fini, 4872 .init = mlxsw_sp2_ptp_init, 4873 .fini = mlxsw_sp2_ptp_fini, 4874 .receive = mlxsw_sp2_ptp_receive, 4875 .transmitted = mlxsw_sp2_ptp_transmitted, 4876 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 4877 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 4878 .shaper_work = mlxsw_sp2_ptp_shaper_work, 4879 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 4880 .get_stats_count = mlxsw_sp2_get_stats_count, 4881 .get_stats_strings = mlxsw_sp2_get_stats_strings, 4882 .get_stats = mlxsw_sp2_get_stats, 4883 }; 4884 4885 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 4886 unsigned long event, void *ptr); 4887 4888 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 4889 const struct mlxsw_bus_info *mlxsw_bus_info, 4890 struct netlink_ext_ack *extack) 4891 { 4892 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4893 int err; 4894 4895 mlxsw_sp->core = mlxsw_core; 4896 mlxsw_sp->bus_info = mlxsw_bus_info; 4897 4898 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 4899 if (err) 4900 return err; 4901 4902 err = mlxsw_sp_base_mac_get(mlxsw_sp); 4903 if (err) { 4904 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 4905 return err; 4906 } 4907 4908 err = mlxsw_sp_kvdl_init(mlxsw_sp); 4909 if (err) { 4910 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 4911 return err; 4912 } 4913 4914 err = mlxsw_sp_fids_init(mlxsw_sp); 4915 if (err) { 4916 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 4917 goto err_fids_init; 4918 } 4919 4920 err = mlxsw_sp_traps_init(mlxsw_sp); 4921 if (err) { 4922 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 4923 goto err_traps_init; 4924 } 4925 4926 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 4927 if (err) { 4928 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 4929 goto err_devlink_traps_init; 4930 } 4931 4932 err = mlxsw_sp_buffers_init(mlxsw_sp); 4933 if (err) { 4934 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 4935 goto err_buffers_init; 4936 } 4937 4938 err = mlxsw_sp_lag_init(mlxsw_sp); 4939 if (err) { 4940 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 4941 goto err_lag_init; 4942 } 4943 4944 /* Initialize SPAN before router and switchdev, so that those components 4945 * can call mlxsw_sp_span_respin(). 4946 */ 4947 err = mlxsw_sp_span_init(mlxsw_sp); 4948 if (err) { 4949 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 4950 goto err_span_init; 4951 } 4952 4953 err = mlxsw_sp_switchdev_init(mlxsw_sp); 4954 if (err) { 4955 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 4956 goto err_switchdev_init; 4957 } 4958 4959 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 4960 if (err) { 4961 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 4962 goto err_counter_pool_init; 4963 } 4964 4965 err = mlxsw_sp_afa_init(mlxsw_sp); 4966 if (err) { 4967 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 4968 goto err_afa_init; 4969 } 4970 4971 err = mlxsw_sp_nve_init(mlxsw_sp); 4972 if (err) { 4973 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 4974 goto err_nve_init; 4975 } 4976 4977 err = mlxsw_sp_acl_init(mlxsw_sp); 4978 if (err) { 4979 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 4980 goto err_acl_init; 4981 } 4982 4983 err = mlxsw_sp_router_init(mlxsw_sp, extack); 4984 if (err) { 4985 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 4986 goto err_router_init; 4987 } 4988 4989 if (mlxsw_sp->bus_info->read_frc_capable) { 4990 /* NULL is a valid return value from clock_init */ 4991 mlxsw_sp->clock = 4992 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 4993 mlxsw_sp->bus_info->dev); 4994 if (IS_ERR(mlxsw_sp->clock)) { 4995 err = PTR_ERR(mlxsw_sp->clock); 4996 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 4997 goto err_ptp_clock_init; 4998 } 4999 } 5000 5001 if (mlxsw_sp->clock) { 5002 /* NULL is a valid return value from ptp_ops->init */ 5003 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 5004 if (IS_ERR(mlxsw_sp->ptp_state)) { 5005 err = PTR_ERR(mlxsw_sp->ptp_state); 5006 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 5007 goto err_ptp_init; 5008 } 5009 } 5010 5011 /* Initialize netdevice notifier after router and SPAN is initialized, 5012 * so that the event handler can use router structures and call SPAN 5013 * respin. 5014 */ 5015 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 5016 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 5017 &mlxsw_sp->netdevice_nb); 5018 if (err) { 5019 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 5020 goto err_netdev_notifier; 5021 } 5022 5023 err = mlxsw_sp_dpipe_init(mlxsw_sp); 5024 if (err) { 5025 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 5026 goto err_dpipe_init; 5027 } 5028 5029 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 5030 if (err) { 5031 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 5032 goto err_port_module_info_init; 5033 } 5034 5035 err = mlxsw_sp_ports_create(mlxsw_sp); 5036 if (err) { 5037 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 5038 goto err_ports_create; 5039 } 5040 5041 return 0; 5042 5043 err_ports_create: 5044 mlxsw_sp_port_module_info_fini(mlxsw_sp); 5045 err_port_module_info_init: 5046 mlxsw_sp_dpipe_fini(mlxsw_sp); 5047 err_dpipe_init: 5048 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 5049 &mlxsw_sp->netdevice_nb); 5050 err_netdev_notifier: 5051 if (mlxsw_sp->clock) 5052 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 5053 err_ptp_init: 5054 if (mlxsw_sp->clock) 5055 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 5056 err_ptp_clock_init: 5057 mlxsw_sp_router_fini(mlxsw_sp); 5058 err_router_init: 5059 mlxsw_sp_acl_fini(mlxsw_sp); 5060 err_acl_init: 5061 mlxsw_sp_nve_fini(mlxsw_sp); 5062 err_nve_init: 5063 mlxsw_sp_afa_fini(mlxsw_sp); 5064 err_afa_init: 5065 mlxsw_sp_counter_pool_fini(mlxsw_sp); 5066 err_counter_pool_init: 5067 mlxsw_sp_switchdev_fini(mlxsw_sp); 5068 err_switchdev_init: 5069 mlxsw_sp_span_fini(mlxsw_sp); 5070 err_span_init: 5071 mlxsw_sp_lag_fini(mlxsw_sp); 5072 err_lag_init: 5073 mlxsw_sp_buffers_fini(mlxsw_sp); 5074 err_buffers_init: 5075 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 5076 err_devlink_traps_init: 5077 mlxsw_sp_traps_fini(mlxsw_sp); 5078 err_traps_init: 5079 mlxsw_sp_fids_fini(mlxsw_sp); 5080 err_fids_init: 5081 mlxsw_sp_kvdl_fini(mlxsw_sp); 5082 return err; 5083 } 5084 5085 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 5086 const struct mlxsw_bus_info *mlxsw_bus_info, 5087 struct netlink_ext_ack *extack) 5088 { 5089 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5090 5091 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; 5092 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; 5093 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 5094 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 5095 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 5096 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 5097 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 5098 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 5099 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 5100 mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; 5101 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 5102 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 5103 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 5104 mlxsw_sp->listeners = mlxsw_sp1_listener; 5105 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 5106 5107 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 5108 } 5109 5110 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 5111 const struct mlxsw_bus_info *mlxsw_bus_info, 5112 struct netlink_ext_ack *extack) 5113 { 5114 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5115 5116 mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev; 5117 mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME; 5118 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 5119 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 5120 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 5121 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 5122 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 5123 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 5124 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 5125 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 5126 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 5127 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 5128 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 5129 5130 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 5131 } 5132 5133 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 5134 { 5135 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5136 5137 mlxsw_sp_ports_remove(mlxsw_sp); 5138 mlxsw_sp_port_module_info_fini(mlxsw_sp); 5139 mlxsw_sp_dpipe_fini(mlxsw_sp); 5140 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 5141 &mlxsw_sp->netdevice_nb); 5142 if (mlxsw_sp->clock) { 5143 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 5144 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 5145 } 5146 mlxsw_sp_router_fini(mlxsw_sp); 5147 mlxsw_sp_acl_fini(mlxsw_sp); 5148 mlxsw_sp_nve_fini(mlxsw_sp); 5149 mlxsw_sp_afa_fini(mlxsw_sp); 5150 mlxsw_sp_counter_pool_fini(mlxsw_sp); 5151 mlxsw_sp_switchdev_fini(mlxsw_sp); 5152 mlxsw_sp_span_fini(mlxsw_sp); 5153 mlxsw_sp_lag_fini(mlxsw_sp); 5154 mlxsw_sp_buffers_fini(mlxsw_sp); 5155 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 5156 mlxsw_sp_traps_fini(mlxsw_sp); 5157 mlxsw_sp_fids_fini(mlxsw_sp); 5158 mlxsw_sp_kvdl_fini(mlxsw_sp); 5159 } 5160 5161 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 5162 * 802.1Q FIDs 5163 */ 5164 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 5165 VLAN_VID_MASK - 1) 5166 5167 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 5168 .used_max_mid = 1, 5169 .max_mid = MLXSW_SP_MID_MAX, 5170 .used_flood_tables = 1, 5171 .used_flood_mode = 1, 5172 .flood_mode = 3, 5173 .max_fid_flood_tables = 3, 5174 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5175 .used_max_ib_mc = 1, 5176 .max_ib_mc = 0, 5177 .used_max_pkey = 1, 5178 .max_pkey = 0, 5179 .used_kvd_sizes = 1, 5180 .kvd_hash_single_parts = 59, 5181 .kvd_hash_double_parts = 41, 5182 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 5183 .swid_config = { 5184 { 5185 .used_type = 1, 5186 .type = MLXSW_PORT_SWID_TYPE_ETH, 5187 } 5188 }, 5189 }; 5190 5191 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 5192 .used_max_mid = 1, 5193 .max_mid = MLXSW_SP_MID_MAX, 5194 .used_flood_tables = 1, 5195 .used_flood_mode = 1, 5196 .flood_mode = 3, 5197 .max_fid_flood_tables = 3, 5198 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5199 .used_max_ib_mc = 1, 5200 .max_ib_mc = 0, 5201 .used_max_pkey = 1, 5202 .max_pkey = 0, 5203 .swid_config = { 5204 { 5205 .used_type = 1, 5206 .type = MLXSW_PORT_SWID_TYPE_ETH, 5207 } 5208 }, 5209 }; 5210 5211 static void 5212 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 5213 struct devlink_resource_size_params *kvd_size_params, 5214 struct devlink_resource_size_params *linear_size_params, 5215 struct devlink_resource_size_params *hash_double_size_params, 5216 struct devlink_resource_size_params *hash_single_size_params) 5217 { 5218 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 5219 KVD_SINGLE_MIN_SIZE); 5220 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 5221 KVD_DOUBLE_MIN_SIZE); 5222 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5223 u32 linear_size_min = 0; 5224 5225 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 5226 MLXSW_SP_KVD_GRANULARITY, 5227 DEVLINK_RESOURCE_UNIT_ENTRY); 5228 devlink_resource_size_params_init(linear_size_params, linear_size_min, 5229 kvd_size - single_size_min - 5230 double_size_min, 5231 MLXSW_SP_KVD_GRANULARITY, 5232 DEVLINK_RESOURCE_UNIT_ENTRY); 5233 devlink_resource_size_params_init(hash_double_size_params, 5234 double_size_min, 5235 kvd_size - single_size_min - 5236 linear_size_min, 5237 MLXSW_SP_KVD_GRANULARITY, 5238 DEVLINK_RESOURCE_UNIT_ENTRY); 5239 devlink_resource_size_params_init(hash_single_size_params, 5240 single_size_min, 5241 kvd_size - double_size_min - 5242 linear_size_min, 5243 MLXSW_SP_KVD_GRANULARITY, 5244 DEVLINK_RESOURCE_UNIT_ENTRY); 5245 } 5246 5247 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 5248 { 5249 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5250 struct devlink_resource_size_params hash_single_size_params; 5251 struct devlink_resource_size_params hash_double_size_params; 5252 struct devlink_resource_size_params linear_size_params; 5253 struct devlink_resource_size_params kvd_size_params; 5254 u32 kvd_size, single_size, double_size, linear_size; 5255 const struct mlxsw_config_profile *profile; 5256 int err; 5257 5258 profile = &mlxsw_sp1_config_profile; 5259 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 5260 return -EIO; 5261 5262 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 5263 &linear_size_params, 5264 &hash_double_size_params, 5265 &hash_single_size_params); 5266 5267 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5268 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 5269 kvd_size, MLXSW_SP_RESOURCE_KVD, 5270 DEVLINK_RESOURCE_ID_PARENT_TOP, 5271 &kvd_size_params); 5272 if (err) 5273 return err; 5274 5275 linear_size = profile->kvd_linear_size; 5276 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 5277 linear_size, 5278 MLXSW_SP_RESOURCE_KVD_LINEAR, 5279 MLXSW_SP_RESOURCE_KVD, 5280 &linear_size_params); 5281 if (err) 5282 return err; 5283 5284 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 5285 if (err) 5286 return err; 5287 5288 double_size = kvd_size - linear_size; 5289 double_size *= profile->kvd_hash_double_parts; 5290 double_size /= profile->kvd_hash_double_parts + 5291 profile->kvd_hash_single_parts; 5292 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 5293 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 5294 double_size, 5295 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5296 MLXSW_SP_RESOURCE_KVD, 5297 &hash_double_size_params); 5298 if (err) 5299 return err; 5300 5301 single_size = kvd_size - double_size - linear_size; 5302 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 5303 single_size, 5304 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5305 MLXSW_SP_RESOURCE_KVD, 5306 &hash_single_size_params); 5307 if (err) 5308 return err; 5309 5310 return 0; 5311 } 5312 5313 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 5314 { 5315 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5316 struct devlink_resource_size_params kvd_size_params; 5317 u32 kvd_size; 5318 5319 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 5320 return -EIO; 5321 5322 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5323 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 5324 MLXSW_SP_KVD_GRANULARITY, 5325 DEVLINK_RESOURCE_UNIT_ENTRY); 5326 5327 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 5328 kvd_size, MLXSW_SP_RESOURCE_KVD, 5329 DEVLINK_RESOURCE_ID_PARENT_TOP, 5330 &kvd_size_params); 5331 } 5332 5333 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 5334 { 5335 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5336 struct devlink_resource_size_params span_size_params; 5337 u32 max_span; 5338 5339 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 5340 return -EIO; 5341 5342 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 5343 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 5344 1, DEVLINK_RESOURCE_UNIT_ENTRY); 5345 5346 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 5347 max_span, MLXSW_SP_RESOURCE_SPAN, 5348 DEVLINK_RESOURCE_ID_PARENT_TOP, 5349 &span_size_params); 5350 } 5351 5352 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 5353 { 5354 int err; 5355 5356 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 5357 if (err) 5358 return err; 5359 5360 err = mlxsw_sp_resources_span_register(mlxsw_core); 5361 if (err) 5362 goto err_resources_span_register; 5363 5364 return 0; 5365 5366 err_resources_span_register: 5367 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 5368 return err; 5369 } 5370 5371 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 5372 { 5373 int err; 5374 5375 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 5376 if (err) 5377 return err; 5378 5379 err = mlxsw_sp_resources_span_register(mlxsw_core); 5380 if (err) 5381 goto err_resources_span_register; 5382 5383 return 0; 5384 5385 err_resources_span_register: 5386 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 5387 return err; 5388 } 5389 5390 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 5391 const struct mlxsw_config_profile *profile, 5392 u64 *p_single_size, u64 *p_double_size, 5393 u64 *p_linear_size) 5394 { 5395 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5396 u32 double_size; 5397 int err; 5398 5399 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5400 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 5401 return -EIO; 5402 5403 /* The hash part is what left of the kvd without the 5404 * linear part. It is split to the single size and 5405 * double size by the parts ratio from the profile. 5406 * Both sizes must be a multiplications of the 5407 * granularity from the profile. In case the user 5408 * provided the sizes they are obtained via devlink. 5409 */ 5410 err = devlink_resource_size_get(devlink, 5411 MLXSW_SP_RESOURCE_KVD_LINEAR, 5412 p_linear_size); 5413 if (err) 5414 *p_linear_size = profile->kvd_linear_size; 5415 5416 err = devlink_resource_size_get(devlink, 5417 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5418 p_double_size); 5419 if (err) { 5420 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5421 *p_linear_size; 5422 double_size *= profile->kvd_hash_double_parts; 5423 double_size /= profile->kvd_hash_double_parts + 5424 profile->kvd_hash_single_parts; 5425 *p_double_size = rounddown(double_size, 5426 MLXSW_SP_KVD_GRANULARITY); 5427 } 5428 5429 err = devlink_resource_size_get(devlink, 5430 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5431 p_single_size); 5432 if (err) 5433 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5434 *p_double_size - *p_linear_size; 5435 5436 /* Check results are legal. */ 5437 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5438 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 5439 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 5440 return -EIO; 5441 5442 return 0; 5443 } 5444 5445 static int 5446 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, 5447 union devlink_param_value val, 5448 struct netlink_ext_ack *extack) 5449 { 5450 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && 5451 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { 5452 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); 5453 return -EINVAL; 5454 } 5455 5456 return 0; 5457 } 5458 5459 static const struct devlink_param mlxsw_sp_devlink_params[] = { 5460 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, 5461 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 5462 NULL, NULL, 5463 mlxsw_sp_devlink_param_fw_load_policy_validate), 5464 }; 5465 5466 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) 5467 { 5468 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5469 union devlink_param_value value; 5470 int err; 5471 5472 err = devlink_params_register(devlink, mlxsw_sp_devlink_params, 5473 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5474 if (err) 5475 return err; 5476 5477 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; 5478 devlink_param_driverinit_value_set(devlink, 5479 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 5480 value); 5481 return 0; 5482 } 5483 5484 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) 5485 { 5486 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5487 mlxsw_sp_devlink_params, 5488 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5489 } 5490 5491 static int 5492 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 5493 struct devlink_param_gset_ctx *ctx) 5494 { 5495 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5496 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5497 5498 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 5499 return 0; 5500 } 5501 5502 static int 5503 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 5504 struct devlink_param_gset_ctx *ctx) 5505 { 5506 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5507 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5508 5509 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 5510 } 5511 5512 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 5513 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5514 "acl_region_rehash_interval", 5515 DEVLINK_PARAM_TYPE_U32, 5516 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 5517 mlxsw_sp_params_acl_region_rehash_intrvl_get, 5518 mlxsw_sp_params_acl_region_rehash_intrvl_set, 5519 NULL), 5520 }; 5521 5522 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 5523 { 5524 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5525 union devlink_param_value value; 5526 int err; 5527 5528 err = mlxsw_sp_params_register(mlxsw_core); 5529 if (err) 5530 return err; 5531 5532 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 5533 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5534 if (err) 5535 goto err_devlink_params_register; 5536 5537 value.vu32 = 0; 5538 devlink_param_driverinit_value_set(devlink, 5539 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5540 value); 5541 return 0; 5542 5543 err_devlink_params_register: 5544 mlxsw_sp_params_unregister(mlxsw_core); 5545 return err; 5546 } 5547 5548 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 5549 { 5550 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5551 mlxsw_sp2_devlink_params, 5552 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5553 mlxsw_sp_params_unregister(mlxsw_core); 5554 } 5555 5556 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 5557 struct sk_buff *skb, u8 local_port) 5558 { 5559 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5560 5561 skb_pull(skb, MLXSW_TXHDR_LEN); 5562 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 5563 } 5564 5565 static struct mlxsw_driver mlxsw_sp1_driver = { 5566 .kind = mlxsw_sp1_driver_name, 5567 .priv_size = sizeof(struct mlxsw_sp), 5568 .init = mlxsw_sp1_init, 5569 .fini = mlxsw_sp_fini, 5570 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5571 .port_split = mlxsw_sp_port_split, 5572 .port_unsplit = mlxsw_sp_port_unsplit, 5573 .sb_pool_get = mlxsw_sp_sb_pool_get, 5574 .sb_pool_set = mlxsw_sp_sb_pool_set, 5575 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5576 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5577 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5578 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5579 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5580 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5581 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5582 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5583 .flash_update = mlxsw_sp_flash_update, 5584 .trap_init = mlxsw_sp_trap_init, 5585 .trap_fini = mlxsw_sp_trap_fini, 5586 .trap_action_set = mlxsw_sp_trap_action_set, 5587 .trap_group_init = mlxsw_sp_trap_group_init, 5588 .txhdr_construct = mlxsw_sp_txhdr_construct, 5589 .resources_register = mlxsw_sp1_resources_register, 5590 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 5591 .params_register = mlxsw_sp_params_register, 5592 .params_unregister = mlxsw_sp_params_unregister, 5593 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5594 .txhdr_len = MLXSW_TXHDR_LEN, 5595 .profile = &mlxsw_sp1_config_profile, 5596 .res_query_enabled = true, 5597 }; 5598 5599 static struct mlxsw_driver mlxsw_sp2_driver = { 5600 .kind = mlxsw_sp2_driver_name, 5601 .priv_size = sizeof(struct mlxsw_sp), 5602 .init = mlxsw_sp2_init, 5603 .fini = mlxsw_sp_fini, 5604 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5605 .port_split = mlxsw_sp_port_split, 5606 .port_unsplit = mlxsw_sp_port_unsplit, 5607 .sb_pool_get = mlxsw_sp_sb_pool_get, 5608 .sb_pool_set = mlxsw_sp_sb_pool_set, 5609 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5610 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5611 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5612 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5613 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5614 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5615 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5616 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5617 .flash_update = mlxsw_sp_flash_update, 5618 .trap_init = mlxsw_sp_trap_init, 5619 .trap_fini = mlxsw_sp_trap_fini, 5620 .trap_action_set = mlxsw_sp_trap_action_set, 5621 .trap_group_init = mlxsw_sp_trap_group_init, 5622 .txhdr_construct = mlxsw_sp_txhdr_construct, 5623 .resources_register = mlxsw_sp2_resources_register, 5624 .params_register = mlxsw_sp2_params_register, 5625 .params_unregister = mlxsw_sp2_params_unregister, 5626 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5627 .txhdr_len = MLXSW_TXHDR_LEN, 5628 .profile = &mlxsw_sp2_config_profile, 5629 .res_query_enabled = true, 5630 }; 5631 5632 static struct mlxsw_driver mlxsw_sp3_driver = { 5633 .kind = mlxsw_sp3_driver_name, 5634 .priv_size = sizeof(struct mlxsw_sp), 5635 .init = mlxsw_sp2_init, 5636 .fini = mlxsw_sp_fini, 5637 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5638 .port_split = mlxsw_sp_port_split, 5639 .port_unsplit = mlxsw_sp_port_unsplit, 5640 .sb_pool_get = mlxsw_sp_sb_pool_get, 5641 .sb_pool_set = mlxsw_sp_sb_pool_set, 5642 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5643 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5644 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5645 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5646 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5647 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5648 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5649 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5650 .flash_update = mlxsw_sp_flash_update, 5651 .trap_init = mlxsw_sp_trap_init, 5652 .trap_fini = mlxsw_sp_trap_fini, 5653 .trap_action_set = mlxsw_sp_trap_action_set, 5654 .trap_group_init = mlxsw_sp_trap_group_init, 5655 .txhdr_construct = mlxsw_sp_txhdr_construct, 5656 .resources_register = mlxsw_sp2_resources_register, 5657 .params_register = mlxsw_sp2_params_register, 5658 .params_unregister = mlxsw_sp2_params_unregister, 5659 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5660 .txhdr_len = MLXSW_TXHDR_LEN, 5661 .profile = &mlxsw_sp2_config_profile, 5662 .res_query_enabled = true, 5663 }; 5664 5665 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 5666 { 5667 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 5668 } 5669 5670 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 5671 { 5672 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 5673 int ret = 0; 5674 5675 if (mlxsw_sp_port_dev_check(lower_dev)) { 5676 *p_mlxsw_sp_port = netdev_priv(lower_dev); 5677 ret = 1; 5678 } 5679 5680 return ret; 5681 } 5682 5683 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 5684 { 5685 struct mlxsw_sp_port *mlxsw_sp_port; 5686 5687 if (mlxsw_sp_port_dev_check(dev)) 5688 return netdev_priv(dev); 5689 5690 mlxsw_sp_port = NULL; 5691 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 5692 5693 return mlxsw_sp_port; 5694 } 5695 5696 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 5697 { 5698 struct mlxsw_sp_port *mlxsw_sp_port; 5699 5700 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 5701 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 5702 } 5703 5704 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 5705 { 5706 struct mlxsw_sp_port *mlxsw_sp_port; 5707 5708 if (mlxsw_sp_port_dev_check(dev)) 5709 return netdev_priv(dev); 5710 5711 mlxsw_sp_port = NULL; 5712 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 5713 &mlxsw_sp_port); 5714 5715 return mlxsw_sp_port; 5716 } 5717 5718 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 5719 { 5720 struct mlxsw_sp_port *mlxsw_sp_port; 5721 5722 rcu_read_lock(); 5723 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 5724 if (mlxsw_sp_port) 5725 dev_hold(mlxsw_sp_port->dev); 5726 rcu_read_unlock(); 5727 return mlxsw_sp_port; 5728 } 5729 5730 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 5731 { 5732 dev_put(mlxsw_sp_port->dev); 5733 } 5734 5735 static void 5736 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 5737 struct net_device *lag_dev) 5738 { 5739 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 5740 struct net_device *upper_dev; 5741 struct list_head *iter; 5742 5743 if (netif_is_bridge_port(lag_dev)) 5744 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 5745 5746 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 5747 if (!netif_is_bridge_port(upper_dev)) 5748 continue; 5749 br_dev = netdev_master_upper_dev_get(upper_dev); 5750 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 5751 } 5752 } 5753 5754 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5755 { 5756 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5757 5758 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 5759 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5760 } 5761 5762 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5763 { 5764 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5765 5766 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 5767 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5768 } 5769 5770 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5771 u16 lag_id, u8 port_index) 5772 { 5773 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5774 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5775 5776 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 5777 lag_id, port_index); 5778 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5779 } 5780 5781 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5782 u16 lag_id) 5783 { 5784 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5785 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5786 5787 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 5788 lag_id); 5789 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5790 } 5791 5792 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 5793 u16 lag_id) 5794 { 5795 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5796 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5797 5798 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 5799 lag_id); 5800 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5801 } 5802 5803 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 5804 u16 lag_id) 5805 { 5806 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5807 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5808 5809 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 5810 lag_id); 5811 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5812 } 5813 5814 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5815 struct net_device *lag_dev, 5816 u16 *p_lag_id) 5817 { 5818 struct mlxsw_sp_upper *lag; 5819 int free_lag_id = -1; 5820 u64 max_lag; 5821 int i; 5822 5823 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 5824 for (i = 0; i < max_lag; i++) { 5825 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 5826 if (lag->ref_count) { 5827 if (lag->dev == lag_dev) { 5828 *p_lag_id = i; 5829 return 0; 5830 } 5831 } else if (free_lag_id < 0) { 5832 free_lag_id = i; 5833 } 5834 } 5835 if (free_lag_id < 0) 5836 return -EBUSY; 5837 *p_lag_id = free_lag_id; 5838 return 0; 5839 } 5840 5841 static bool 5842 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 5843 struct net_device *lag_dev, 5844 struct netdev_lag_upper_info *lag_upper_info, 5845 struct netlink_ext_ack *extack) 5846 { 5847 u16 lag_id; 5848 5849 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 5850 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 5851 return false; 5852 } 5853 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 5854 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 5855 return false; 5856 } 5857 return true; 5858 } 5859 5860 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5861 u16 lag_id, u8 *p_port_index) 5862 { 5863 u64 max_lag_members; 5864 int i; 5865 5866 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 5867 MAX_LAG_MEMBERS); 5868 for (i = 0; i < max_lag_members; i++) { 5869 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 5870 *p_port_index = i; 5871 return 0; 5872 } 5873 } 5874 return -EBUSY; 5875 } 5876 5877 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 5878 struct net_device *lag_dev) 5879 { 5880 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5881 struct mlxsw_sp_upper *lag; 5882 u16 lag_id; 5883 u8 port_index; 5884 int err; 5885 5886 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 5887 if (err) 5888 return err; 5889 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5890 if (!lag->ref_count) { 5891 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 5892 if (err) 5893 return err; 5894 lag->dev = lag_dev; 5895 } 5896 5897 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 5898 if (err) 5899 return err; 5900 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 5901 if (err) 5902 goto err_col_port_add; 5903 5904 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 5905 mlxsw_sp_port->local_port); 5906 mlxsw_sp_port->lag_id = lag_id; 5907 mlxsw_sp_port->lagged = 1; 5908 lag->ref_count++; 5909 5910 /* Port is no longer usable as a router interface */ 5911 if (mlxsw_sp_port->default_vlan->fid) 5912 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 5913 5914 return 0; 5915 5916 err_col_port_add: 5917 if (!lag->ref_count) 5918 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5919 return err; 5920 } 5921 5922 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 5923 struct net_device *lag_dev) 5924 { 5925 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5926 u16 lag_id = mlxsw_sp_port->lag_id; 5927 struct mlxsw_sp_upper *lag; 5928 5929 if (!mlxsw_sp_port->lagged) 5930 return; 5931 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5932 WARN_ON(lag->ref_count == 0); 5933 5934 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 5935 5936 /* Any VLANs configured on the port are no longer valid */ 5937 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 5938 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 5939 /* Make the LAG and its directly linked uppers leave bridges they 5940 * are memeber in 5941 */ 5942 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 5943 5944 if (lag->ref_count == 1) 5945 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5946 5947 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 5948 mlxsw_sp_port->local_port); 5949 mlxsw_sp_port->lagged = 0; 5950 lag->ref_count--; 5951 5952 /* Make sure untagged frames are allowed to ingress */ 5953 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 5954 } 5955 5956 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5957 u16 lag_id) 5958 { 5959 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5960 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5961 5962 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 5963 mlxsw_sp_port->local_port); 5964 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5965 } 5966 5967 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5968 u16 lag_id) 5969 { 5970 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5971 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5972 5973 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 5974 mlxsw_sp_port->local_port); 5975 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5976 } 5977 5978 static int 5979 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 5980 { 5981 int err; 5982 5983 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 5984 mlxsw_sp_port->lag_id); 5985 if (err) 5986 return err; 5987 5988 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5989 if (err) 5990 goto err_dist_port_add; 5991 5992 return 0; 5993 5994 err_dist_port_add: 5995 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5996 return err; 5997 } 5998 5999 static int 6000 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 6001 { 6002 int err; 6003 6004 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 6005 mlxsw_sp_port->lag_id); 6006 if (err) 6007 return err; 6008 6009 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 6010 mlxsw_sp_port->lag_id); 6011 if (err) 6012 goto err_col_port_disable; 6013 6014 return 0; 6015 6016 err_col_port_disable: 6017 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 6018 return err; 6019 } 6020 6021 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 6022 struct netdev_lag_lower_state_info *info) 6023 { 6024 if (info->tx_enabled) 6025 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 6026 else 6027 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 6028 } 6029 6030 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 6031 bool enable) 6032 { 6033 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6034 enum mlxsw_reg_spms_state spms_state; 6035 char *spms_pl; 6036 u16 vid; 6037 int err; 6038 6039 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 6040 MLXSW_REG_SPMS_STATE_DISCARDING; 6041 6042 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 6043 if (!spms_pl) 6044 return -ENOMEM; 6045 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 6046 6047 for (vid = 0; vid < VLAN_N_VID; vid++) 6048 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 6049 6050 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 6051 kfree(spms_pl); 6052 return err; 6053 } 6054 6055 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 6056 { 6057 u16 vid = 1; 6058 int err; 6059 6060 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 6061 if (err) 6062 return err; 6063 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 6064 if (err) 6065 goto err_port_stp_set; 6066 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 6067 true, false); 6068 if (err) 6069 goto err_port_vlan_set; 6070 6071 for (; vid <= VLAN_N_VID - 1; vid++) { 6072 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 6073 vid, false); 6074 if (err) 6075 goto err_vid_learning_set; 6076 } 6077 6078 return 0; 6079 6080 err_vid_learning_set: 6081 for (vid--; vid >= 1; vid--) 6082 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 6083 err_port_vlan_set: 6084 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 6085 err_port_stp_set: 6086 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 6087 return err; 6088 } 6089 6090 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 6091 { 6092 u16 vid; 6093 6094 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 6095 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 6096 vid, true); 6097 6098 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 6099 false, false); 6100 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 6101 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 6102 } 6103 6104 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 6105 { 6106 unsigned int num_vxlans = 0; 6107 struct net_device *dev; 6108 struct list_head *iter; 6109 6110 netdev_for_each_lower_dev(br_dev, dev, iter) { 6111 if (netif_is_vxlan(dev)) 6112 num_vxlans++; 6113 } 6114 6115 return num_vxlans > 1; 6116 } 6117 6118 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 6119 { 6120 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 6121 struct net_device *dev; 6122 struct list_head *iter; 6123 6124 netdev_for_each_lower_dev(br_dev, dev, iter) { 6125 u16 pvid; 6126 int err; 6127 6128 if (!netif_is_vxlan(dev)) 6129 continue; 6130 6131 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 6132 if (err || !pvid) 6133 continue; 6134 6135 if (test_and_set_bit(pvid, vlans)) 6136 return false; 6137 } 6138 6139 return true; 6140 } 6141 6142 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 6143 struct netlink_ext_ack *extack) 6144 { 6145 if (br_multicast_enabled(br_dev)) { 6146 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 6147 return false; 6148 } 6149 6150 if (!br_vlan_enabled(br_dev) && 6151 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 6152 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 6153 return false; 6154 } 6155 6156 if (br_vlan_enabled(br_dev) && 6157 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 6158 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 6159 return false; 6160 } 6161 6162 return true; 6163 } 6164 6165 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 6166 struct net_device *dev, 6167 unsigned long event, void *ptr) 6168 { 6169 struct netdev_notifier_changeupper_info *info; 6170 struct mlxsw_sp_port *mlxsw_sp_port; 6171 struct netlink_ext_ack *extack; 6172 struct net_device *upper_dev; 6173 struct mlxsw_sp *mlxsw_sp; 6174 int err = 0; 6175 6176 mlxsw_sp_port = netdev_priv(dev); 6177 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6178 info = ptr; 6179 extack = netdev_notifier_info_to_extack(&info->info); 6180 6181 switch (event) { 6182 case NETDEV_PRECHANGEUPPER: 6183 upper_dev = info->upper_dev; 6184 if (!is_vlan_dev(upper_dev) && 6185 !netif_is_lag_master(upper_dev) && 6186 !netif_is_bridge_master(upper_dev) && 6187 !netif_is_ovs_master(upper_dev) && 6188 !netif_is_macvlan(upper_dev)) { 6189 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6190 return -EINVAL; 6191 } 6192 if (!info->linking) 6193 break; 6194 if (netif_is_bridge_master(upper_dev) && 6195 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 6196 mlxsw_sp_bridge_has_vxlan(upper_dev) && 6197 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6198 return -EOPNOTSUPP; 6199 if (netdev_has_any_upper_dev(upper_dev) && 6200 (!netif_is_bridge_master(upper_dev) || 6201 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 6202 upper_dev))) { 6203 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 6204 return -EINVAL; 6205 } 6206 if (netif_is_lag_master(upper_dev) && 6207 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 6208 info->upper_info, extack)) 6209 return -EINVAL; 6210 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 6211 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 6212 return -EINVAL; 6213 } 6214 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 6215 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 6216 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 6217 return -EINVAL; 6218 } 6219 if (netif_is_macvlan(upper_dev) && 6220 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { 6221 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6222 return -EOPNOTSUPP; 6223 } 6224 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 6225 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 6226 return -EINVAL; 6227 } 6228 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 6229 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 6230 return -EINVAL; 6231 } 6232 break; 6233 case NETDEV_CHANGEUPPER: 6234 upper_dev = info->upper_dev; 6235 if (netif_is_bridge_master(upper_dev)) { 6236 if (info->linking) 6237 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 6238 lower_dev, 6239 upper_dev, 6240 extack); 6241 else 6242 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 6243 lower_dev, 6244 upper_dev); 6245 } else if (netif_is_lag_master(upper_dev)) { 6246 if (info->linking) { 6247 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 6248 upper_dev); 6249 } else { 6250 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 6251 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 6252 upper_dev); 6253 } 6254 } else if (netif_is_ovs_master(upper_dev)) { 6255 if (info->linking) 6256 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 6257 else 6258 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 6259 } else if (netif_is_macvlan(upper_dev)) { 6260 if (!info->linking) 6261 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6262 } else if (is_vlan_dev(upper_dev)) { 6263 struct net_device *br_dev; 6264 6265 if (!netif_is_bridge_port(upper_dev)) 6266 break; 6267 if (info->linking) 6268 break; 6269 br_dev = netdev_master_upper_dev_get(upper_dev); 6270 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 6271 br_dev); 6272 } 6273 break; 6274 } 6275 6276 return err; 6277 } 6278 6279 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 6280 unsigned long event, void *ptr) 6281 { 6282 struct netdev_notifier_changelowerstate_info *info; 6283 struct mlxsw_sp_port *mlxsw_sp_port; 6284 int err; 6285 6286 mlxsw_sp_port = netdev_priv(dev); 6287 info = ptr; 6288 6289 switch (event) { 6290 case NETDEV_CHANGELOWERSTATE: 6291 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 6292 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 6293 info->lower_state_info); 6294 if (err) 6295 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 6296 } 6297 break; 6298 } 6299 6300 return 0; 6301 } 6302 6303 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 6304 struct net_device *port_dev, 6305 unsigned long event, void *ptr) 6306 { 6307 switch (event) { 6308 case NETDEV_PRECHANGEUPPER: 6309 case NETDEV_CHANGEUPPER: 6310 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 6311 event, ptr); 6312 case NETDEV_CHANGELOWERSTATE: 6313 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 6314 ptr); 6315 } 6316 6317 return 0; 6318 } 6319 6320 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 6321 unsigned long event, void *ptr) 6322 { 6323 struct net_device *dev; 6324 struct list_head *iter; 6325 int ret; 6326 6327 netdev_for_each_lower_dev(lag_dev, dev, iter) { 6328 if (mlxsw_sp_port_dev_check(dev)) { 6329 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 6330 ptr); 6331 if (ret) 6332 return ret; 6333 } 6334 } 6335 6336 return 0; 6337 } 6338 6339 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 6340 struct net_device *dev, 6341 unsigned long event, void *ptr, 6342 u16 vid) 6343 { 6344 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 6345 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6346 struct netdev_notifier_changeupper_info *info = ptr; 6347 struct netlink_ext_ack *extack; 6348 struct net_device *upper_dev; 6349 int err = 0; 6350 6351 extack = netdev_notifier_info_to_extack(&info->info); 6352 6353 switch (event) { 6354 case NETDEV_PRECHANGEUPPER: 6355 upper_dev = info->upper_dev; 6356 if (!netif_is_bridge_master(upper_dev) && 6357 !netif_is_macvlan(upper_dev)) { 6358 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6359 return -EINVAL; 6360 } 6361 if (!info->linking) 6362 break; 6363 if (netif_is_bridge_master(upper_dev) && 6364 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 6365 mlxsw_sp_bridge_has_vxlan(upper_dev) && 6366 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6367 return -EOPNOTSUPP; 6368 if (netdev_has_any_upper_dev(upper_dev) && 6369 (!netif_is_bridge_master(upper_dev) || 6370 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 6371 upper_dev))) { 6372 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 6373 return -EINVAL; 6374 } 6375 if (netif_is_macvlan(upper_dev) && 6376 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 6377 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6378 return -EOPNOTSUPP; 6379 } 6380 break; 6381 case NETDEV_CHANGEUPPER: 6382 upper_dev = info->upper_dev; 6383 if (netif_is_bridge_master(upper_dev)) { 6384 if (info->linking) 6385 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 6386 vlan_dev, 6387 upper_dev, 6388 extack); 6389 else 6390 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 6391 vlan_dev, 6392 upper_dev); 6393 } else if (netif_is_macvlan(upper_dev)) { 6394 if (!info->linking) 6395 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6396 } else { 6397 err = -EINVAL; 6398 WARN_ON(1); 6399 } 6400 break; 6401 } 6402 6403 return err; 6404 } 6405 6406 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 6407 struct net_device *lag_dev, 6408 unsigned long event, 6409 void *ptr, u16 vid) 6410 { 6411 struct net_device *dev; 6412 struct list_head *iter; 6413 int ret; 6414 6415 netdev_for_each_lower_dev(lag_dev, dev, iter) { 6416 if (mlxsw_sp_port_dev_check(dev)) { 6417 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 6418 event, ptr, 6419 vid); 6420 if (ret) 6421 return ret; 6422 } 6423 } 6424 6425 return 0; 6426 } 6427 6428 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 6429 struct net_device *br_dev, 6430 unsigned long event, void *ptr, 6431 u16 vid) 6432 { 6433 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 6434 struct netdev_notifier_changeupper_info *info = ptr; 6435 struct netlink_ext_ack *extack; 6436 struct net_device *upper_dev; 6437 6438 if (!mlxsw_sp) 6439 return 0; 6440 6441 extack = netdev_notifier_info_to_extack(&info->info); 6442 6443 switch (event) { 6444 case NETDEV_PRECHANGEUPPER: 6445 upper_dev = info->upper_dev; 6446 if (!netif_is_macvlan(upper_dev)) { 6447 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6448 return -EOPNOTSUPP; 6449 } 6450 if (!info->linking) 6451 break; 6452 if (netif_is_macvlan(upper_dev) && 6453 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 6454 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6455 return -EOPNOTSUPP; 6456 } 6457 break; 6458 case NETDEV_CHANGEUPPER: 6459 upper_dev = info->upper_dev; 6460 if (info->linking) 6461 break; 6462 if (netif_is_macvlan(upper_dev)) 6463 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6464 break; 6465 } 6466 6467 return 0; 6468 } 6469 6470 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 6471 unsigned long event, void *ptr) 6472 { 6473 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6474 u16 vid = vlan_dev_vlan_id(vlan_dev); 6475 6476 if (mlxsw_sp_port_dev_check(real_dev)) 6477 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 6478 event, ptr, vid); 6479 else if (netif_is_lag_master(real_dev)) 6480 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 6481 real_dev, event, 6482 ptr, vid); 6483 else if (netif_is_bridge_master(real_dev)) 6484 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 6485 event, ptr, vid); 6486 6487 return 0; 6488 } 6489 6490 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 6491 unsigned long event, void *ptr) 6492 { 6493 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 6494 struct netdev_notifier_changeupper_info *info = ptr; 6495 struct netlink_ext_ack *extack; 6496 struct net_device *upper_dev; 6497 6498 if (!mlxsw_sp) 6499 return 0; 6500 6501 extack = netdev_notifier_info_to_extack(&info->info); 6502 6503 switch (event) { 6504 case NETDEV_PRECHANGEUPPER: 6505 upper_dev = info->upper_dev; 6506 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 6507 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6508 return -EOPNOTSUPP; 6509 } 6510 if (!info->linking) 6511 break; 6512 if (netif_is_macvlan(upper_dev) && 6513 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { 6514 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6515 return -EOPNOTSUPP; 6516 } 6517 break; 6518 case NETDEV_CHANGEUPPER: 6519 upper_dev = info->upper_dev; 6520 if (info->linking) 6521 break; 6522 if (is_vlan_dev(upper_dev)) 6523 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 6524 if (netif_is_macvlan(upper_dev)) 6525 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6526 break; 6527 } 6528 6529 return 0; 6530 } 6531 6532 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 6533 unsigned long event, void *ptr) 6534 { 6535 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 6536 struct netdev_notifier_changeupper_info *info = ptr; 6537 struct netlink_ext_ack *extack; 6538 6539 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 6540 return 0; 6541 6542 extack = netdev_notifier_info_to_extack(&info->info); 6543 6544 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 6545 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6546 6547 return -EOPNOTSUPP; 6548 } 6549 6550 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 6551 { 6552 struct netdev_notifier_changeupper_info *info = ptr; 6553 6554 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 6555 return false; 6556 return netif_is_l3_master(info->upper_dev); 6557 } 6558 6559 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 6560 struct net_device *dev, 6561 unsigned long event, void *ptr) 6562 { 6563 struct netdev_notifier_changeupper_info *cu_info; 6564 struct netdev_notifier_info *info = ptr; 6565 struct netlink_ext_ack *extack; 6566 struct net_device *upper_dev; 6567 6568 extack = netdev_notifier_info_to_extack(info); 6569 6570 switch (event) { 6571 case NETDEV_CHANGEUPPER: 6572 cu_info = container_of(info, 6573 struct netdev_notifier_changeupper_info, 6574 info); 6575 upper_dev = cu_info->upper_dev; 6576 if (!netif_is_bridge_master(upper_dev)) 6577 return 0; 6578 if (!mlxsw_sp_lower_get(upper_dev)) 6579 return 0; 6580 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6581 return -EOPNOTSUPP; 6582 if (cu_info->linking) { 6583 if (!netif_running(dev)) 6584 return 0; 6585 /* When the bridge is VLAN-aware, the VNI of the VxLAN 6586 * device needs to be mapped to a VLAN, but at this 6587 * point no VLANs are configured on the VxLAN device 6588 */ 6589 if (br_vlan_enabled(upper_dev)) 6590 return 0; 6591 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 6592 dev, 0, extack); 6593 } else { 6594 /* VLANs were already flushed, which triggered the 6595 * necessary cleanup 6596 */ 6597 if (br_vlan_enabled(upper_dev)) 6598 return 0; 6599 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6600 } 6601 break; 6602 case NETDEV_PRE_UP: 6603 upper_dev = netdev_master_upper_dev_get(dev); 6604 if (!upper_dev) 6605 return 0; 6606 if (!netif_is_bridge_master(upper_dev)) 6607 return 0; 6608 if (!mlxsw_sp_lower_get(upper_dev)) 6609 return 0; 6610 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 6611 extack); 6612 case NETDEV_DOWN: 6613 upper_dev = netdev_master_upper_dev_get(dev); 6614 if (!upper_dev) 6615 return 0; 6616 if (!netif_is_bridge_master(upper_dev)) 6617 return 0; 6618 if (!mlxsw_sp_lower_get(upper_dev)) 6619 return 0; 6620 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6621 break; 6622 } 6623 6624 return 0; 6625 } 6626 6627 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 6628 unsigned long event, void *ptr) 6629 { 6630 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6631 struct mlxsw_sp_span_entry *span_entry; 6632 struct mlxsw_sp *mlxsw_sp; 6633 int err = 0; 6634 6635 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 6636 if (event == NETDEV_UNREGISTER) { 6637 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 6638 if (span_entry) 6639 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 6640 } 6641 mlxsw_sp_span_respin(mlxsw_sp); 6642 6643 if (netif_is_vxlan(dev)) 6644 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 6645 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 6646 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 6647 event, ptr); 6648 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 6649 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 6650 event, ptr); 6651 else if (event == NETDEV_PRE_CHANGEADDR || 6652 event == NETDEV_CHANGEADDR || 6653 event == NETDEV_CHANGEMTU) 6654 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); 6655 else if (mlxsw_sp_is_vrf_event(event, ptr)) 6656 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 6657 else if (mlxsw_sp_port_dev_check(dev)) 6658 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 6659 else if (netif_is_lag_master(dev)) 6660 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 6661 else if (is_vlan_dev(dev)) 6662 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 6663 else if (netif_is_bridge_master(dev)) 6664 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 6665 else if (netif_is_macvlan(dev)) 6666 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 6667 6668 return notifier_from_errno(err); 6669 } 6670 6671 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 6672 .notifier_call = mlxsw_sp_inetaddr_valid_event, 6673 }; 6674 6675 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 6676 .notifier_call = mlxsw_sp_inet6addr_valid_event, 6677 }; 6678 6679 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 6680 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 6681 {0, }, 6682 }; 6683 6684 static struct pci_driver mlxsw_sp1_pci_driver = { 6685 .name = mlxsw_sp1_driver_name, 6686 .id_table = mlxsw_sp1_pci_id_table, 6687 }; 6688 6689 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6690 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6691 {0, }, 6692 }; 6693 6694 static struct pci_driver mlxsw_sp2_pci_driver = { 6695 .name = mlxsw_sp2_driver_name, 6696 .id_table = mlxsw_sp2_pci_id_table, 6697 }; 6698 6699 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 6700 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 6701 {0, }, 6702 }; 6703 6704 static struct pci_driver mlxsw_sp3_pci_driver = { 6705 .name = mlxsw_sp3_driver_name, 6706 .id_table = mlxsw_sp3_pci_id_table, 6707 }; 6708 6709 static int __init mlxsw_sp_module_init(void) 6710 { 6711 int err; 6712 6713 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6714 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6715 6716 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6717 if (err) 6718 goto err_sp1_core_driver_register; 6719 6720 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6721 if (err) 6722 goto err_sp2_core_driver_register; 6723 6724 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 6725 if (err) 6726 goto err_sp3_core_driver_register; 6727 6728 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6729 if (err) 6730 goto err_sp1_pci_driver_register; 6731 6732 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6733 if (err) 6734 goto err_sp2_pci_driver_register; 6735 6736 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 6737 if (err) 6738 goto err_sp3_pci_driver_register; 6739 6740 return 0; 6741 6742 err_sp3_pci_driver_register: 6743 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6744 err_sp2_pci_driver_register: 6745 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6746 err_sp1_pci_driver_register: 6747 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 6748 err_sp3_core_driver_register: 6749 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6750 err_sp2_core_driver_register: 6751 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6752 err_sp1_core_driver_register: 6753 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6754 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6755 return err; 6756 } 6757 6758 static void __exit mlxsw_sp_module_exit(void) 6759 { 6760 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 6761 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6762 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6763 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 6764 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6765 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6766 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6767 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6768 } 6769 6770 module_init(mlxsw_sp_module_init); 6771 module_exit(mlxsw_sp_module_exit); 6772 6773 MODULE_LICENSE("Dual BSD/GPL"); 6774 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 6775 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 6776 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 6777 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 6778 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 6779 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 6780 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 6781