// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "../mlxfw/mlxfw.h"

/* FW minor numbers encode a branch in their hundreds digit(s), e.g. minor
 * 2000 belongs to branch 20. Used to ensure driver and FW are on the same
 * branch before comparing versions.
 */
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
#define MLXSW_SP1_FWREV_SUBMINOR 1122
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

/* Minimum firmware revision the Spectrum-1 driver requires; see
 * mlxsw_sp_fw_rev_validate() for how it is compared against the running FW.
 */
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Per-ASIC masks over the switch base MAC; the unmasked low bits are the
 * per-port address space derived from the base MAC.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

/* Glue between the generic mlxfw flashing framework and this driver:
 * embeds the mlxfw_dev so callbacks can container_of() back to the
 * mlxsw_sp instance.
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

/* Per-ASIC-generation PTP callbacks; filled in by the SP1/SP2 variants. */
struct mlxsw_sp_ptp_ops {
	struct mlxsw_sp_ptp_clock *
		(*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
	void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);

	struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
	void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);

	/* Notify a driver that a packet that might be PTP was received. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			u8 local_port);

	/* Notify a driver that a timestamped packet was transmitted. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			    u8 local_port);

	int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
			   struct ethtool_ts_info *info);
};

/* mlxfw callback: query a FW component's max size, alignment and max write
 * size via the MCQI register. Alignment is raised to at least 4 bytes
 * (2 bits) and the write size is capped to what one MCDA transfer can carry.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* mlxfw callback: acquire the FW update FSM lock. Reads the current handle
 * via MCC, refuses with -EBUSY unless the FSM is idle, then issues
 * LOCK_UPDATE_HANDLE with the handle read back into *fwhandle.
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
/* mlxfw callback: announce a component update of 'component_size' bytes on
 * the locked handle (MCC UPDATE_COMPONENT instruction).
 */
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: push one block of component data at 'offset' via MCDA. */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

/* mlxfw callback: ask FW to verify the downloaded component (MCC
 * VERIFY_COMPONENT instruction).
 */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: activate the new firmware image (MCC ACTIVATE). */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: read back the FSM state and error code. The raw error
 * code is clamped to MLXFW_FSM_STATE_ERR_MAX so unknown FW codes still map
 * to a valid enum value.
 */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* mlxfw callback: abort an in-progress update. Best-effort; the write
 * status is intentionally not propagated (void return in mlxfw contract).
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: release the update handle taken by mlxsw_sp_fsm_lock(). */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: forward flashing progress to devlink userspace
 * notifications.
 */
static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
				   const char *msg, const char *comp_name,
				   u32 done_bytes, u32 total_bytes)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;

	devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
					   msg, comp_name,
					   done_bytes, total_bytes);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release,
	.status_notify		= mlxsw_sp_status_notify,
};

/* Flash 'firmware' onto the device via the mlxfw framework, bracketing the
 * operation with core flash start/end (pauses normal EMAD traffic handling)
 * and devlink begin/end notifications.
 */
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};
	int err;

	mlxsw_core_fw_flash_start(mlxsw_sp->core);
	devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
				   firmware, extack);
	devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
	mlxsw_core_fw_flash_end(mlxsw_sp->core);

	return err;
}

/* Check that the running FW is compatible with the driver and, if it is too
 * old, flash the bundled image. Returns 0 when no action is needed, -EAGAIN
 * when a flash succeeded and the caller should reset to apply it (only if
 * the running FW is new enough to support reset), or a negative error.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
	    (rev->minor > req_rev->minor ||
	     (rev->minor == req_rev->minor &&
	      rev->subminor >= req_rev->subminor)))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}

/* devlink flash-update entry point: load 'file_name' from the firmware
 * loader and flash it. Per-component flashing is not supported.
 */
static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
				 const char *file_name, const char *component,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	const struct firmware *firmware;
	int err;

	if (component)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, file_name,
				      mlxsw_sp->bus_info->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
	release_firmware(firmware);

	return err;
}

/* Read a flow counter's packet and/or byte values via MGPC. Either output
 * pointer may be NULL if the caller does not need that value.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

/* Zero a flow counter in hardware (MGPC CLEAR opcode). */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a flow counter from the FLOW sub-pool and clear it so the caller
 * starts from zero. The counter is freed again if the clear fails.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

/* Return a flow counter to the FLOW sub-pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

/* Prepend the mlxsw TX header to 'skb'. All packets sent from the CPU are
 * tagged as control packets directed at tx_info->local_port; the caller
 * must have reserved MLXSW_TXHDR_LEN bytes of headroom.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

/* Map a bridge STP port state to the SPMS register encoding. BUG()s on an
 * unknown state, since callers only pass the bridge-defined states.
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

/* Program the STP state of (port, vid) via SPMS. The payload is heap
 * allocated because MLXSW_REG_SPMS_LEN is too large for the stack.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Read the switch base MAC from the SPAD register into mlxsw_sp->base_mac;
 * per-port addresses are later derived from it.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
/* Enable/disable packet sampling on a port at 1-in-'rate' via MPSC. */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

/* Set the port's administrative state (up/down) via PAOS. */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

/* Program the port's hardware MAC address via PPAD. */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port MAC from the switch base MAC by adding the local port
 * number to the last octet, then program it into hardware.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

/* Set the port MTU via PMTU. The netdev MTU is grown by the mlxsw TX header
 * and the Ethernet header before being checked against the hardware maximum
 * reported by a PMTU query.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

/* Assign the port to switch partition 'swid' via PSPA. */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

/* Enable/disable Virtual Port (802.1Q-per-port) mode via SVPE. */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable/disable FDB learning for (port, vid) via SPVMLR. Heap-allocated
 * payload because MLXSW_REG_SPVMLR_LEN is too large for the stack.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

/* Program the port's PVID via SPVID. */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

/* Allow or forbid untagged frames on ingress via SPAFT. */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

/* Set the port PVID. A PVID of 0 means "no PVID": untagged frames are
 * dropped instead of being classified. On partial failure the previous
 * PVID is restored.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

/* Configure the port's system-port mapping via SSPR. */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

/* Query which front-panel module/width/lane a local port maps to (PMLP).
 * Module and lane are taken from entry 0 of the lane list.
 */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

/* Map a port to 'width' consecutive lanes of 'module' starting at 'lane'
 * via PMLP.
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* Unmap a port from its module by programming a zero lane width. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* ndo_open: set the port administratively up, then start the TX queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop: stop the TX queue, then set the port administratively down. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

/* ndo_start_xmit: prepend the mlxsw TX header and hand the SKB to the core
 * for transmission. Reallocates headroom when the SKB cannot hold the TX
 * header, pads runt frames, and keeps per-CPU byte/packet/drop statistics.
 * On a full queue race the packet is dropped (never requeued), per the
 * comment below.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		/* eth_skb_pad() frees the SKB on failure. */
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: intentionally empty — RX filtering is handled by the
 * switch hardware, not by the CPU port.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

/* ndo_set_mac_address: validate and program a new MAC, updating the netdev
 * copy only after hardware accepted it.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* Headroom threshold for a priority group buffer: two MTUs worth of cells. */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* PFC delay buffer in cells: the configured delay (bits, rounded up to
 * bytes) scaled by the cell factor, plus one MTU of slack.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}
mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 887 BITS_PER_BYTE)); 888 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 889 mtu); 890 } 891 892 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 893 * Assumes 100m cable and maximum MTU. 894 */ 895 #define MLXSW_SP_PAUSE_DELAY 58752 896 897 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 898 u16 delay, bool pfc, bool pause) 899 { 900 if (pfc) 901 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 902 else if (pause) 903 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 904 else 905 return 0; 906 } 907 908 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 909 bool lossy) 910 { 911 if (lossy) 912 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 913 else 914 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 915 thres); 916 } 917 918 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 919 u8 *prio_tc, bool pause_en, 920 struct ieee_pfc *my_pfc) 921 { 922 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 923 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 924 u16 delay = !!my_pfc ? 
				       my_pfc->delay : 0;
	/* (continuation of __mlxsw_sp_port_headroom_set; signature above this
	 * view: delay is taken from the PFC config when present, else 0.)
	 */
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	/* Read the current per-port buffer configuration, then rewrite only
	 * the priority groups that are actually mapped to by some priority.
	 */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		/* Configure PG 'i' only if at least one priority maps to it;
		 * the PG is PFC-enabled if that priority has PFC enabled.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		/* A PG is lossy unless either PFC or global pause applies. */
		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		total_cells = thres_cells + delay_cells;

		/* Reject configurations that exceed the maximum headroom the
		 * port can hold.
		 */
		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Derive prio->TC mapping and PFC config from the port's DCB state (defaults
 * when DCB/ETS is not configured) and program the port headroom accordingly.
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

/* ndo_change_mtu: update headroom, SPAN and port MTU in order; on failure,
 * roll back the already-applied steps using the old dev->mtu.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Sum the software (CPU-path) per-CPU counters into @stats, using the
 * u64_stats seqcount to get a consistent snapshot per CPU.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* ndo_has_offload_stats: only CPU-hit software stats are exposed. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* ndo_get_offload_stats: dispatch the requested attribute to its getter. */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group/prio for this port into @ppcnt_pl. */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill rtnl_link_stats64 from the IEEE 802.3 PPCNT counter group. */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Gather extended per-port statistics (ECN marks, per-TC WRED/backlog/tail
 * drops, per-prio tx counters) from the PPCNT extended counter groups.
 * Individual query failures are skipped; stale values remain in @xstats.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Delayed work: periodically refresh the cached HW statistics while the
 * carrier is up, then re-arm itself. NOTE(review): when the carrier is down
 * the cache (including xstats backlog) is left untouched — stale values
 * persist until the next link-up refresh; confirm this is intended.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program one SPVM record: (un)register the port as a member of the VLAN
 * range [vid_begin, vid_end]. The payload is heap-allocated because
 * MLXSW_REG_SPVM_LEN is too large for the stack.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Apply a VLAN membership change over an arbitrary range by splitting it
 * into SPVM-register-sized chunks.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLANs configured on the port; the default VID is kept unless
 * @flush_default is set.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the port-VLAN from whatever entity currently owns it: a bridge
 * port if bridged, otherwise the router if it has a FID.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a port-VLAN object for @vid: program VLAN membership in HW (the
 * default VID egresses untagged), then allocate and link the tracking
 * structure. Returns ERR_PTR(-EEXIST) if the VID is already configured.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Tear down a port-VLAN: leave bridge/router, unlink, free, and remove the
 * VLAN membership from HW.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* ndo_vlan_rx_add_vid handler. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

/* ndo_vlan_rx_kill_vid handler. */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Look up a matchall offload entry on this port by its TC cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirror action as a SPAN session towards act->dev. */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
					true, &mirror->span_id);
}

/* Remove the SPAN session created for a matchall mirror. */
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action: record psample parameters under RCU and
 * enable HW sampling at the requested rate. Only one sampler per port.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

/* Disable HW sampling on the port and clear the psample group pointer. */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int
mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct tc_cls_matchall_offload *f,
			       bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	/* Only a single mirred or sample action on an ETH_P_ALL classifier
	 * can be offloaded; anything else is rejected.
	 */
	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* Remove a previously offloaded matchall entry identified by f->cookie. */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

/* Dispatch a matchall classifier command to add/delete handlers. */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch a flower classifier command to the ACL flower implementation. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Block callback for matchall: flower is handled by a separate callback,
 * hence CLSFLOWER returns 0 here (not an error).
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Ingress variant of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

/* Egress variant of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* Block callback for flower: matchall is handled elsewhere, hence
 * CLSMATCHALL returns 0 here (not an error).
 */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Bind a TC block to the port for flower offload. The ACL block is shared
 * across ports bound to the same TC block: the first binder creates it and
 * registers the callback, later binders only take a reference
 * (tcf_block_cb_incref).
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tcf_block *block, bool ingress,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = __tcf_block_cb_register(block,
						   mlxsw_sp_setup_tc_block_cb_flower,
						   mlxsw_sp, acl_block, extack);
		if (IS_ERR(block_cb)) {
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
	} else {
		acl_block = tcf_block_cb_priv(block_cb);
	}
	tcf_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	return 0;

err_block_bind:
	/* Only destroy the ACL block if we dropped the last reference;
	 * err_cb_register is reached directly when registration itself
	 * failed (block was freshly created, no callback to unregister).
	 */
	if (!tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
err_cb_register:
		mlxsw_sp_acl_block_destroy(acl_block);
	}
	return err;
}

/* Undo mlxsw_sp_setup_tc_block_flower_bind(); the shared ACL block is
 * destroyed when the last reference is dropped.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = tcf_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
		mlxsw_sp_acl_block_destroy(acl_block);
	}
}

/* TC_SETUP_BLOCK handler: register both the matchall callback (per port)
 * and the flower callback (per shared ACL block) for the given direction.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	switch (f->command) {
	case TC_BLOCK_BIND:
		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
					    mlxsw_sp_port, f->extack);
		if (err)
			return err;
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
							  f->block, ingress,
							  f->extack);
		if (err) {
			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
			return err;
		}
		return 0;
	case TC_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f->block, ingress);
		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_setup_tc: dispatch block/RED/PRIO qdisc offload requests. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}


/* NETIF_F_HW_TC toggle: refuse to disable while offloaded rules exist;
 * otherwise flip the ACL blocks' disabled refcounts accordingly.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* NETIF_F_LOOPBACK toggle: the port is taken administratively down around
 * the PPLR write and brought back up if it was running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply @feature_handler if @feature actually changed in @wanted_features,
 * and keep dev->features in sync with the outcome.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
/* ndo_set_features: apply each supported feature toggle; on any failure,
 * restore the feature set that was in effect before this call.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

/* ndo_get_devlink_port: map the netdev to its devlink port object. */
static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: copy the config from user space, apply it through the
 * per-ASIC PTP ops, and copy the (possibly adjusted) config back.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* SIOCGHWTSTAMP: report the current hardware timestamping config. */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Reset hardware timestamping to a zeroed (disabled) configuration. */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* ndo_do_ioctl: only the hardware timestamping ioctls are supported. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/* net_device operations for a Spectrum front-panel port. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

/* ethtool get_drvinfo: driver/bus identity and firmware revision. */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

/* ethtool get_pauseparam: report the cached pause state. */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Program global pause (PFCC register) for the port. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool set_pauseparam: pause is mutually exclusive with PFC; headroom is
 * adjusted first and rolled back if the PFCC write fails.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;
	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Restore headroom for the previously configured pause state. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* One ethtool statistics entry: name, PPCNT field getter, and whether the
 * value is in buffer cells (needs conversion to bytes).
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 interface counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group, including packet size histogram. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (EtherLike-MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Per-reason discard counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counter group (instantiated once per IEEE priority). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-TC counter group (instantiated once per queue); the transmit queue
 * depth is reported in cells and converted to bytes (cells_bytes).
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total number of ethtool statistics strings exported per port. */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit the per-priority statistics strings (continues past this view). */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN,
			 "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Format the per-TC counter names ("<stat>_<tc>") into the ethtool
 * strings buffer and advance the buffer pointer.
 */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool get_strings: emit the counter names in the same order in which
 * mlxsw_sp_port_get_stats() fills the corresponding values.
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

/* ethtool set_phys_id: toggle the port LED via the MLCR register for
 * physical port identification.
 */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its descriptor table and length. */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2863_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_3635_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_DISCARD_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Read one PPCNT counter group for @prio into data[data_index..],
 * converting cell counts to bytes where the descriptor requests it.
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool get_ethtool_stats: fill all counter groups in the order
 * matching mlxsw_sp_port_get_strings().
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2863 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

/* ethtool get_sset_count. */
static int mlxsw_sp_port_get_sset_count(struct
net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* One Spectrum-1 PTYS protocol bit (or group of bits) together with its
 * ethtool link mode and speed.
 */
struct mlxsw_sp1_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	/* The single 56G PTYS bit maps to four distinct ethtool modes. */
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Derive the supported port types (FIBRE/Backplane) from the PTYS
 * capability mask.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Translate a PTYS protocol mask into an ethtool link mode bitmap. */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Report speed/duplex from the operational PTYS mask; unknown when the
 * carrier is down or no table entry matches.
 */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto &
		    mlxsw_sp1_port_link_mode[i].mask) {
			speed = mlxsw_sp1_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

/* Collect the PTYS bits for every advertised ethtool link mode. */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect the PTYS bits matching exactly the given speed. */
static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect the PTYS bits for all speeds up to and including @upper_speed. */
static u32
mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Spectrum-1 uses a fixed 25G single-lane base speed. */
static int
mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32 *base_speed)
{
	*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
	return 0;
}

/* Pack the legacy (Spectrum-1) PTYS register payload. */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

/* Unpack the legacy (Spectrum-1) PTYS register payload. */
static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload,
				  p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* Spectrum-1 link mode/speed operations (legacy PTYS register layout). */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp1_from_ptys_link,
	.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
	.to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed,
	.port_speed_base = mlxsw_sp1_port_speed_base,
	.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
};

/* Spectrum-2 extended PTYS: each speed bit corresponds to a set of
 * ethtool link modes, listed in the arrays below.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

/* One Spectrum-2 extended PTYS speed bit together with the set of ethtool
 * link modes it represents.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.speed =
			SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
		.speed = SPEED_200000,
	},
};

#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)

/* On Spectrum-2 both fibre and backplane are reported as supported
 * regardless of the PTYS capability mask.
 */
static void
mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set every ethtool mode bit associated with one PTYS link mode entry. */
static void
mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			  unsigned long *mode)
{
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++)
		__set_bit(link_mode->mask_ethtool[i], mode);
}

/* Translate a PTYS protocol mask into an ethtool link mode bitmap. */
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
			mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
						  mode);
	}
}

/* Report speed/duplex from the operational PTYS mask; unknown when the
 * carrier is down or no table entry matches.
 */
static void
mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
			speed = mlxsw_sp2_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

/* An ethtool bitmap matches a link mode only when all of the mode's
 * ethtool bits are set in it.
 */
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			   const unsigned long *mode)
{
	int cnt = 0;
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++) {
		if (test_bit(link_mode->mask_ethtool[i], mode))
			cnt++;
	}

	return cnt == link_mode->m_ethtool_len;
}

/* Collect the PTYS bits for every fully advertised link mode. */
static u32
mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
					       cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect the PTYS bits matching exactly the given speed. */
static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp2_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect the PTYS bits for all speeds up to and including @upper_speed. */
static u32
mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int
mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32
*base_speed)
{
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	int err;

	/* In Spectrum-2, the speed of 1x can change from port to port, so query
	 * it from firmware.
	 */
	mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);

	if (eth_proto_cap &
	    MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) {
		*base_speed = MLXSW_SP_PORT_BASE_SPEED_50G;
		return 0;
	}

	if (eth_proto_cap &
	    MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) {
		*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
		return 0;
	}

	return -EIO;
}

/* Pack the extended (Spectrum-2) PTYS register payload. */
static void
mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin,
			    bool autoneg)
{
	mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
}

/* Unpack the extended (Spectrum-2) PTYS register payload. */
static void
mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
				      p_eth_proto_admin, p_eth_proto_oper);
}

/* Spectrum-2 link mode/speed operations (extended PTYS register layout). */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp2_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp2_from_ptys_link,
	.from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
	.to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed,
	.port_speed_base = mlxsw_sp2_port_speed_base,
	.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
};

/* Build the "supported" part of the link ksettings: pause/autoneg plus
 * port types and link modes derived from the PTYS capability mask.
 */
static void
mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
				 struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
	ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported);
}

/* Build the "advertising" part of the link ksettings from the admin PTYS
 * mask; nothing is advertised when autoneg is off.
 */
static void
mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
				 u32 eth_proto_admin, bool autoneg,
				 struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
			    cmd->link_modes.advertising);
}

/* Map the PTYS connector type to the corresponding ethtool PORT_* value. */
static u8
mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
{
	switch (connector_type) {
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
		return PORT_OTHER;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
		return PORT_NONE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
		return PORT_TP;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
		return PORT_AUI;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
		return PORT_BNC;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
		return PORT_MII;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
		return PORT_FIBRE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
		return PORT_DA;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
		return PORT_OTHER;
	default:
		WARN_ON_ONCE(1);
		return PORT_OTHER;
	}
}

/* ethtool get_link_ksettings. */
static int
mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3203 struct ethtool_link_ksettings *cmd) 3204 { 3205 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3206 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3207 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3208 const struct mlxsw_sp_port_type_speed_ops *ops; 3209 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3210 u8 connector_type; 3211 bool autoneg; 3212 int err; 3213 3214 ops = mlxsw_sp->port_type_speed_ops; 3215 3216 autoneg = mlxsw_sp_port->link.autoneg; 3217 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3218 0, false); 3219 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3220 if (err) 3221 return err; 3222 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3223 ð_proto_admin, ð_proto_oper); 3224 3225 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd); 3226 3227 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3228 cmd); 3229 3230 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 3231 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3232 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3233 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3234 eth_proto_oper, cmd); 3235 3236 return 0; 3237 } 3238 3239 static int 3240 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3241 const struct ethtool_link_ksettings *cmd) 3242 { 3243 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3244 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3245 const struct mlxsw_sp_port_type_speed_ops *ops; 3246 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3247 u32 eth_proto_cap, eth_proto_new; 3248 bool autoneg; 3249 int err; 3250 3251 ops = mlxsw_sp->port_type_speed_ops; 3252 3253 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3254 0, false); 3255 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3256 if (err) 3257 return err; 3258 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3259 3260 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3261 if (!autoneg && cmd->base.speed == SPEED_56000) { 3262 netdev_err(dev, "56G not supported with autoneg off\n"); 3263 return -EINVAL; 3264 } 3265 eth_proto_new = autoneg ? 
3266 ops->to_ptys_advert_link(mlxsw_sp, cmd) : 3267 ops->to_ptys_speed(mlxsw_sp, cmd->base.speed); 3268 3269 eth_proto_new = eth_proto_new & eth_proto_cap; 3270 if (!eth_proto_new) { 3271 netdev_err(dev, "No supported speed requested\n"); 3272 return -EINVAL; 3273 } 3274 3275 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3276 eth_proto_new, autoneg); 3277 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3278 if (err) 3279 return err; 3280 3281 mlxsw_sp_port->link.autoneg = autoneg; 3282 3283 if (!netif_running(dev)) 3284 return 0; 3285 3286 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3287 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3288 3289 return 0; 3290 } 3291 3292 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3293 struct ethtool_modinfo *modinfo) 3294 { 3295 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3296 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3297 int err; 3298 3299 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3300 mlxsw_sp_port->mapping.module, 3301 modinfo); 3302 3303 return err; 3304 } 3305 3306 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3307 struct ethtool_eeprom *ee, 3308 u8 *data) 3309 { 3310 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3311 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3312 int err; 3313 3314 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3315 mlxsw_sp_port->mapping.module, ee, 3316 data); 3317 3318 return err; 3319 } 3320 3321 static int 3322 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3323 { 3324 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3325 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3326 3327 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3328 } 3329 3330 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3331 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 3332 .get_link = ethtool_op_get_link, 3333 
.get_pauseparam = mlxsw_sp_port_get_pauseparam, 3334 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3335 .get_strings = mlxsw_sp_port_get_strings, 3336 .set_phys_id = mlxsw_sp_port_set_phys_id, 3337 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3338 .get_sset_count = mlxsw_sp_port_get_sset_count, 3339 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3340 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3341 .get_module_info = mlxsw_sp_get_module_info, 3342 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3343 .get_ts_info = mlxsw_sp_get_ts_info, 3344 }; 3345 3346 static int 3347 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 3348 { 3349 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3350 const struct mlxsw_sp_port_type_speed_ops *ops; 3351 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3352 u32 eth_proto_admin; 3353 u32 upper_speed; 3354 u32 base_speed; 3355 int err; 3356 3357 ops = mlxsw_sp->port_type_speed_ops; 3358 3359 err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, 3360 &base_speed); 3361 if (err) 3362 return err; 3363 upper_speed = base_speed * width; 3364 3365 eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); 3366 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3367 eth_proto_admin, mlxsw_sp_port->link.autoneg); 3368 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3369 } 3370 3371 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3372 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3373 bool dwrr, u8 dwrr_weight) 3374 { 3375 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3376 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3377 3378 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3379 next_index); 3380 mlxsw_reg_qeec_de_set(qeec_pl, true); 3381 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3382 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 3383 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3384 } 

/* Configure the maximum shaper rate of one QEEC scheduling element. */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Configure the minimum shaper rate of one QEEC scheduling element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map a switch priority to a traffic class via the QTCT register. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Build the default egress scheduling hierarchy for a port and reset all
 * shapers. TCs i and i+8 appear to be the unicast/multicast pair for TC i
 * (see the min-shaper loop below) -- TODO confirm against the QEEC spec.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HIERARCY_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable/disable multicast-aware traffic-class mapping (QTCTM). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create one front-panel port: allocate the netdev, program the hardware
 * (module mapping, SWID, MAC, speeds, MTU, buffers, ETS, DCB, FIDs, qdiscs,
 * NVE, VLANs) and register it. Unwound in reverse order via the goto chain
 * on failure.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   module + 1, split, lane / width,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
3564 } 3565 3566 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3567 GFP_KERNEL); 3568 if (!mlxsw_sp_port->sample) { 3569 err = -ENOMEM; 3570 goto err_alloc_sample; 3571 } 3572 3573 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3574 &update_stats_cache); 3575 3576 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3577 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3578 3579 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane); 3580 if (err) { 3581 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3582 mlxsw_sp_port->local_port); 3583 goto err_port_module_map; 3584 } 3585 3586 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3587 if (err) { 3588 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3589 mlxsw_sp_port->local_port); 3590 goto err_port_swid_set; 3591 } 3592 3593 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3594 if (err) { 3595 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3596 mlxsw_sp_port->local_port); 3597 goto err_dev_addr_init; 3598 } 3599 3600 netif_carrier_off(dev); 3601 3602 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3603 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3604 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3605 3606 dev->min_mtu = 0; 3607 dev->max_mtu = ETH_MAX_MTU; 3608 3609 /* Each packet needs to have a Tx header (metadata) on top all other 3610 * headers. 
3611 */ 3612 dev->needed_headroom = MLXSW_TXHDR_LEN; 3613 3614 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3615 if (err) { 3616 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3617 mlxsw_sp_port->local_port); 3618 goto err_port_system_port_mapping_set; 3619 } 3620 3621 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 3622 if (err) { 3623 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3624 mlxsw_sp_port->local_port); 3625 goto err_port_speed_by_width_set; 3626 } 3627 3628 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3629 if (err) { 3630 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3631 mlxsw_sp_port->local_port); 3632 goto err_port_mtu_set; 3633 } 3634 3635 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3636 if (err) 3637 goto err_port_admin_status_set; 3638 3639 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3640 if (err) { 3641 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3642 mlxsw_sp_port->local_port); 3643 goto err_port_buffers_init; 3644 } 3645 3646 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3647 if (err) { 3648 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3649 mlxsw_sp_port->local_port); 3650 goto err_port_ets_init; 3651 } 3652 3653 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3654 if (err) { 3655 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3656 mlxsw_sp_port->local_port); 3657 goto err_port_tc_mc_mode; 3658 } 3659 3660 /* ETS and buffers must be initialized before DCB. 
*/ 3661 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3662 if (err) { 3663 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3664 mlxsw_sp_port->local_port); 3665 goto err_port_dcb_init; 3666 } 3667 3668 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3669 if (err) { 3670 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3671 mlxsw_sp_port->local_port); 3672 goto err_port_fids_init; 3673 } 3674 3675 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3676 if (err) { 3677 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3678 mlxsw_sp_port->local_port); 3679 goto err_port_qdiscs_init; 3680 } 3681 3682 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3683 if (err) { 3684 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3685 mlxsw_sp_port->local_port); 3686 goto err_port_nve_init; 3687 } 3688 3689 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3690 if (err) { 3691 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3692 mlxsw_sp_port->local_port); 3693 goto err_port_pvid_set; 3694 } 3695 3696 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3697 MLXSW_SP_DEFAULT_VID); 3698 if (IS_ERR(mlxsw_sp_port_vlan)) { 3699 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3700 mlxsw_sp_port->local_port); 3701 err = PTR_ERR(mlxsw_sp_port_vlan); 3702 goto err_port_vlan_create; 3703 } 3704 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3705 3706 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3707 err = register_netdev(dev); 3708 if (err) { 3709 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3710 mlxsw_sp_port->local_port); 3711 goto err_register_netdev; 3712 } 3713 3714 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3715 mlxsw_sp_port, dev); 3716 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3717 return 0; 3718 3719 err_register_netdev: 3720 
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down a port created by mlxsw_sp_port_create(), in reverse order of
 * creation.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* True when a port object exists at this local port index. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Remove all created ports and free the lookup arrays. Local port 0 is the
 * CPU port and is never created, hence the loop starts at 1.
 */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}

/* Allocate the port arrays and create every port that has a module mapped
 * (width != 0). port_to_module[] caches the module per local port; -1 marks
 * an unmapped port.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
						 GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		/* Mark as invalid */
		mlxsw_sp->port_to_module[i] = -1;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}

/* Return the first local port of the split cluster this port belongs to. */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

/* Create 'count' split ports starting at base_port, spaced 'offset' local
 * ports apart, each using an equal share of the module's lanes.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count, u8 offset)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   true, module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Re-create the original (unsplit) ports after a failed or undone split. */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

/* devlink port-split handler: validate the request, remove the affected
 * ports and create the split ports. On failure the original ports are
 * restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 local_ports_in_1x, local_ports_in_2x, offset;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
		return -EIO;

	local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
	local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		offset = local_ports_in_2x;
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	} else {
		offset = local_ports_in_1x;
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
					 offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

/* devlink port-unsplit handler: remove the split ports and re-create the
 * original unsplit ports.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 local_ports_in_1x, local_ports_in_2x, offset;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
		return -EIO;

	local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
	local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	if (count == 2)
		offset = local_ports_in_2x;
	else
		offset = local_ports_in_1x;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

/* PUDE event handler: reflect the hardware operational status into the
 * netdev's carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

/* Drain one MTPPTR FIFO event: hand every timestamp record to the SP1 PTP
 * state machine.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec =
mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4060 for (i = 0; i < num_rec; i++) { 4061 u8 domain_number; 4062 u8 message_type; 4063 u16 sequence_id; 4064 u64 timestamp; 4065 4066 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4067 &domain_number, &sequence_id, 4068 ×tamp); 4069 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4070 message_type, domain_number, 4071 sequence_id, timestamp); 4072 } 4073 } 4074 4075 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4076 char *mtpptr_pl, void *priv) 4077 { 4078 struct mlxsw_sp *mlxsw_sp = priv; 4079 4080 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4081 } 4082 4083 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4084 char *mtpptr_pl, void *priv) 4085 { 4086 struct mlxsw_sp *mlxsw_sp = priv; 4087 4088 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4089 } 4090 4091 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4092 u8 local_port, void *priv) 4093 { 4094 struct mlxsw_sp *mlxsw_sp = priv; 4095 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4096 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4097 4098 if (unlikely(!mlxsw_sp_port)) { 4099 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4100 local_port); 4101 return; 4102 } 4103 4104 skb->dev = mlxsw_sp_port->dev; 4105 4106 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4107 u64_stats_update_begin(&pcpu_stats->syncp); 4108 pcpu_stats->rx_packets++; 4109 pcpu_stats->rx_bytes += skb->len; 4110 u64_stats_update_end(&pcpu_stats->syncp); 4111 4112 skb->protocol = eth_type_trans(skb, skb->dev); 4113 netif_receive_skb(skb); 4114 } 4115 4116 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4117 void *priv) 4118 { 4119 skb->offload_fwd_mark = 1; 4120 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4121 } 4122 4123 static void 
mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
				  u8 local_port, void *priv)
{
	/* Mark both L2 and L3 forwarding as already done in hardware. */
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* Rx listener for sampled packets: forward to the psample group configured
 * on the port (if any), then consume the skb.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

/* Rx listener for PTP packets: hand off to the per-ASIC PTP receive path. */
static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
				     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

/* Convenience wrappers around MLXSW_RXL/MLXSW_EVENTL that select the Rx
 * listener and prefix the trap group with SP_.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Trap/event listeners common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

/* Listeners specific to Spectrum-1 (PTP timestamp FIFO events). */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Program a rate/burst policer (QPCR) for every CPU trap group. */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case
MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4313 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4314 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 4315 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4316 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4317 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4318 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4319 rate = 1024; 4320 burst_size = 7; 4321 break; 4322 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4323 rate = 1024; 4324 burst_size = 7; 4325 break; 4326 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4327 rate = 24 * 1024; 4328 burst_size = 12; 4329 break; 4330 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: 4331 rate = 19 * 1024; 4332 burst_size = 12; 4333 break; 4334 default: 4335 continue; 4336 } 4337 4338 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 4339 burst_size); 4340 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 4341 if (err) 4342 return err; 4343 } 4344 4345 return 0; 4346 } 4347 4348 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 4349 { 4350 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4351 enum mlxsw_reg_htgt_trap_group i; 4352 int max_cpu_policers; 4353 int max_trap_groups; 4354 u8 priority, tc; 4355 u16 policer_id; 4356 int err; 4357 4358 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 4359 return -EIO; 4360 4361 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 4362 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4363 4364 for (i = 0; i < max_trap_groups; i++) { 4365 policer_id = i; 4366 switch (i) { 4367 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4368 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4369 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4370 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4371 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4372 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4373 priority = 5; 4374 tc = 5; 4375 break; 4376 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4377 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4378 priority = 4; 4379 tc = 4; 4380 break; 4381 case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4382 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4383 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4384 priority = 3; 4385 tc = 3; 4386 break; 4387 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4388 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4389 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 4390 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: 4391 priority = 2; 4392 tc = 2; 4393 break; 4394 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 4395 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4396 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4397 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4398 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4399 priority = 1; 4400 tc = 1; 4401 break; 4402 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 4403 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 4404 tc = MLXSW_REG_HTGT_DEFAULT_TC; 4405 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 4406 break; 4407 default: 4408 continue; 4409 } 4410 4411 if (max_cpu_policers <= policer_id && 4412 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 4413 return -EIO; 4414 4415 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 4416 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 4417 if (err) 4418 return err; 4419 } 4420 4421 return 0; 4422 } 4423 4424 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp, 4425 const struct mlxsw_listener listeners[], 4426 size_t listeners_count) 4427 { 4428 int i; 4429 int err; 4430 4431 for (i = 0; i < listeners_count; i++) { 4432 err = mlxsw_core_trap_register(mlxsw_sp->core, 4433 &listeners[i], 4434 mlxsw_sp); 4435 if (err) 4436 goto err_listener_register; 4437 4438 } 4439 return 0; 4440 4441 err_listener_register: 4442 for (i--; i >= 0; i--) { 4443 mlxsw_core_trap_unregister(mlxsw_sp->core, 4444 &listeners[i], 4445 mlxsw_sp); 4446 } 4447 return err; 4448 } 4449 4450 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp, 4451 const struct mlxsw_listener listeners[], 4452 size_t listeners_count) 4453 { 4454 int i; 4455 4456 for (i = 
0; i < listeners_count; i++) { 4457 mlxsw_core_trap_unregister(mlxsw_sp->core, 4458 &listeners[i], 4459 mlxsw_sp); 4460 } 4461 } 4462 4463 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 4464 { 4465 int err; 4466 4467 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 4468 if (err) 4469 return err; 4470 4471 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 4472 if (err) 4473 return err; 4474 4475 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener, 4476 ARRAY_SIZE(mlxsw_sp_listener)); 4477 if (err) 4478 return err; 4479 4480 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners, 4481 mlxsw_sp->listeners_count); 4482 if (err) 4483 goto err_extra_traps_init; 4484 4485 return 0; 4486 4487 err_extra_traps_init: 4488 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 4489 ARRAY_SIZE(mlxsw_sp_listener)); 4490 return err; 4491 } 4492 4493 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 4494 { 4495 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners, 4496 mlxsw_sp->listeners_count); 4497 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 4498 ARRAY_SIZE(mlxsw_sp_listener)); 4499 } 4500 4501 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 4502 4503 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 4504 { 4505 char slcr_pl[MLXSW_REG_SLCR_LEN]; 4506 u32 seed; 4507 int err; 4508 4509 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 4510 MLXSW_SP_LAG_SEED_INIT); 4511 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 4512 MLXSW_REG_SLCR_LAG_HASH_DMAC | 4513 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 4514 MLXSW_REG_SLCR_LAG_HASH_VLANID | 4515 MLXSW_REG_SLCR_LAG_HASH_SIP | 4516 MLXSW_REG_SLCR_LAG_HASH_DIP | 4517 MLXSW_REG_SLCR_LAG_HASH_SPORT | 4518 MLXSW_REG_SLCR_LAG_HASH_DPORT | 4519 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 4520 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 4521 if (err) 4522 return err; 4523 4524 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 4525 
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 4526 return -EIO; 4527 4528 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 4529 sizeof(struct mlxsw_sp_upper), 4530 GFP_KERNEL); 4531 if (!mlxsw_sp->lags) 4532 return -ENOMEM; 4533 4534 return 0; 4535 } 4536 4537 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 4538 { 4539 kfree(mlxsw_sp->lags); 4540 } 4541 4542 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 4543 { 4544 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4545 4546 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 4547 MLXSW_REG_HTGT_INVALID_POLICER, 4548 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 4549 MLXSW_REG_HTGT_DEFAULT_TC); 4550 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 4551 } 4552 4553 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 4554 .clock_init = mlxsw_sp1_ptp_clock_init, 4555 .clock_fini = mlxsw_sp1_ptp_clock_fini, 4556 .init = mlxsw_sp1_ptp_init, 4557 .fini = mlxsw_sp1_ptp_fini, 4558 .receive = mlxsw_sp1_ptp_receive, 4559 .transmitted = mlxsw_sp1_ptp_transmitted, 4560 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 4561 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 4562 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 4563 }; 4564 4565 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 4566 .clock_init = mlxsw_sp2_ptp_clock_init, 4567 .clock_fini = mlxsw_sp2_ptp_clock_fini, 4568 .init = mlxsw_sp2_ptp_init, 4569 .fini = mlxsw_sp2_ptp_fini, 4570 .receive = mlxsw_sp2_ptp_receive, 4571 .transmitted = mlxsw_sp2_ptp_transmitted, 4572 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 4573 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 4574 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 4575 }; 4576 4577 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 4578 unsigned long event, void *ptr); 4579 4580 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 4581 const struct mlxsw_bus_info *mlxsw_bus_info) 4582 { 4583 struct mlxsw_sp *mlxsw_sp = 
mlxsw_core_driver_priv(mlxsw_core); 4584 int err; 4585 4586 mlxsw_sp->core = mlxsw_core; 4587 mlxsw_sp->bus_info = mlxsw_bus_info; 4588 4589 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 4590 if (err) 4591 return err; 4592 4593 err = mlxsw_sp_base_mac_get(mlxsw_sp); 4594 if (err) { 4595 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 4596 return err; 4597 } 4598 4599 err = mlxsw_sp_kvdl_init(mlxsw_sp); 4600 if (err) { 4601 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 4602 return err; 4603 } 4604 4605 err = mlxsw_sp_fids_init(mlxsw_sp); 4606 if (err) { 4607 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 4608 goto err_fids_init; 4609 } 4610 4611 err = mlxsw_sp_traps_init(mlxsw_sp); 4612 if (err) { 4613 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 4614 goto err_traps_init; 4615 } 4616 4617 err = mlxsw_sp_buffers_init(mlxsw_sp); 4618 if (err) { 4619 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 4620 goto err_buffers_init; 4621 } 4622 4623 err = mlxsw_sp_lag_init(mlxsw_sp); 4624 if (err) { 4625 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 4626 goto err_lag_init; 4627 } 4628 4629 /* Initialize SPAN before router and switchdev, so that those components 4630 * can call mlxsw_sp_span_respin(). 
4631 */ 4632 err = mlxsw_sp_span_init(mlxsw_sp); 4633 if (err) { 4634 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 4635 goto err_span_init; 4636 } 4637 4638 err = mlxsw_sp_switchdev_init(mlxsw_sp); 4639 if (err) { 4640 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 4641 goto err_switchdev_init; 4642 } 4643 4644 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 4645 if (err) { 4646 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 4647 goto err_counter_pool_init; 4648 } 4649 4650 err = mlxsw_sp_afa_init(mlxsw_sp); 4651 if (err) { 4652 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 4653 goto err_afa_init; 4654 } 4655 4656 err = mlxsw_sp_nve_init(mlxsw_sp); 4657 if (err) { 4658 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 4659 goto err_nve_init; 4660 } 4661 4662 err = mlxsw_sp_acl_init(mlxsw_sp); 4663 if (err) { 4664 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 4665 goto err_acl_init; 4666 } 4667 4668 err = mlxsw_sp_router_init(mlxsw_sp); 4669 if (err) { 4670 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 4671 goto err_router_init; 4672 } 4673 4674 if (mlxsw_sp->bus_info->read_frc_capable) { 4675 /* NULL is a valid return value from clock_init */ 4676 mlxsw_sp->clock = 4677 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 4678 mlxsw_sp->bus_info->dev); 4679 if (IS_ERR(mlxsw_sp->clock)) { 4680 err = PTR_ERR(mlxsw_sp->clock); 4681 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 4682 goto err_ptp_clock_init; 4683 } 4684 } 4685 4686 if (mlxsw_sp->clock) { 4687 /* NULL is a valid return value from ptp_ops->init */ 4688 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 4689 if (IS_ERR(mlxsw_sp->ptp_state)) { 4690 err = PTR_ERR(mlxsw_sp->ptp_state); 4691 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 4692 goto err_ptp_init; 4693 } 4694 } 4695 4696 /* Initialize netdevice notifier after router and SPAN is 
initialized, 4697 * so that the event handler can use router structures and call SPAN 4698 * respin. 4699 */ 4700 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 4701 err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4702 if (err) { 4703 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 4704 goto err_netdev_notifier; 4705 } 4706 4707 err = mlxsw_sp_dpipe_init(mlxsw_sp); 4708 if (err) { 4709 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 4710 goto err_dpipe_init; 4711 } 4712 4713 err = mlxsw_sp_ports_create(mlxsw_sp); 4714 if (err) { 4715 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 4716 goto err_ports_create; 4717 } 4718 4719 return 0; 4720 4721 err_ports_create: 4722 mlxsw_sp_dpipe_fini(mlxsw_sp); 4723 err_dpipe_init: 4724 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4725 err_netdev_notifier: 4726 if (mlxsw_sp->clock) 4727 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 4728 err_ptp_init: 4729 if (mlxsw_sp->clock) 4730 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 4731 err_ptp_clock_init: 4732 mlxsw_sp_router_fini(mlxsw_sp); 4733 err_router_init: 4734 mlxsw_sp_acl_fini(mlxsw_sp); 4735 err_acl_init: 4736 mlxsw_sp_nve_fini(mlxsw_sp); 4737 err_nve_init: 4738 mlxsw_sp_afa_fini(mlxsw_sp); 4739 err_afa_init: 4740 mlxsw_sp_counter_pool_fini(mlxsw_sp); 4741 err_counter_pool_init: 4742 mlxsw_sp_switchdev_fini(mlxsw_sp); 4743 err_switchdev_init: 4744 mlxsw_sp_span_fini(mlxsw_sp); 4745 err_span_init: 4746 mlxsw_sp_lag_fini(mlxsw_sp); 4747 err_lag_init: 4748 mlxsw_sp_buffers_fini(mlxsw_sp); 4749 err_buffers_init: 4750 mlxsw_sp_traps_fini(mlxsw_sp); 4751 err_traps_init: 4752 mlxsw_sp_fids_fini(mlxsw_sp); 4753 err_fids_init: 4754 mlxsw_sp_kvdl_fini(mlxsw_sp); 4755 return err; 4756 } 4757 4758 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 4759 const struct mlxsw_bus_info *mlxsw_bus_info) 4760 { 4761 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4762 
4763 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; 4764 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; 4765 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 4766 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 4767 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 4768 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 4769 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 4770 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 4771 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 4772 mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; 4773 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 4774 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 4775 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 4776 mlxsw_sp->listeners = mlxsw_sp1_listener; 4777 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 4778 4779 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); 4780 } 4781 4782 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 4783 const struct mlxsw_bus_info *mlxsw_bus_info) 4784 { 4785 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4786 4787 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 4788 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 4789 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 4790 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 4791 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 4792 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 4793 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 4794 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 4795 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 4796 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 4797 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 4798 4799 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); 4800 } 4801 4802 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 4803 { 4804 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4805 4806 mlxsw_sp_ports_remove(mlxsw_sp); 4807 mlxsw_sp_dpipe_fini(mlxsw_sp); 4808 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4809 if (mlxsw_sp->clock) { 4810 
mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 4811 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 4812 } 4813 mlxsw_sp_router_fini(mlxsw_sp); 4814 mlxsw_sp_acl_fini(mlxsw_sp); 4815 mlxsw_sp_nve_fini(mlxsw_sp); 4816 mlxsw_sp_afa_fini(mlxsw_sp); 4817 mlxsw_sp_counter_pool_fini(mlxsw_sp); 4818 mlxsw_sp_switchdev_fini(mlxsw_sp); 4819 mlxsw_sp_span_fini(mlxsw_sp); 4820 mlxsw_sp_lag_fini(mlxsw_sp); 4821 mlxsw_sp_buffers_fini(mlxsw_sp); 4822 mlxsw_sp_traps_fini(mlxsw_sp); 4823 mlxsw_sp_fids_fini(mlxsw_sp); 4824 mlxsw_sp_kvdl_fini(mlxsw_sp); 4825 } 4826 4827 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 4828 * 802.1Q FIDs 4829 */ 4830 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 4831 VLAN_VID_MASK - 1) 4832 4833 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 4834 .used_max_mid = 1, 4835 .max_mid = MLXSW_SP_MID_MAX, 4836 .used_flood_tables = 1, 4837 .used_flood_mode = 1, 4838 .flood_mode = 3, 4839 .max_fid_flood_tables = 3, 4840 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 4841 .used_max_ib_mc = 1, 4842 .max_ib_mc = 0, 4843 .used_max_pkey = 1, 4844 .max_pkey = 0, 4845 .used_kvd_sizes = 1, 4846 .kvd_hash_single_parts = 59, 4847 .kvd_hash_double_parts = 41, 4848 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 4849 .swid_config = { 4850 { 4851 .used_type = 1, 4852 .type = MLXSW_PORT_SWID_TYPE_ETH, 4853 } 4854 }, 4855 }; 4856 4857 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 4858 .used_max_mid = 1, 4859 .max_mid = MLXSW_SP_MID_MAX, 4860 .used_flood_tables = 1, 4861 .used_flood_mode = 1, 4862 .flood_mode = 3, 4863 .max_fid_flood_tables = 3, 4864 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 4865 .used_max_ib_mc = 1, 4866 .max_ib_mc = 0, 4867 .used_max_pkey = 1, 4868 .max_pkey = 0, 4869 .swid_config = { 4870 { 4871 .used_type = 1, 4872 .type = MLXSW_PORT_SWID_TYPE_ETH, 4873 } 4874 }, 4875 }; 4876 4877 static void 4878 mlxsw_sp_resource_size_params_prepare(struct 
mlxsw_core *mlxsw_core, 4879 struct devlink_resource_size_params *kvd_size_params, 4880 struct devlink_resource_size_params *linear_size_params, 4881 struct devlink_resource_size_params *hash_double_size_params, 4882 struct devlink_resource_size_params *hash_single_size_params) 4883 { 4884 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 4885 KVD_SINGLE_MIN_SIZE); 4886 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 4887 KVD_DOUBLE_MIN_SIZE); 4888 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 4889 u32 linear_size_min = 0; 4890 4891 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 4892 MLXSW_SP_KVD_GRANULARITY, 4893 DEVLINK_RESOURCE_UNIT_ENTRY); 4894 devlink_resource_size_params_init(linear_size_params, linear_size_min, 4895 kvd_size - single_size_min - 4896 double_size_min, 4897 MLXSW_SP_KVD_GRANULARITY, 4898 DEVLINK_RESOURCE_UNIT_ENTRY); 4899 devlink_resource_size_params_init(hash_double_size_params, 4900 double_size_min, 4901 kvd_size - single_size_min - 4902 linear_size_min, 4903 MLXSW_SP_KVD_GRANULARITY, 4904 DEVLINK_RESOURCE_UNIT_ENTRY); 4905 devlink_resource_size_params_init(hash_single_size_params, 4906 single_size_min, 4907 kvd_size - double_size_min - 4908 linear_size_min, 4909 MLXSW_SP_KVD_GRANULARITY, 4910 DEVLINK_RESOURCE_UNIT_ENTRY); 4911 } 4912 4913 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 4914 { 4915 struct devlink *devlink = priv_to_devlink(mlxsw_core); 4916 struct devlink_resource_size_params hash_single_size_params; 4917 struct devlink_resource_size_params hash_double_size_params; 4918 struct devlink_resource_size_params linear_size_params; 4919 struct devlink_resource_size_params kvd_size_params; 4920 u32 kvd_size, single_size, double_size, linear_size; 4921 const struct mlxsw_config_profile *profile; 4922 int err; 4923 4924 profile = &mlxsw_sp1_config_profile; 4925 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 4926 return -EIO; 4927 4928 
mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 4929 &linear_size_params, 4930 &hash_double_size_params, 4931 &hash_single_size_params); 4932 4933 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 4934 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 4935 kvd_size, MLXSW_SP_RESOURCE_KVD, 4936 DEVLINK_RESOURCE_ID_PARENT_TOP, 4937 &kvd_size_params); 4938 if (err) 4939 return err; 4940 4941 linear_size = profile->kvd_linear_size; 4942 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 4943 linear_size, 4944 MLXSW_SP_RESOURCE_KVD_LINEAR, 4945 MLXSW_SP_RESOURCE_KVD, 4946 &linear_size_params); 4947 if (err) 4948 return err; 4949 4950 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 4951 if (err) 4952 return err; 4953 4954 double_size = kvd_size - linear_size; 4955 double_size *= profile->kvd_hash_double_parts; 4956 double_size /= profile->kvd_hash_double_parts + 4957 profile->kvd_hash_single_parts; 4958 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 4959 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 4960 double_size, 4961 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 4962 MLXSW_SP_RESOURCE_KVD, 4963 &hash_double_size_params); 4964 if (err) 4965 return err; 4966 4967 single_size = kvd_size - double_size - linear_size; 4968 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 4969 single_size, 4970 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 4971 MLXSW_SP_RESOURCE_KVD, 4972 &hash_single_size_params); 4973 if (err) 4974 return err; 4975 4976 return 0; 4977 } 4978 4979 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 4980 { 4981 return mlxsw_sp1_resources_kvd_register(mlxsw_core); 4982 } 4983 4984 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 4985 { 4986 return 0; 4987 } 4988 4989 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 4990 const struct mlxsw_config_profile *profile, 4991 
u64 *p_single_size, u64 *p_double_size, 4992 u64 *p_linear_size) 4993 { 4994 struct devlink *devlink = priv_to_devlink(mlxsw_core); 4995 u32 double_size; 4996 int err; 4997 4998 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 4999 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 5000 return -EIO; 5001 5002 /* The hash part is what left of the kvd without the 5003 * linear part. It is split to the single size and 5004 * double size by the parts ratio from the profile. 5005 * Both sizes must be a multiplications of the 5006 * granularity from the profile. In case the user 5007 * provided the sizes they are obtained via devlink. 5008 */ 5009 err = devlink_resource_size_get(devlink, 5010 MLXSW_SP_RESOURCE_KVD_LINEAR, 5011 p_linear_size); 5012 if (err) 5013 *p_linear_size = profile->kvd_linear_size; 5014 5015 err = devlink_resource_size_get(devlink, 5016 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5017 p_double_size); 5018 if (err) { 5019 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5020 *p_linear_size; 5021 double_size *= profile->kvd_hash_double_parts; 5022 double_size /= profile->kvd_hash_double_parts + 5023 profile->kvd_hash_single_parts; 5024 *p_double_size = rounddown(double_size, 5025 MLXSW_SP_KVD_GRANULARITY); 5026 } 5027 5028 err = devlink_resource_size_get(devlink, 5029 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5030 p_single_size); 5031 if (err) 5032 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5033 *p_double_size - *p_linear_size; 5034 5035 /* Check results are legal. 
*/ 5036 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5037 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 5038 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 5039 return -EIO; 5040 5041 return 0; 5042 } 5043 5044 static int 5045 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, 5046 union devlink_param_value val, 5047 struct netlink_ext_ack *extack) 5048 { 5049 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && 5050 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { 5051 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); 5052 return -EINVAL; 5053 } 5054 5055 return 0; 5056 } 5057 5058 static const struct devlink_param mlxsw_sp_devlink_params[] = { 5059 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, 5060 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 5061 NULL, NULL, 5062 mlxsw_sp_devlink_param_fw_load_policy_validate), 5063 }; 5064 5065 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) 5066 { 5067 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5068 union devlink_param_value value; 5069 int err; 5070 5071 err = devlink_params_register(devlink, mlxsw_sp_devlink_params, 5072 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5073 if (err) 5074 return err; 5075 5076 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; 5077 devlink_param_driverinit_value_set(devlink, 5078 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 5079 value); 5080 return 0; 5081 } 5082 5083 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) 5084 { 5085 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5086 mlxsw_sp_devlink_params, 5087 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5088 } 5089 5090 static int 5091 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 5092 struct devlink_param_gset_ctx *ctx) 5093 { 5094 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5095 struct mlxsw_sp *mlxsw_sp = 
mlxsw_core_driver_priv(mlxsw_core); 5096 5097 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 5098 return 0; 5099 } 5100 5101 static int 5102 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 5103 struct devlink_param_gset_ctx *ctx) 5104 { 5105 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5106 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5107 5108 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 5109 } 5110 5111 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 5112 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5113 "acl_region_rehash_interval", 5114 DEVLINK_PARAM_TYPE_U32, 5115 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 5116 mlxsw_sp_params_acl_region_rehash_intrvl_get, 5117 mlxsw_sp_params_acl_region_rehash_intrvl_set, 5118 NULL), 5119 }; 5120 5121 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 5122 { 5123 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5124 union devlink_param_value value; 5125 int err; 5126 5127 err = mlxsw_sp_params_register(mlxsw_core); 5128 if (err) 5129 return err; 5130 5131 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 5132 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5133 if (err) 5134 goto err_devlink_params_register; 5135 5136 value.vu32 = 0; 5137 devlink_param_driverinit_value_set(devlink, 5138 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5139 value); 5140 return 0; 5141 5142 err_devlink_params_register: 5143 mlxsw_sp_params_unregister(mlxsw_core); 5144 return err; 5145 } 5146 5147 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 5148 { 5149 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5150 mlxsw_sp2_devlink_params, 5151 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5152 mlxsw_sp_params_unregister(mlxsw_core); 5153 } 5154 5155 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 5156 struct sk_buff *skb, u8 local_port) 5157 
{ 5158 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5159 5160 skb_pull(skb, MLXSW_TXHDR_LEN); 5161 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 5162 } 5163 5164 static struct mlxsw_driver mlxsw_sp1_driver = { 5165 .kind = mlxsw_sp1_driver_name, 5166 .priv_size = sizeof(struct mlxsw_sp), 5167 .init = mlxsw_sp1_init, 5168 .fini = mlxsw_sp_fini, 5169 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5170 .port_split = mlxsw_sp_port_split, 5171 .port_unsplit = mlxsw_sp_port_unsplit, 5172 .sb_pool_get = mlxsw_sp_sb_pool_get, 5173 .sb_pool_set = mlxsw_sp_sb_pool_set, 5174 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5175 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5176 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5177 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5178 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5179 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5180 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5181 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5182 .flash_update = mlxsw_sp_flash_update, 5183 .txhdr_construct = mlxsw_sp_txhdr_construct, 5184 .resources_register = mlxsw_sp1_resources_register, 5185 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 5186 .params_register = mlxsw_sp_params_register, 5187 .params_unregister = mlxsw_sp_params_unregister, 5188 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5189 .txhdr_len = MLXSW_TXHDR_LEN, 5190 .profile = &mlxsw_sp1_config_profile, 5191 .res_query_enabled = true, 5192 }; 5193 5194 static struct mlxsw_driver mlxsw_sp2_driver = { 5195 .kind = mlxsw_sp2_driver_name, 5196 .priv_size = sizeof(struct mlxsw_sp), 5197 .init = mlxsw_sp2_init, 5198 .fini = mlxsw_sp_fini, 5199 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5200 .port_split = mlxsw_sp_port_split, 5201 .port_unsplit = mlxsw_sp_port_unsplit, 5202 .sb_pool_get = mlxsw_sp_sb_pool_get, 5203 .sb_pool_set = mlxsw_sp_sb_pool_set, 5204 .sb_port_pool_get = 
mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

/* Test whether a netdev is an mlxsw_sp front-panel port. */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* Lower-device walk callback: record the first mlxsw_sp port found and
 * return non-zero to stop the walk.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Resolve a netdev (possibly stacked, e.g. LAG/bridge/VLAN) to an
 * underlying mlxsw_sp port, or NULL if none is found.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Map a netdev to its mlxsw_sp instance, or NULL if it is not ours. */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(). */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Like the RCU lookup above, but also takes a reference on the port's
 * netdev; release it with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

/* Drop the reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make the port leave any bridge the LAG device itself, or any of the
 * LAG's direct uppers, is enslaved to.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create a LAG entry in the device via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy a LAG entry in the device via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add the port to a LAG's collector via the SLCOR register. */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port from a LAG's collector. */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection for the port within its LAG. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable collection for the port within its LAG. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Find the LAG ID already bound to lag_dev, or else the first free one.
 * Returns -EBUSY when all LAG entries are in use.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate that lag_dev can be offloaded: a free or matching LAG entry
 * exists and the LAG uses the hash Tx policy.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Pick a free member index within the given LAG, or -EBUSY if full. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Enslave the port to lag_dev: create the LAG in hardware on first use,
 * then add the port to the LAG's collector.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id,
port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	/* Only destroy the LAG if this failed join was its first user. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Undo mlxsw_sp_port_lag_join(): remove the port from the collector,
 * flush its VLANs, leave any bridges via the LAG, and destroy the LAG
 * in hardware when the last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* Add the port to the LAG's distributor via the SLDR register. */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove the port from the LAG's distributor. */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable both collection and distribution for the LAG member; the
 * collector is disabled again if enabling distribution fails.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Disable distribution then collection; the distributor is re-added on
 * failure to keep hardware state consistent.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Reflect the bonding driver's tx_enabled state into the device. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the STP state (forwarding/discarding) for every VLAN on the port
 * via the SPMS register.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp =
mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is heap-allocated rather than kept on the stack. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for enslavement to an OVS master: virtual-port mode,
 * STP forwarding, membership in VLANs 1..VLAN_N_VID-2 and learning
 * disabled on VLANs 1..VLAN_N_VID-1; unwound in reverse on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* True when the bridge has more than one VxLAN lower device. */
static bool
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* Verify no two VxLAN lowers of the bridge map to the same PVID. */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Check the offload constraints for a bridge with VxLAN lowers; sets an
 * extack message describing the violated constraint.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle [PRE]CHANGEUPPER events for a front-panel port netdev. */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Veto unsupported topologies before the change happens. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Handle NETDEV_CHANGELOWERSTATE for a port that is a LAG member. */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch netdev events for a front-panel port to the right handler. */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replicate a LAG device event to each mlxsw_sp member port. */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle [PRE]CHANGEUPPER for a VLAN device on top of a port. */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Replicate a VLAN-on-LAG event to each mlxsw_sp member port. */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle [PRE]CHANGEUPPER for a VLAN device on top of a bridge. */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct
netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Dispatch events on a VLAN device by the type of its real device. */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle [PRE]CHANGEUPPER for a bridge device itself. */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Veto any upper placed on a macvlan we know about; VRF enslavement is
 * handled separately.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* True when the event is enslavement to / release from an L3 master. */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events on a VxLAN device that is (or is becoming) a lower of an
 * offloaded bridge: join/leave the VxLAN to/from the bridge as needed.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info
*cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Top-level netdevice notifier for this ASIC instance: invalidates SPAN
 * entries on unregister, re-resolves SPAN, then dispatches by device
 * type / event kind.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX,
PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 6239 {0, }, 6240 }; 6241 6242 static struct pci_driver mlxsw_sp1_pci_driver = { 6243 .name = mlxsw_sp1_driver_name, 6244 .id_table = mlxsw_sp1_pci_id_table, 6245 }; 6246 6247 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6248 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6249 {0, }, 6250 }; 6251 6252 static struct pci_driver mlxsw_sp2_pci_driver = { 6253 .name = mlxsw_sp2_driver_name, 6254 .id_table = mlxsw_sp2_pci_id_table, 6255 }; 6256 6257 static int __init mlxsw_sp_module_init(void) 6258 { 6259 int err; 6260 6261 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6262 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6263 6264 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6265 if (err) 6266 goto err_sp1_core_driver_register; 6267 6268 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6269 if (err) 6270 goto err_sp2_core_driver_register; 6271 6272 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6273 if (err) 6274 goto err_sp1_pci_driver_register; 6275 6276 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6277 if (err) 6278 goto err_sp2_pci_driver_register; 6279 6280 return 0; 6281 6282 err_sp2_pci_driver_register: 6283 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6284 err_sp1_pci_driver_register: 6285 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6286 err_sp2_core_driver_register: 6287 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6288 err_sp1_core_driver_register: 6289 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6290 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6291 return err; 6292 } 6293 6294 static void __exit mlxsw_sp_module_exit(void) 6295 { 6296 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6297 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6298 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6299 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6300 
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6301 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6302 } 6303 6304 module_init(mlxsw_sp_module_init); 6305 module_exit(mlxsw_sp_module_exit); 6306 6307 MODULE_LICENSE("Dual BSD/GPL"); 6308 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 6309 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 6310 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 6311 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 6312 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 6313