// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "../mlxfw/mlxfw.h"

/* Map a firmware minor version to its release branch (e.g. 2000 -> 20). */
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

/* Firmware revision the Spectrum-1 driver requires. */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
#define MLXSW_SP1_FWREV_SUBMINOR 1122
/* Oldest minor FW revision that supports a post-flash reset. */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware image file name, derived from the required revision above. */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Per-ASIC masks applied to the switch base MAC when deriving port MACs. */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

/* Glue between the common mlxfw flashing core and a Spectrum device. */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

/* Per-generation PTP callbacks; Spectrum ASICs differ in PTP support. */
struct mlxsw_sp_ptp_ops {
	struct mlxsw_sp_ptp_clock *
		(*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
	void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);

	struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
	void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);

	/* Notify a driver that a packet that might be PTP was received. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			u8 local_port);

	/* Notify a driver that a timestamped packet was transmitted. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			    u8 local_port);

	int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	void (*shaper_work)(struct work_struct *work);
	int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
			   struct ethtool_ts_info *info);
};

/* mlxfw callback: query the size/alignment constraints of a firmware
 * component via the MCQI register.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	/* Clamp write size to what one MCDA download register can carry. */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* mlxfw callback: take the firmware update handle. Fails with -EBUSY
 * when the management FSM is not idle.
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return
	       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: announce a component update of the given size. */
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: download one block of component data via MCDA. */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

/* mlxfw callback: ask the device to verify a downloaded component. */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: activate the newly flashed firmware image. */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: read back the update FSM state and error code. */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	/* Cap the device error code at the highest value mlxfw knows. */
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* mlxfw callback: abort an in-progress firmware update (best effort). */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: release the update handle taken by fsm_lock(). */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: forward flash progress to devlink listeners. */
static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
				   const char *msg, const char
						    *comp_name,
				   u32 done_bytes, u32 total_bytes)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;

	devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
					   msg, comp_name,
					   done_bytes, total_bytes);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release,
	.status_notify = mlxsw_sp_status_notify,
};

/* Flash @firmware to the device through the common mlxfw core,
 * bracketing the operation with core flash start/end and devlink
 * begin/end notifications.
 */
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};
	int err;

	mlxsw_core_fw_flash_start(mlxsw_sp->core);
	devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
				   firmware, extack);
	devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
	mlxsw_core_fw_flash_end(mlxsw_sp->core);

	return err;
}

/* Validate that the running firmware is compatible with the driver and
 * flash the bundled image when it is not. Returns -EAGAIN when a reset
 * is required for the new firmware to take effect.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	/* Same branch and at least the required minor/subminor: OK as-is. */
	if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
	    (rev->minor > req_rev->minor ||
	     (rev->minor == req_rev->minor &&
	      rev->subminor >= req_rev->subminor)))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ?
			     err : -EAGAIN;
	else
		return 0;
}

/* devlink flash_update callback; only whole-image updates supported. */
static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
				 const char *file_name, const char *component,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	const struct firmware *firmware;
	int err;

	if (component)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, file_name,
				      mlxsw_sp->bus_info->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
	release_firmware(firmware);

	return err;
}

/* Read the packet/byte values of a flow counter via MGPC. Either output
 * pointer may be NULL when the caller is not interested in it.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

/* Zero a flow counter pair in hardware. */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a flow counter and clear it so readings start from zero;
 * the counter is freed again if the clear fails.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp,
			      MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

/* Return a flow counter to the flow sub-pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

/* Prepend and fill the Tx header needed to send @skb as a control
 * packet to the local port described by @tx_info.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

/* Translate a bridge port STP state to the SPMS register encoding. */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

/* Program the STP state of @vid on @mlxsw_sp_port via SPMS. */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	/* Register payload is heap-allocated rather than kept on-stack. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Read the switch base MAC address from the SPAD register into
 * mlxsw_sp->base_mac.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char
	     spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

/* Enable/disable packet sampling on a port at the given rate (MPSC). */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

/* Set the administrative (up/down) status of a port via PAOS. */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

/* Program a port's MAC address into hardware via PPAD. */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port MAC from the switch base MAC (last byte offset by the
 * local port number) and program it into hardware.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

/* Set a port's MTU via PMTU, accounting for Tx header and Ethernet
 * header overhead and validating against the hardware maximum.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	/* Packing with MTU 0 turns this into a max-MTU query. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

/* Assign a port to a switch partition (SWID) via PSPA. */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

/* Enable/disable virtual port mode on a port via SVPE. */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable/disable FDB learning for @vid on a port via SPVMLR. */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	/* Register payload is heap-allocated rather than kept on-stack. */
	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

/* Program the port's PVID via the SPVID register. */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid),
			       spvid_pl);
}

/* Allow or disallow reception of untagged frames on a port (SPAFT). */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

/* Set the port's PVID. vid == 0 means "no PVID": untagged reception is
 * disabled instead. Rolls back to the previous PVID on failure.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

/* Create the system port to local port mapping (SSPR). */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

/* Query the module, width and first lane of a local port via PMLP. */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

/* Map a port to @width consecutive lanes of @module starting at @lane. */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* Unmap a port from its module by programming a zero width. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* ndo_open: administratively enable the port, then start the Tx queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop: stop the Tx queue and administratively disable the port. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

/* ndo_start_xmit: prepend the Tx header and hand the packet to the
 * mlxsw core for transmission.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Reallocate when there is not enough headroom for the Tx header. */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb =
		    skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	/* eth_skb_pad() frees the skb itself on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: intentionally empty. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

/* ndo_set_mac_address: validate and program the new MAC; the netdev
 * copy is updated only after hardware accepted it.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* Priority group buffer threshold, in cells: twice the MTU. */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* Convert a PFC delay (in bits) to the headroom cells required. */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

/* Delay cells required for a PG buffer depending on whether PFC or
 * pause is enabled; zero when the buffer is lossy.
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

/* Pack one PG buffer entry as lossy, or lossless with a threshold. */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

/* Size the port's headroom (PG) buffers for the given MTU, priority to
 * TC mapping and flow control state. Fails with -ENOBUFS when the total
 * would exceed the available headroom.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ?
			    my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		/* Only configure PG buffer i if some priority maps to TC i;
		 * the PFC state follows that priority's bit.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		total_cells = thres_cells + delay_cells;

		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Headroom configuration using the port's current DCB (ETS/PFC) state. */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

/* ndo_change_mtu: update headroom, SPAN buffers and the port MTU,
 * unwinding already-applied steps on failure.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Sum the per-CPU software counters of a port into @stats. */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection.
		 */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* .ndo_has_offload_stats: only CPU-hit statistics are supported. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* .ndo_get_offload_stats: CPU-hit stats are the software counters. */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group/priority for this port into ppcnt_pl. */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Translate the IEEE 802.3 PPCNT counter group into rtnl_link_stats64. */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Collect extended per-TC / per-priority counters (ECN marks, WRED drops,
 * queue backlog, tail drops, per-priority tx). Individual query failures
 * leave the corresponding xstats fields untouched rather than aborting.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Periodic worker: refresh the cached HW statistics so that atomic-context
 * readers (get_stats64) never need to issue a register query. Skips the
 * query while the carrier is down, but always re-arms itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program one SPVM record: (de)register a VID range on the port.
 * The payload is heap-allocated because MLXSW_REG_SPVM_LEN is too large
 * for the stack.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Apply a VLAN membership change over [vid_begin, vid_end], split into
 * chunks of at most MLXSW_REG_SPVM_REC_MAX_COUNT records per register
 * write. Stops at the first failure; already-written chunks are not
 * rolled back here.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLANs on the port; the default VID is spared unless
 * flush_default is set.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the {port, VID} from whatever entity currently owns it —
 * either a bridge port or the router.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a {port, VID} object and register the VID in hardware. The
 * default VID egresses untagged. Returns ERR_PTR(-EEXIST) if the VID
 * already exists on the port.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Tear down a {port, VID}: leave bridge/router, unlink, free, and only
 * then remove the VID from hardware.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Linear lookup of a matchall offload entry by its tc cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirred action as a SPAN session towards act->dev. */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
					true, &mirror->span_id);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only a single sampler per port is
 * supported; the psample group pointer doubles as the "active" flag and
 * is published under RCU before the hardware sampler is enabled.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

/* Disable the hardware sampler and clear the psample group pointer. */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int
mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct tc_cls_matchall_offload *f,
			       bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	/* Only a single mirred or sample action per matchall rule is
	 * supported, and only for protocol "all".
	 */
	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* Remove a previously offloaded matchall rule identified by its cookie. */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

/* Dispatch a matchall classifier command to the add/del helpers. */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch a flower classifier command to the ACL flower code. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case FLOW_CLS_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case FLOW_CLS_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Per-port flow block callback handling only matchall; flower is handled
 * by a separate, per-ASIC callback (see the cb_flower variant below).
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Ingress flavour of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

/* Egress flavour of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* Shared (per-ACL-block) flow block callback handling only flower. */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Release callback invoked when the last reference to the flower block
 * callback is dropped.
 */
static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	mlxsw_sp_acl_block_destroy(acl_block);
}

static LIST_HEAD(mlxsw_sp_block_cb_list);

/* Bind a port to a (possibly shared) flower ACL block. The flow_block_cb
 * is created on first use and reference counted afterwards; it is only
 * registered with the core on creation.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
					       mlxsw_sp, acl_block,
					       mlxsw_sp_tc_block_flower_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_acl_block_destroy(acl_block);
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
		register_block = true;
	} else {
		acl_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress, f->extack);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
	}

	return 0;

err_block_bind:
	/* Dropping the last reference frees the callback (and, via the
	 * release hook, the ACL block).
	 */
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
err_cb_register:
	return err;
}

/* Undo flower_bind: unbind the port from the ACL block and unregister
 * the shared callback when the last user is gone.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = flow_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

/* TC_SETUP_BLOCK entry point: installs both the per-port matchall
 * callback and the shared flower callback for the given binder type.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb =
mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	f->driver_block_list = &mlxsw_sp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
					  &mlxsw_sp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
					       mlxsw_sp_port, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
							  ingress);
		if (err) {
			flow_block_cb_free(block_cb);
			return err;
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f, ingress);
		block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* .ndo_setup_tc: dispatch to block (clsact), RED or PRIO qdisc offload. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}


/* Toggle NETIF_F_HW_TC: refuse to disable while offloaded rules exist;
 * otherwise mark both ACL blocks disabled/enabled via their counters.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* Toggle NETIF_F_LOOPBACK via the PPLR register; the port is taken
 * administratively down around the change when it is running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Invoke the handler for one feature bit only if it actually changed,
 * updating dev->features on success.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
/* .ndo_set_features: apply each supported feature; restore the previous
 * feature set if any handler failed.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: apply the user's hwtstamp config through the per-ASIC
 * PTP ops and echo the possibly-adjusted config back to user space.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* SIOCGHWTSTAMP: report the current hwtstamp config to user space. */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* An all-zero config disables hardware timestamping on the port. */
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* .ndo_do_ioctl: only the hardware timestamping ioctls are handled. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

/* ethtool .get_drvinfo: report driver name/version, firmware revision
 * and bus info.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

/* ethtool .get_pauseparam: report the cached PAUSE state. */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Program PAUSE rx/tx enablement into the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool .set_pauseparam: PAUSE is mutually exclusive with PFC and
 * autonegotiated PAUSE is not supported. Headroom is resized first; on
 * PFCC failure the headroom is restored using the previous PAUSE state.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en =
mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 2003 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 2004 return err; 2005 } 2006 2007 struct mlxsw_sp_port_hw_stats { 2008 char str[ETH_GSTRING_LEN]; 2009 u64 (*getter)(const char *payload); 2010 bool cells_bytes; 2011 }; 2012 2013 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 2014 { 2015 .str = "a_frames_transmitted_ok", 2016 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 2017 }, 2018 { 2019 .str = "a_frames_received_ok", 2020 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 2021 }, 2022 { 2023 .str = "a_frame_check_sequence_errors", 2024 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 2025 }, 2026 { 2027 .str = "a_alignment_errors", 2028 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 2029 }, 2030 { 2031 .str = "a_octets_transmitted_ok", 2032 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 2033 }, 2034 { 2035 .str = "a_octets_received_ok", 2036 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 2037 }, 2038 { 2039 .str = "a_multicast_frames_xmitted_ok", 2040 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 2041 }, 2042 { 2043 .str = "a_broadcast_frames_xmitted_ok", 2044 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 2045 }, 2046 { 2047 .str = "a_multicast_frames_received_ok", 2048 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 2049 }, 2050 { 2051 .str = "a_broadcast_frames_received_ok", 2052 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 2053 }, 2054 { 2055 .str = "a_in_range_length_errors", 2056 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 2057 }, 2058 { 2059 .str = "a_out_of_range_length_field", 2060 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 2061 }, 2062 { 2063 .str = "a_frame_too_long_errors", 2064 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 2065 }, 2066 { 2067 .str = "a_symbol_error_during_carrier", 2068 .getter = 
mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 2069 }, 2070 { 2071 .str = "a_mac_control_frames_transmitted", 2072 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 2073 }, 2074 { 2075 .str = "a_mac_control_frames_received", 2076 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 2077 }, 2078 { 2079 .str = "a_unsupported_opcodes_received", 2080 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 2081 }, 2082 { 2083 .str = "a_pause_mac_ctrl_frames_received", 2084 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 2085 }, 2086 { 2087 .str = "a_pause_mac_ctrl_frames_xmitted", 2088 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 2089 }, 2090 }; 2091 2092 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 2093 2094 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { 2095 { 2096 .str = "if_in_discards", 2097 .getter = mlxsw_reg_ppcnt_if_in_discards_get, 2098 }, 2099 { 2100 .str = "if_out_discards", 2101 .getter = mlxsw_reg_ppcnt_if_out_discards_get, 2102 }, 2103 { 2104 .str = "if_out_errors", 2105 .getter = mlxsw_reg_ppcnt_if_out_errors_get, 2106 }, 2107 }; 2108 2109 #define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ 2110 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) 2111 2112 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { 2113 { 2114 .str = "ether_stats_undersize_pkts", 2115 .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, 2116 }, 2117 { 2118 .str = "ether_stats_oversize_pkts", 2119 .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, 2120 }, 2121 { 2122 .str = "ether_stats_fragments", 2123 .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, 2124 }, 2125 { 2126 .str = "ether_pkts64octets", 2127 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, 2128 }, 2129 { 2130 .str = "ether_pkts65to127octets", 2131 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, 2132 }, 2133 { 2134 .str = "ether_pkts128to255octets", 2135 
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (EtherLike-MIB) counters, read from the PPCNT register.
 * NOTE: table order defines the ethtool -S string/value order and is
 * therefore userspace ABI - append only, never reorder.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Per-reason packet discard counters (PPCNT discard group). Same
 * append-only ABI rule as above.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counters; this table is emitted once per IEEE 802.1Q
 * priority (see mlxsw_sp_port_get_prio_strings()), with the priority
 * number appended to each string.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-traffic-class counters; emitted once per TC with the TC number
 * appended to each string (see mlxsw_sp_port_get_tc_strings()).
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		/* HW reports buffer cells; converted to bytes in
		 * __mlxsw_sp_port_get_stats() via mlxsw_sp_cells_bytes().
		 */
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total number of ethtool statistics; must match both the strings laid
 * out in mlxsw_sp_port_get_strings() and the values laid out in
 * mlxsw_sp_port_get_stats(), in the same order.
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit the per-priority stat strings for one priority, advancing *p.
 * "%.29s_%.1d" keeps the result within ETH_GSTRING_LEN (32):
 * 29 chars + '_' + 1 digit + NUL.
 */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Emit the per-TC stat strings for one traffic class, advancing *p. */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool_ops::get_strings - lay out the stat names. The order here
 * must mirror mlxsw_sp_port_get_stats() exactly.
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

/* ethtool_ops::set_phys_id - toggle the port LED via the MLCR register
 * for port identification ("ethtool -p").
 */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		/* Blink states are not supported; only steady on/off. */
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its descriptor table and length. */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2863_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_3635_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_DISCARD_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Fetch one PPCNT group for the port and copy it into data[] starting
 * at data_index, converting cell counts to bytes where flagged.
 * NOTE(review): the return value of mlxsw_sp_port_get_stats_raw() is
 * ignored; on a query failure stale/uninitialized register payload may
 * be decoded - consider checking it.
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool_ops::get_ethtool_stats - group order must mirror
 * mlxsw_sp_port_get_strings() and MLXSW_SP_PORT_ETHTOOL_STATS_LEN.
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2863 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

/* ethtool_ops::get_sset_count. */
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* One Spectrum-1 PTYS speed capability bit (mask) and its ethtool
 * link-mode bit / numeric speed.
 */
struct mlxsw_sp1_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask =
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	/* All four 56G ethtool modes map onto the single 56GBASE_R4 PTYS
	 * bit; the duplication is intentional so each ethtool bit is
	 * reported/accepted.
	 */
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Set ethtool "supported" port-type bits (FIBRE/Backplane) from the
 * PTYS capability mask.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Translate a PTYS capability mask into ethtool link-mode bits. */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Speed of the first table entry matching the PTYS mask, or
 * SPEED_UNKNOWN if none matches.
 */
static u32
mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			return mlxsw_sp1_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill speed/duplex; unknown unless the carrier is up. All modes in
 * the table are full duplex.
 */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Build a PTYS admin mask from the ethtool advertised link modes. */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build a PTYS mask of all modes running at exactly the given speed. */
static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build a PTYS mask of all modes at or below the given speed. */
static u32
mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* On Spectrum-1 the per-lane base speed is fixed at 25G. */
static int
mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32 *base_speed)
{
	*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
	return 0;
}

/* Spectrum-1 uses the legacy (non-extended) PTYS Ethernet fields. */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port	= mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link			= mlxsw_sp1_from_ptys_link,
	.from_ptys_speed		= mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex		= mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link		= mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed			= mlxsw_sp1_to_ptys_speed,
	.to_ptys_upper_speed		= mlxsw_sp1_to_ptys_upper_speed,
.port_speed_base		= mlxsw_sp1_port_speed_base,
	.reg_ptys_eth_pack		= mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack		= mlxsw_sp1_reg_ptys_eth_unpack,
};

/* Spectrum-2 extended PTYS: each capability bit maps to a SET of
 * ethtool link modes, listed in the per-speed tables below.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

/* One Spectrum-2 extended PTYS bit (mask) and the ethtool link-mode
 * set (mask_ethtool/m_ethtool_len) plus numeric speed it represents.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len =
MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, 2993 .speed = SPEED_100000, 2994 }, 2995 { 2996 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, 2997 .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, 2998 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, 2999 .speed = SPEED_200000, 3000 }, 3001 }; 3002 3003 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 3004 3005 static void 3006 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 3007 u32 ptys_eth_proto, 3008 struct ethtool_link_ksettings *cmd) 3009 { 3010 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 3011 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 3012 } 3013 3014 static void 3015 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3016 unsigned long *mode) 3017 { 3018 int i; 3019 3020 for (i = 0; i < link_mode->m_ethtool_len; i++) 3021 __set_bit(link_mode->mask_ethtool[i], mode); 3022 } 3023 3024 static void 3025 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 3026 unsigned long *mode) 3027 { 3028 int i; 3029 3030 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3031 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3032 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3033 mode); 3034 } 3035 } 3036 3037 static u32 3038 mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 3039 { 3040 int i; 3041 3042 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3043 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3044 return mlxsw_sp2_port_link_mode[i].speed; 3045 } 3046 3047 return SPEED_UNKNOWN; 3048 } 3049 3050 static void 3051 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 3052 u32 ptys_eth_proto, 3053 struct ethtool_link_ksettings *cmd) 3054 { 3055 cmd->base.speed = SPEED_UNKNOWN; 3056 cmd->base.duplex = DUPLEX_UNKNOWN; 3057 3058 if (!carrier_ok) 3059 return; 3060 
3061 cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 3062 if (cmd->base.speed != SPEED_UNKNOWN) 3063 cmd->base.duplex = DUPLEX_FULL; 3064 } 3065 3066 static bool 3067 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3068 const unsigned long *mode) 3069 { 3070 int cnt = 0; 3071 int i; 3072 3073 for (i = 0; i < link_mode->m_ethtool_len; i++) { 3074 if (test_bit(link_mode->mask_ethtool[i], mode)) 3075 cnt++; 3076 } 3077 3078 return cnt == link_mode->m_ethtool_len; 3079 } 3080 3081 static u32 3082 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, 3083 const struct ethtool_link_ksettings *cmd) 3084 { 3085 u32 ptys_proto = 0; 3086 int i; 3087 3088 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3089 if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3090 cmd->link_modes.advertising)) 3091 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3092 } 3093 return ptys_proto; 3094 } 3095 3096 static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) 3097 { 3098 u32 ptys_proto = 0; 3099 int i; 3100 3101 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3102 if (speed == mlxsw_sp2_port_link_mode[i].speed) 3103 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3104 } 3105 return ptys_proto; 3106 } 3107 3108 static u32 3109 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 3110 { 3111 u32 ptys_proto = 0; 3112 int i; 3113 3114 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3115 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 3116 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3117 } 3118 return ptys_proto; 3119 } 3120 3121 static int 3122 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3123 u32 *base_speed) 3124 { 3125 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3126 u32 eth_proto_cap; 3127 int err; 3128 3129 /* In Spectrum-2, the speed of 1x can change from port to port, so query 3130 * it from firmware. 
3131 */ 3132 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3133 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3134 if (err) 3135 return err; 3136 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3137 3138 if (eth_proto_cap & 3139 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3140 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3141 return 0; 3142 } 3143 3144 if (eth_proto_cap & 3145 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3146 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3147 return 0; 3148 } 3149 3150 return -EIO; 3151 } 3152 3153 static void 3154 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3155 u8 local_port, u32 proto_admin, 3156 bool autoneg) 3157 { 3158 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3159 } 3160 3161 static void 3162 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3163 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3164 u32 *p_eth_proto_oper) 3165 { 3166 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3167 p_eth_proto_admin, p_eth_proto_oper); 3168 } 3169 3170 static const struct mlxsw_sp_port_type_speed_ops 3171 mlxsw_sp2_port_type_speed_ops = { 3172 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3173 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3174 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3175 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3176 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3177 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3178 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3179 .port_speed_base = mlxsw_sp2_port_speed_base, 3180 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3181 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3182 }; 3183 3184 static void 3185 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3186 struct ethtool_link_ksettings *cmd) 3187 { 3188 const struct 
mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
	ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported);
}

/* Populate the "advertising" bitmap in @cmd from the PTYS admin mask.
 * Nothing is advertised when autoneg is off.
 */
static void
mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
				 u32 eth_proto_admin, bool autoneg,
				 struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
			    cmd->link_modes.advertising);
}

/* Map the PTYS connector type to the matching ethtool PORT_* value. */
static u8
mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
{
	switch (connector_type) {
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
		return PORT_OTHER;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
		return PORT_NONE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
		return PORT_TP;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
		return PORT_AUI;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
		return PORT_BNC;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
		return PORT_MII;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
		return PORT_FIBRE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
		return PORT_DA;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
		return PORT_OTHER;
	default:
		WARN_ON_ONCE(1);
		return PORT_OTHER;
	}
}

/* ethtool get_link_ksettings handler: query PTYS and report supported /
 * advertised modes, autoneg state, connector type and current speed/duplex.
 */
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 connector_type;
	bool autoneg;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	autoneg = mlxsw_sp_port->link.autoneg;
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
					 cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
	cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
	ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
				    eth_proto_oper, cmd);

	return 0;
}

/* ethtool set_link_ksettings handler: translate the request into a PTYS
 * admin mask (advertised modes when autoneg is on, a single forced speed
 * otherwise) and re-toggle the port so the new setting takes effect.
 */
static int
mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap, eth_proto_new;
	bool autoneg;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);

	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
	if (!autoneg && cmd->base.speed == SPEED_56000) {
netdev_err(dev, "56G not supported with autoneg off\n"); 3306 return -EINVAL; 3307 } 3308 eth_proto_new = autoneg ? 3309 ops->to_ptys_advert_link(mlxsw_sp, cmd) : 3310 ops->to_ptys_speed(mlxsw_sp, cmd->base.speed); 3311 3312 eth_proto_new = eth_proto_new & eth_proto_cap; 3313 if (!eth_proto_new) { 3314 netdev_err(dev, "No supported speed requested\n"); 3315 return -EINVAL; 3316 } 3317 3318 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3319 eth_proto_new, autoneg); 3320 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3321 if (err) 3322 return err; 3323 3324 mlxsw_sp_port->link.autoneg = autoneg; 3325 3326 if (!netif_running(dev)) 3327 return 0; 3328 3329 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3330 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3331 3332 return 0; 3333 } 3334 3335 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3336 struct ethtool_modinfo *modinfo) 3337 { 3338 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3339 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3340 int err; 3341 3342 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3343 mlxsw_sp_port->mapping.module, 3344 modinfo); 3345 3346 return err; 3347 } 3348 3349 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3350 struct ethtool_eeprom *ee, 3351 u8 *data) 3352 { 3353 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3354 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3355 int err; 3356 3357 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3358 mlxsw_sp_port->mapping.module, ee, 3359 data); 3360 3361 return err; 3362 } 3363 3364 static int 3365 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3366 { 3367 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3368 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3369 3370 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3371 } 3372 3373 static const struct ethtool_ops 
mlxsw_sp_port_ethtool_ops = { 3374 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 3375 .get_link = ethtool_op_get_link, 3376 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 3377 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3378 .get_strings = mlxsw_sp_port_get_strings, 3379 .set_phys_id = mlxsw_sp_port_set_phys_id, 3380 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3381 .get_sset_count = mlxsw_sp_port_get_sset_count, 3382 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3383 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3384 .get_module_info = mlxsw_sp_get_module_info, 3385 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3386 .get_ts_info = mlxsw_sp_get_ts_info, 3387 }; 3388 3389 static int 3390 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 3391 { 3392 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3393 const struct mlxsw_sp_port_type_speed_ops *ops; 3394 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3395 u32 eth_proto_admin; 3396 u32 upper_speed; 3397 u32 base_speed; 3398 int err; 3399 3400 ops = mlxsw_sp->port_type_speed_ops; 3401 3402 err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, 3403 &base_speed); 3404 if (err) 3405 return err; 3406 upper_speed = base_speed * width; 3407 3408 eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); 3409 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3410 eth_proto_admin, mlxsw_sp_port->link.autoneg); 3411 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3412 } 3413 3414 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3415 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3416 bool dwrr, u8 dwrr_weight) 3417 { 3418 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3419 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3420 3421 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3422 next_index); 3423 mlxsw_reg_qeec_de_set(qeec_pl, true); 3424 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3425 
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Configure the maximum shaper of one ETS element via QEEC. */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Configure the minimum shaper of one ETS element via QEEC. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map a switch priority to a traffic class via the QTCT register. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Set up the default ETS configuration for a port: element hierarchy,
 * disabled max shapers, min shapers for the multicast TCs and all
 * priorities mapped to TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs 8..15 are the multicast companions of TCs 0..7. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HIERARCY_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable/disable multicast-aware TC mapping via the QTCTM register. */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create and register a netdev for @local_port and bring all port
 * subsystems (buffers, ETS, DCB, FIDs, qdiscs, NVE, VLANs) up in order.
 * On error, tear down in the exact reverse order.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   module + 1, split, lane / width,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	/* The port must be visible in the ports array before the netdev is
	 * registered, since the netdev callbacks look it up there.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down a port created by mlxsw_sp_port_create(), in reverse order of
 * creation.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* Return true if a port object exists for @local_port. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Remove all created ports and free the port bookkeeping arrays. */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}

/* Allocate the port arrays and create a port for every local port that has
 * a mapped module. Local port 0 is the CPU port and is skipped.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
						 GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		/* Mark as invalid */
		mlxsw_sp->port_to_module[i] = -1;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}

/* Return the first local port of the cluster @local_port belongs to. */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

/* Create @count split ports starting at @base_port, each using
 * MLXSW_PORT_MODULE_MAX_WIDTH / count lanes of @module.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count, u8 offset)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   true, module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Re-create the unsplit (full-width) ports after a split was undone or
 * failed.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

/* devlink port split handler: validate the request, remove the ports that
 * will be replaced and create the split ports.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 local_ports_in_1x, local_ports_in_2x, offset;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
		return -EIO;

	local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
	local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		offset = local_ports_in_2x;
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	} else {
		offset = local_ports_in_1x;
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
					 offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best effort: restore the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

/* devlink port unsplit handler: remove the split ports and re-create the
 * original full-width ones.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 local_ports_in_1x, local_ports_in_2x, offset;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
		return -EIO;

	local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
	local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	if (count == 2)
		offset = local_ports_in_2x;
	else
		offset = local_ports_in_1x;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

/* PUDE event handler: reflect the port's new operational status on the
 * netdev carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

/* Drain all timestamp records from an MTPPTR event and hand each one to
 * the Spectrum-1 PTP code.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port =
mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 4108 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4109 for (i = 0; i < num_rec; i++) { 4110 u8 domain_number; 4111 u8 message_type; 4112 u16 sequence_id; 4113 u64 timestamp; 4114 4115 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4116 &domain_number, &sequence_id, 4117 ×tamp); 4118 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4119 message_type, domain_number, 4120 sequence_id, timestamp); 4121 } 4122 } 4123 4124 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4125 char *mtpptr_pl, void *priv) 4126 { 4127 struct mlxsw_sp *mlxsw_sp = priv; 4128 4129 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4130 } 4131 4132 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4133 char *mtpptr_pl, void *priv) 4134 { 4135 struct mlxsw_sp *mlxsw_sp = priv; 4136 4137 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4138 } 4139 4140 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4141 u8 local_port, void *priv) 4142 { 4143 struct mlxsw_sp *mlxsw_sp = priv; 4144 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4145 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4146 4147 if (unlikely(!mlxsw_sp_port)) { 4148 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4149 local_port); 4150 return; 4151 } 4152 4153 skb->dev = mlxsw_sp_port->dev; 4154 4155 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4156 u64_stats_update_begin(&pcpu_stats->syncp); 4157 pcpu_stats->rx_packets++; 4158 pcpu_stats->rx_bytes += skb->len; 4159 u64_stats_update_end(&pcpu_stats->syncp); 4160 4161 skb->protocol = eth_type_trans(skb, skb->dev); 4162 netif_receive_skb(skb); 4163 } 4164 4165 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4166 void *priv) 4167 { 4168 skb->offload_fwd_mark = 1; 4169 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 
}

/* As above, but additionally mark the packet as L3-forwarded in hardware. */
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* Deliver a sampled packet to the psample group configured on the port.
 * The skb is always consumed, whether or not it could be delivered.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	/* Honor the configured truncation length, if any. */
	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

/* Dispatch a trapped packet to the ASIC-generation specific PTP receive
 * path (which decides whether to timestamp, hold or pass it on).
 */
static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
				     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

/* Convenience wrappers around MLXSW_RXL selecting the Rx handler variant. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Traps and events common to all Spectrum generations. Each entry binds a
 * trap ID to an Rx/event handler, trap action and trap group.
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	/* LLDP goes through the PTP handler; presumably so that timestamping
	 * of peer-delay frames sharing the trap path is possible — confirm.
	 */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

/* Listeners needed only on Spectrum-1, which reports PTP timestamps via
 * dedicated FIFO events rather than in the completion queue.
 */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Program a rate policer (QPCR) for every trap group that should be
 * rate-limited on its way to the CPU. Groups without an explicit case keep
 * their existing configuration. Returns 0 or a register-write errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		default:
			/* No policer for this group; leave it untouched. */
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Bind each trap group (HTGT) to a priority, traffic class and the policer
 * programmed by mlxsw_sp_cpu_policers_set() (policer ID == group ID).
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case
MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			/* Events are not rate-limited. */
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* Group IDs double as policer IDs; make sure the device
		 * actually has a policer with this ID.
		 */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap/event listeners, unwinding the ones already
 * registered on failure. Returns 0 or the first registration error.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister every listener in the given array. */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i =
0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Trap subsystem init: program CPU policers and trap groups, then register
 * the common listeners followed by the per-ASIC extra listeners.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	return err;
}

/* Trap subsystem teardown, in reverse registration order. */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure LAG hashing (SLCR) with a device-unique seed derived from the
 * base MAC, and allocate the per-LAG upper-device tracking array.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

/* Free the LAG upper-device tracking array. */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Minimal trap-group setup (EMAD only) needed before full trap init. */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* PTP operations for Spectrum-1 (FIFO-based timestamping). */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
};

/* PTP operations for Spectrum-2 and later. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
};

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common driver init, shared by all Spectrum generations. The per-ASIC
 * init functions fill in the ops pointers before calling here. Ordering of
 * the sub-system init calls matters; see the inline comments.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct
mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	/* Validate (and possibly flash) the firmware before touching
	 * anything else.
	 */
	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	/* Ports come last so that all subsystems they depend on are ready. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Unwind in exact reverse order of initialization. */
err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: select the SP1-specific ops/tables, then run the
 * common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Spectrum-2 init: select the SP2-specific ops, then run the common init.
 * Note: no req_rev/fw_filename, so no firmware validation is performed.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Driver teardown; strictly the reverse of mlxsw_sp_init(). */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Device configuration profile for Spectrum-1; includes the static KVD
 * partitioning (linear/hash-single/hash-double).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Device configuration profile for Spectrum-2; KVD sizing is not used
 * there (the device manages it), hence no used_kvd_sizes.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Fill devlink size-parameter descriptors for the KVD and its three
 * partitions, bounded by the device-reported minimum sizes.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct
mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* Total KVD size is fixed; each partition may grow up to whatever is
	 * left after the other two partitions take their minimum.
	 */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the KVD resource tree (KVD + linear/hash-double/hash-single
 * children) with devlink for Spectrum-1. Returns 0 or a negative errno.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the non-linear remainder between double and single hash
	 * partitions according to the profile's parts ratio.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* Spectrum-2 only exposes the KVD as a single monolithic resource. */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

/* devlink resources hook for Spectrum-1. */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}

/* devlink resources hook for Spectrum-2/3. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp2_resources_kvd_register(mlxsw_core);
}

/* Compute the effective KVD partition sizes: user-provided values from
 * devlink when set, otherwise profile defaults, then sanity-check them
 * against device minimums. Returns 0 or -EIO on invalid sizes.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink validator: fw_load_policy only accepts 'driver' or 'flash'. */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

/* devlink parameters common to all Spectrum generations. */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink parameters and set their default values. */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union
devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	/* Default firmware load policy: let the driver decide. */
	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

/* Undo mlxsw_sp_params_register(). */
static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* Devlink get callback for the ACL region rehash interval (runtime). */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* Devlink set callback for the ACL region rehash interval (runtime). */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* Extra devlink parameters for Spectrum-2 and later. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Register the common parameters plus the Spectrum-2 specific ones;
 * unwinds the common registration on failure.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	/* NOTE(review): this sets a driverinit value for a parameter that
	 * only supports the RUNTIME cmode above — confirm devlink accepts
	 * this; it looks unnecessary.
	 */
	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

/* Undo mlxsw_sp2_params_register(), in reverse order. */
static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

/* Hand a transmitted skb to the PTP code for TX timestamping; the Tx
 * header is stripped first.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* mlxsw core driver ops for Spectrum-1 devices (continued in the next
 * chunk).
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.txhdr_construct		=
mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.params_register		= mlxsw_sp_params_register,
	.params_unregister		= mlxsw_sp_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
};

/* mlxsw core driver ops for Spectrum-2 devices. Differs from Spectrum-1
 * in init, resources and params callbacks and the config profile.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};

/* mlxsw core driver ops for Spectrum-3 devices; reuses the Spectrum-2
 * callbacks and config profile.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};

/* True iff @dev is an mlxsw_sp front-panel port netdev, identified by
 * its netdev_ops pointer.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* netdev_walk_all_lower_dev() callback: stop (return 1) on the first
 * lower device that is an mlxsw_sp port and report it through @data.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw_sp port underlying @dev (which may itself be the port,
 * or a stacked device such as a LAG/VLAN/bridge above one). Returns NULL
 * when no mlxsw_sp port is below. Caller must hold RTNL.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Resolve the mlxsw_sp instance below @dev, or NULL if @dev is not
 * backed by this driver (body continues in the next chunk).
 */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add the port to LAG @lag_id's collector at @port_index (SLCOR reg). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port from LAG @lag_id's collector (SLCOR reg). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection on the port for LAG @lag_id (SLCOR reg). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable collection on the port for LAG @lag_id (SLCOR reg). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Map @lag_dev to a device LAG index: reuse the index already bound to
 * @lag_dev, otherwise hand out the lowest free one. -EBUSY when all
 * MAX_LAG entries are taken.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			/* Remember the first unused slot. */
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Can this LAG master be offloaded? Requires a free/matching LAG index
 * and a hash-based Tx policy; failures are reported via extack.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member slot inside LAG @lag_id; -EBUSY when full. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Enslave the port to @lag_dev: create the device LAG on first use, add
 * the port to its collector and record the mapping in the core.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member: instantiate the LAG in the device. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Reverse of mlxsw_sp_port_lag_join(): detach the port, clean up its
 * VLANs and bridge memberships, and destroy the device LAG when the
 * last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are members in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* Add the port to LAG @lag_id's distributor (SLDR reg). */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove the port from LAG @lag_id's distributor (SLDR reg). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable collection then distribution for the port's LAG, unwinding the
 * collector on distributor failure.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Disable distribution then collection for the port's LAG, restoring the
 * distributor on collector failure.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Reflect the bonding driver's tx_enabled state into the device. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the STP state of every VLAN on the port to forwarding or
 * discarding (body continues in the next chunk).
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp =
mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is too large for the stack; allocate it. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	/* Apply the same state to all 4K VLANs in one register write. */
	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for OVS: virtual-port mode, all VLANs forwarding and
 * configured with learning disabled. Unwinds every step on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only for the VIDs already disabled. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Reverse of mlxsw_sp_port_ovs_join(), in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* True when more than one VxLAN device is enslaved to @br_dev. */
static bool
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* Validate that no two VxLAN devices under @br_dev map to the same
 * PVID-and-egress-untagged VLAN.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Check the constraints for offloading a bridge with VxLAN lowers:
 * multicast disabled, at most one VxLAN in a VLAN-unaware bridge, and
 * unique VLAN mappings in a VLAN-aware bridge. Reports via extack.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER for an mlxsw_sp port: veto
 * unsupported topologies, then join/leave bridge, LAG, OVS or macvlan
 * uppers (body continues in the next chunk).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		/* Veto phase: reject topologies the device cannot offload
		 * before the stack commits to them.
		 */
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		/* Commit phase: mirror the accepted change into the device. */
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			/* VLAN upper left a bridge; leave it in hardware too. */
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Propagate CHANGELOWERSTATE on a LAG member into the device's
 * collector/distributor state (body continues in the next chunk).
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch a netdev notifier event on an mlxsw_sp port to the matching
 * upper/lower handler.
 */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Fan a LAG-device event out to every mlxsw_sp member port; stop on the
 * first error.
 */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device on top of an
 * mlxsw_sp port: only bridge and macvlan uppers are supported.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed anything else. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Fan a VLAN-over-LAG event out to every mlxsw_sp member port of the
 * LAG; stop on the first error.
 */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle events for a VLAN device on top of an offloaded bridge; only
 * macvlan uppers (router interfaces) are supported (body continues in
 * the next chunk).
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct
netdev_notifier_changeupper_info *info = ptr; 6093 struct netlink_ext_ack *extack; 6094 struct net_device *upper_dev; 6095 6096 if (!mlxsw_sp) 6097 return 0; 6098 6099 extack = netdev_notifier_info_to_extack(&info->info); 6100 6101 switch (event) { 6102 case NETDEV_PRECHANGEUPPER: 6103 upper_dev = info->upper_dev; 6104 if (!netif_is_macvlan(upper_dev)) { 6105 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6106 return -EOPNOTSUPP; 6107 } 6108 if (!info->linking) 6109 break; 6110 if (netif_is_macvlan(upper_dev) && 6111 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 6112 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6113 return -EOPNOTSUPP; 6114 } 6115 break; 6116 case NETDEV_CHANGEUPPER: 6117 upper_dev = info->upper_dev; 6118 if (info->linking) 6119 break; 6120 if (netif_is_macvlan(upper_dev)) 6121 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6122 break; 6123 } 6124 6125 return 0; 6126 } 6127 6128 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 6129 unsigned long event, void *ptr) 6130 { 6131 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6132 u16 vid = vlan_dev_vlan_id(vlan_dev); 6133 6134 if (mlxsw_sp_port_dev_check(real_dev)) 6135 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 6136 event, ptr, vid); 6137 else if (netif_is_lag_master(real_dev)) 6138 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 6139 real_dev, event, 6140 ptr, vid); 6141 else if (netif_is_bridge_master(real_dev)) 6142 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 6143 event, ptr, vid); 6144 6145 return 0; 6146 } 6147 6148 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 6149 unsigned long event, void *ptr) 6150 { 6151 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 6152 struct netdev_notifier_changeupper_info *info = ptr; 6153 struct netlink_ext_ack *extack; 6154 struct net_device *upper_dev; 6155 6156 if (!mlxsw_sp) 6157 return 0; 6158 6159 
extack = netdev_notifier_info_to_extack(&info->info); 6160 6161 switch (event) { 6162 case NETDEV_PRECHANGEUPPER: 6163 upper_dev = info->upper_dev; 6164 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 6165 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6166 return -EOPNOTSUPP; 6167 } 6168 if (!info->linking) 6169 break; 6170 if (netif_is_macvlan(upper_dev) && 6171 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { 6172 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6173 return -EOPNOTSUPP; 6174 } 6175 break; 6176 case NETDEV_CHANGEUPPER: 6177 upper_dev = info->upper_dev; 6178 if (info->linking) 6179 break; 6180 if (is_vlan_dev(upper_dev)) 6181 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 6182 if (netif_is_macvlan(upper_dev)) 6183 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6184 break; 6185 } 6186 6187 return 0; 6188 } 6189 6190 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 6191 unsigned long event, void *ptr) 6192 { 6193 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 6194 struct netdev_notifier_changeupper_info *info = ptr; 6195 struct netlink_ext_ack *extack; 6196 6197 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 6198 return 0; 6199 6200 extack = netdev_notifier_info_to_extack(&info->info); 6201 6202 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 6203 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6204 6205 return -EOPNOTSUPP; 6206 } 6207 6208 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 6209 { 6210 struct netdev_notifier_changeupper_info *info = ptr; 6211 6212 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 6213 return false; 6214 return netif_is_l3_master(info->upper_dev); 6215 } 6216 6217 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 6218 struct net_device *dev, 6219 unsigned long event, void *ptr) 6220 { 6221 struct netdev_notifier_changeupper_info 
*cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		/* Only CHANGEUPPER carries the changeupper payload; recover
		 * it from the generic notifier info.
		 */
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Bridge not backed by this driver instance; ignore. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* Device that is down is handled by NETDEV_PRE_UP. */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		/* Device coming up while already enslaved to an offloaded
		 * bridge: join it now.
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Main netdevice notifier callback for one Spectrum instance.
 *
 * On NETDEV_UNREGISTER, invalidates any mirroring (SPAN) entry bound to the
 * disappearing device, then respins SPAN state for every event. Afterwards
 * the event is routed to the handler matching the device type.
 *
 * NOTE(review): the VxLAN check below is a plain 'if' (not 'else if'), so a
 * VxLAN device may also fall through to the chain that follows — presumably
 * intentional; confirm before restructuring.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Hook the driver's IPv4/IPv6 address-validator handlers into the kernel's
 * inetaddr/inet6addr validator notifier chains (registered at module init).
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI ID table for the first-generation Spectrum ASIC. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX,
PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

/* PCI ID table and driver for the Spectrum-2 ASIC. */
static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

/* PCI ID table and driver for the Spectrum-3 ASIC. */
static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Module entry point: register the address-validator notifiers, the three
 * per-generation core drivers, then the three PCI drivers. On any failure,
 * unwind everything registered so far in exact reverse order via the goto
 * cleanup chain below.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

	/* Each label undoes the registrations that succeeded before the
	 * step named by the label failed (reverse order of registration).
	 */
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

/* Module exit: tear everything down in exact reverse order of init. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);