// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "../mlxfw/mlxfw.h"

/* Firmware minor versions are grouped into branches of 100; two versions on
 * the same branch are treated as compatible by mlxsw_sp_fw_rev_validate().
 */
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

/* Firmware revision required by this driver on Spectrum-1 devices. */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 1703
#define MLXSW_SP1_FWREV_SUBMINOR 4
/* Oldest minor whose firmware can be reset after flashing (-EAGAIN path in
 * mlxsw_sp_fw_rev_validate()).
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Name of the .mfa2 image requested from userspace, derived from the
 * required revision above.
 */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Tx header fields prepended to every transmitted packet; see
 * mlxsw_sp_txhdr_construct().
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup.
Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

/* Binds the generic mlxfw flashing state machine to the mlxsw_sp instance
 * it operates on; recovered via container_of() in the callbacks below.
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

/* mlxfw callback: query a firmware component's maximum size, required
 * alignment and maximum write chunk size via the MCQI register.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	/* Clamp the reported values to what the MCDA download path accepts. */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* mlxfw callback: take the firmware update lock and obtain an update
 * handle. Returns -EBUSY if the device FSM is not idle.
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: announce an upcoming component download of the given
 * size to the device.
 */
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: download one block of component data via the MCDA
 * register at the given offset.
 */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

/* mlxfw callback: ask the device to verify a fully downloaded component. */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: activate the freshly flashed firmware image. */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: read back the device FSM state and last error code. */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	/* Cap unknown device error codes at the generic maximum. */
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* mlxfw callback: abort an in-progress update. Best effort - the write
 * result is intentionally ignored.
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: release the update handle taken by mlxsw_sp_fsm_lock().
 * Best effort - the write result is intentionally ignored.
 */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query =
mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release
};

/* Flash @firmware to the device using the generic mlxfw flow driven by
 * the register-based callbacks above.
 */
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}

/* Validate the running firmware against the revision the driver requires
 * and flash the bundled image if the running one is too old.
 *
 * Returns 0 when the firmware is usable as-is (or no check is required),
 * -EAGAIN after a successful flash when the running firmware supports a
 * post-flash reset (caller should reset the device), or a negative errno.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	/* Same branch and at least the required minor.subminor: good. */
	if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
	    (rev->minor > req_rev->minor ||
	     (rev->minor == req_rev->minor &&
	      rev->subminor >= req_rev->subminor)))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}

/* Read a flow counter's packet and byte values (MGPC NOP opcode does not
 * clear the counter). @packets / @bytes may be NULL if not needed.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

/* Zero a flow counter in hardware. */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a flow counter from the pool and make sure it starts at zero.
 * On clear failure the counter is returned to the pool.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

/* Return a flow counter to the pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

/* Prepend the control Tx header the device consumes on transmission.
 * Caller must guarantee MLXSW_TXHDR_LEN bytes of headroom.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

/* Translate a bridge STP port state into the matching SPMS register state.
 * BUG()s on values outside the BR_STATE_* set.
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

/* Set the STP state of a single VID on a port via the SPMS register.
 * The payload is heap-allocated because SPMS is large.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Cache the switch base MAC (SPAD register), from which per-port MAC
 * addresses are later derived.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char
spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

/* Enable/disable packet sampling on a port with the given rate (MPSC). */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

/* Set the administrative (up/down) state of a port (PAOS). */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

/* Program a port's MAC address into hardware (PPAD). */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port MAC from the switch base MAC plus the local port number
 * (added into the last octet) and program it into hardware.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

/* Set a port's MTU (PMTU), accounting for the Tx header and Ethernet
 * header, and rejecting values above the hardware maximum with -EINVAL.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	/* Query with MTU 0 to learn the hardware maximum first. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

/* Assign a port to a switch partition (PSPA). */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

/* Enable/disable virtual-port (VLAN-aware) mode on a port (SVPE). */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable/disable FDB learning for a single VID on a port (SPVMLR). */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

/* Program the port's PVID register (SPVID) without touching the
 * allow-untagged setting; see mlxsw_sp_port_pvid_set() for the full flow.
 */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

/* Allow or disallow reception of untagged frames on a port (SPAFT). */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

/* Set the port PVID. VID 0 means "no PVID": untagged reception is turned
 * off instead. On partial failure the previous PVID is restored.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

/* Create the system-port to local-port mapping for a port (SSPR). */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

/* Query the module, width and first lane a local port is mapped to (PMLP). */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

/* Map a port onto @width consecutive lanes of @module starting at @lane. */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp
= mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* Unmap a port from its module by programming a width of zero. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* ndo_open: administratively enable the port, then start the Tx queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop: stop the Tx queue, then administratively disable the port. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

/* ndo_start_xmit: prepend the Tx header and hand the skb to the core for
 * transmission, updating per-CPU stats. Packets that cannot be transmitted
 * (headroom reallocation failure, padding failure, full queue) are dropped
 * and counted; only a busy core yields NETDEV_TX_BUSY.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Ensure room for the Tx header, reallocating if necessary. */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	/* eth_skb_pad() frees the skb on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: intentionally empty - nothing to program here. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

/* ndo_set_mac_address: validate and program a new MAC, then mirror it
 * into the netdev on success.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* Priority-group buffer threshold for @mtu, in cells (two MTUs' worth). */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* Extra headroom (in cells) needed to absorb traffic in flight while a
 * PFC pause takes effect: the peer-advertised delay (given in bits,
 * converted to bytes then cells) scaled by the cell factor, plus one MTU.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay +
	       mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

/* Delay allowance (in cells) for a priority group: PFC-specific delay if
 * PFC is enabled, the fixed PAUSE allowance if link pause is enabled,
 * zero otherwise.
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

/* Pack one PBMC buffer record: lossy buffers have no Xoff threshold. */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

/* Configure port headroom buffers (PBMC) for the given MTU, priority to
 * traffic-class mapping and flow-control settings. Only priority groups
 * that some priority actually maps to are (re)configured; a group is
 * lossless when PFC is enabled for a mapped priority or global pause is on.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		/* Only touch groups that some priority maps to. */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Headroom configuration using the port's current DCB state (or defaults
 * when DCB ETS is not configured).
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

/* ndo_change_mtu: update headroom, SPAN buffers and the port MTU in that
 * order, rolling back already-applied steps on failure.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Sum the per-CPU software (CPU-path) counters into @stats, using the
 * u64_stats seqlock to get consistent 64-bit snapshots per CPU.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection.
 */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* ndo_has_offload_stats: only the CPU-hit statistics are supported. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* ndo_get_offload_stats: dispatch to the software (CPU-path) counters. */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Fetch one PPCNT counter group/priority for the port into @ppcnt_pl. */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill @stats from the hardware IEEE 802.3 counter group. */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Fill extended per-TC/per-priority statistics. Individual query failures
 * are tolerated: the affected entries simply keep their previous values.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Delayed work: periodically refresh the cached hardware statistics so
 * mlxsw_sp_port_get_stats64() can serve them from atomic context. Skips
 * the (expensive) register reads while the carrier is down, but always
 * re-arms itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Write one SPVM record covering [vid_begin, vid_end] for this port.
 * The register payload buffer is allocated dynamically and freed before
 * returning the write result.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership for the whole range [vid_begin, vid_end], chunked
 * into at most MLXSW_REG_SPVM_REC_MAX_COUNT VIDs per register write.
 * Stops and returns on the first failing chunk.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Release every VLAN entry still listed on the port. */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

/* Create a port-VLAN entry: program HW membership (VID 1 is programmed
 * untagged), allocate the tracking structure with an initial reference,
 * and link it on the port's VLAN list. HW membership is rolled back if
 * the allocation fails.
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->ref_count = 1;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Unlink, free and remove HW membership for a port-VLAN entry. */
static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* Get-or-create: return an existing entry for 'vid' with its reference
 * count bumped, or create a fresh one.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan->ref_count++;
		return mlxsw_sp_port_vlan;
	}

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

/* Drop one reference; on the last reference detach the entry from its
 * bridge port or router FID (if any) before destroying it.
 */
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (--mlxsw_sp_port_vlan->ref_count != 0)
		return;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

/* .ndo_vlan_rx_add_vid handler. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

/* .ndo_vlan_rx_kill_vid handler. */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

/* .ndo_get_phys_port_name: delegate to the core's port naming. */
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
						  mlxsw_sp_port->local_port,
						  name, len);
}

/* Look up a matchall tc entry on the port by its filter cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const
				      struct tc_action *a,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;
	struct net_device *to_dev;

	to_dev = tcf_mirred_dev(a);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
					true, &mirror->span_id);
}

/* Tear down a matchall mirror: remove the SPAN entry recorded in
 * 'mirror', using the direction captured at add time.
 */
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Install a matchall sample action. Only one active sampler per port is
 * allowed; the psample group is published with rcu_assign_pointer before
 * sampling is enabled in HW, and unpublished if the HW write fails.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

/* Disable sampling on the port and unpublish the psample group. */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

/* Offload a matchall classifier (single mirror or sample action on
 * protocol 'all'); tracks the entry on the port's mall_tc_list so it can
 * be found again by cookie on destroy.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions); /* NOTE(review): appears unused in this function */
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	a = tcf_exts_first_action(f->exts);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* Remove a previously offloaded matchall entry, dispatching on the
 * action type recorded at add time.
 */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

/* Dispatch a matchall offload command (replace/destroy). */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch a flower offload command to the ACL/flower backend. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* tc block callback for matchall classifiers; flower is handled by the
 * separate flower callback and is accepted here as a no-op.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Ingress variant of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

/* Egress variant of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* tc block callback for flower classifiers; cb_priv is the shared ACL
 * block. Matchall is accepted as a no-op (handled by the other callback).
 */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Bind a port to the flower ACL block of a tc block. The ACL block and
 * its callback registration are shared across ports: created on first
 * bind, refcounted afterwards. Note the unwind path: err_cb_register
 * sits inside the decref 'if' so the block is only destroyed when the
 * last reference went away.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tcf_block *block, bool ingress,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = __tcf_block_cb_register(block,
						   mlxsw_sp_setup_tc_block_cb_flower,
						   mlxsw_sp, acl_block, extack);
		if (IS_ERR(block_cb)) {
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
	} else {
		acl_block = tcf_block_cb_priv(block_cb);
	}
	tcf_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	return 0;

err_block_bind:
	if (!tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
err_cb_register:
		mlxsw_sp_acl_block_destroy(acl_block);
	}
	return err;
}

/* Undo flower_bind: unbind the port from the ACL block and release the
 * shared callback registration on the last reference.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = tcf_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
		mlxsw_sp_acl_block_destroy(acl_block);
	}
}

/* TC_SETUP_BLOCK handler: register/unregister both the per-port matchall
 * callback and the shared flower callback for the block's direction.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	switch (f->command) {
	case TC_BLOCK_BIND:
		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
					    mlxsw_sp_port, f->extack);
		if (err)
			return err;
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
							  f->block, ingress,
							  f->extack);
		if (err) {
			/* Roll back the matchall registration on failure. */
			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
			return err;
		}
		return 0;
	case TC_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f->block, ingress);
		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* .ndo_setup_tc entry point: dispatch by offload type. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Toggle NETIF_F_HW_TC: refuse to disable while offloaded rules exist;
 * otherwise flip the ACL blocks' disabled counters accordingly.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* Handler signature for a single toggleable netdev feature. */
typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply one feature bit change: invoke the handler only when the bit
 * actually changed, and update dev->features on success.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

/* .ndo_set_features: only NETIF_F_HW_TC is handled. */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
}

/* Netdevice operations for Spectrum ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
	.ndo_set_features	= mlxsw_sp_set_features,
};

/* ethtool .get_drvinfo: report driver name/version, FW revision and
 * bus info from the core bus_info structure.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

/* ethtool .get_pauseparam: report the cached pause configuration. */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Write the requested pause settings to the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool .set_pauseparam: rejected while PFC is enabled or when pause
 * autonegotiation is requested. Headroom is reconfigured first and
 * rolled back if the PFCC write fails.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Restore headroom for the previous pause configuration. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* One ethtool statistic: name, PPCNT payload getter, and whether the
 * value is in cell units and must be converted to bytes.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter =
			mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2819 (RMON) packet-size histogram counters. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* Per-priority counters; the priority number is appended to each name. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-TC counters; queue depth is reported in cells and converted. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total ethtool stats count: base + RMON + per-prio + per-TC groups. */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit the per-priority stat names ("<name>_<prio>") and advance *p. */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Emit the per-TC stat names ("<name>_<tc>") and advance *p. */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool .get_strings: names in the same order the values are filled
 * by mlxsw_sp_port_get_stats().
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

/* ethtool .set_phys_id: drive the port LED via the MLCR register. */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its stats descriptor table and length. */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Read one counter group into data[data_index..], converting cell-based
 * counters to bytes.
 * NOTE(review): the return value of mlxsw_sp_port_get_stats_raw() is
 * ignored here; on query failure stale ppcnt_pl contents are parsed.
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool .get_ethtool_stats: fill 'data' in the same order as
 * mlxsw_sp_port_get_strings() emits the names.
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

/* ethtool .get_sset_count for ETH_SS_STATS. */
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Mapping between a PTYS speed capability mask, the matching ethtool
 * link-mode bit, and the nominal speed.
 */
struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed		= SPEED_100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed		= SPEED_1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed		= SPEED_20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed		= SPEED_25000,
	},
	/* NOTE(review): duplicate of the previous 25GBASE_SR entry —
	 * harmless but likely unintended; confirm against upstream.
	 */
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2243 .speed = SPEED_50000, 2244 }, 2245 { 2246 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2247 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 2248 .speed = SPEED_56000, 2249 }, 2250 { 2251 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2252 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 2253 .speed = SPEED_56000, 2254 }, 2255 { 2256 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2257 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 2258 .speed = SPEED_56000, 2259 }, 2260 { 2261 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2262 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 2263 .speed = SPEED_56000, 2264 }, 2265 { 2266 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2267 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2268 .speed = SPEED_100000, 2269 }, 2270 { 2271 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2272 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2273 .speed = SPEED_100000, 2274 }, 2275 { 2276 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2277 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2278 .speed = SPEED_100000, 2279 }, 2280 { 2281 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2282 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2283 .speed = SPEED_100000, 2284 }, 2285 }; 2286 2287 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 2288 2289 static void 2290 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, 2291 struct ethtool_link_ksettings *cmd) 2292 { 2293 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2294 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2295 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2296 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2297 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2298 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2299 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2300 2301 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2302 
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2303 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2304 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2305 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2306 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2307 } 2308 2309 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode) 2310 { 2311 int i; 2312 2313 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2314 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 2315 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2316 mode); 2317 } 2318 } 2319 2320 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 2321 struct ethtool_link_ksettings *cmd) 2322 { 2323 u32 speed = SPEED_UNKNOWN; 2324 u8 duplex = DUPLEX_UNKNOWN; 2325 int i; 2326 2327 if (!carrier_ok) 2328 goto out; 2329 2330 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2331 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 2332 speed = mlxsw_sp_port_link_mode[i].speed; 2333 duplex = DUPLEX_FULL; 2334 break; 2335 } 2336 } 2337 out: 2338 cmd->base.speed = speed; 2339 cmd->base.duplex = duplex; 2340 } 2341 2342 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 2343 { 2344 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2345 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2346 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2347 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2348 return PORT_FIBRE; 2349 2350 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2351 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2352 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 2353 return PORT_DA; 2354 2355 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2356 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2357 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2358 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 2359 return PORT_NONE; 2360 2361 return PORT_OTHER; 2362 } 2363 2364 static u32 2365 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2366 { 2367 u32 ptys_proto = 0; 2368 int i; 2369 
2370 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2371 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2372 cmd->link_modes.advertising)) 2373 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2374 } 2375 return ptys_proto; 2376 } 2377 2378 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2379 { 2380 u32 ptys_proto = 0; 2381 int i; 2382 2383 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2384 if (speed == mlxsw_sp_port_link_mode[i].speed) 2385 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2386 } 2387 return ptys_proto; 2388 } 2389 2390 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2391 { 2392 u32 ptys_proto = 0; 2393 int i; 2394 2395 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2396 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 2397 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2398 } 2399 return ptys_proto; 2400 } 2401 2402 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, 2403 struct ethtool_link_ksettings *cmd) 2404 { 2405 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2406 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2407 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2408 2409 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); 2410 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); 2411 } 2412 2413 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, 2414 struct ethtool_link_ksettings *cmd) 2415 { 2416 if (!autoneg) 2417 return; 2418 2419 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2420 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); 2421 } 2422 2423 static void 2424 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, 2425 struct ethtool_link_ksettings *cmd) 2426 { 2427 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) 2428 return; 2429 2430 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg); 2431 
mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); 2432 } 2433 2434 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 2435 struct ethtool_link_ksettings *cmd) 2436 { 2437 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp; 2438 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2439 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2440 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2441 u8 autoneg_status; 2442 bool autoneg; 2443 int err; 2444 2445 autoneg = mlxsw_sp_port->link.autoneg; 2446 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); 2447 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2448 if (err) 2449 return err; 2450 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, ð_proto_admin, 2451 ð_proto_oper); 2452 2453 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd); 2454 2455 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd); 2456 2457 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl); 2458 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl); 2459 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd); 2460 2461 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 2462 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper); 2463 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, 2464 cmd); 2465 2466 return 0; 2467 } 2468 2469 static int 2470 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 2471 const struct ethtool_link_ksettings *cmd) 2472 { 2473 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2474 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2475 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2476 u32 eth_proto_cap, eth_proto_new; 2477 bool autoneg; 2478 int err; 2479 2480 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); 2481 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2482 if (err) 2483 return err; 2484 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 2485 2486 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 2487 eth_proto_new = autoneg ? 2488 mlxsw_sp_to_ptys_advert_link(cmd) : 2489 mlxsw_sp_to_ptys_speed(cmd->base.speed); 2490 2491 eth_proto_new = eth_proto_new & eth_proto_cap; 2492 if (!eth_proto_new) { 2493 netdev_err(dev, "No supported speed requested\n"); 2494 return -EINVAL; 2495 } 2496 2497 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2498 eth_proto_new, autoneg); 2499 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2500 if (err) 2501 return err; 2502 2503 if (!netif_running(dev)) 2504 return 0; 2505 2506 mlxsw_sp_port->link.autoneg = autoneg; 2507 2508 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2509 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 2510 2511 return 0; 2512 } 2513 2514 static int mlxsw_sp_flash_device(struct net_device *dev, 2515 struct ethtool_flash *flash) 2516 { 2517 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2518 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2519 const struct firmware *firmware; 2520 int err; 2521 2522 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) 2523 return -EOPNOTSUPP; 2524 2525 
dev_hold(dev); 2526 rtnl_unlock(); 2527 2528 err = request_firmware_direct(&firmware, flash->data, &dev->dev); 2529 if (err) 2530 goto out; 2531 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 2532 release_firmware(firmware); 2533 out: 2534 rtnl_lock(); 2535 dev_put(dev); 2536 return err; 2537 } 2538 2539 #define MLXSW_SP_I2C_ADDR_LOW 0x50 2540 #define MLXSW_SP_I2C_ADDR_HIGH 0x51 2541 #define MLXSW_SP_EEPROM_PAGE_LENGTH 256 2542 2543 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, 2544 u16 offset, u16 size, void *data, 2545 unsigned int *p_read_size) 2546 { 2547 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2548 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; 2549 char mcia_pl[MLXSW_REG_MCIA_LEN]; 2550 u16 i2c_addr; 2551 int status; 2552 int err; 2553 2554 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); 2555 2556 if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH && 2557 offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH) 2558 /* Cross pages read, read until offset 256 in low page */ 2559 size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; 2560 2561 i2c_addr = MLXSW_SP_I2C_ADDR_LOW; 2562 if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { 2563 i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; 2564 offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; 2565 } 2566 2567 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, 2568 0, 0, offset, size, i2c_addr); 2569 2570 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); 2571 if (err) 2572 return err; 2573 2574 status = mlxsw_reg_mcia_status_get(mcia_pl); 2575 if (status) 2576 return -EIO; 2577 2578 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); 2579 memcpy(data, eeprom_tmp, size); 2580 *p_read_size = size; 2581 2582 return 0; 2583 } 2584 2585 enum mlxsw_sp_eeprom_module_info_rev_id { 2586 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, 2587 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, 2588 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, 2589 }; 2590 2591 enum mlxsw_sp_eeprom_module_info_id { 2592 
MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03, 2593 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, 2594 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, 2595 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, 2596 }; 2597 2598 enum mlxsw_sp_eeprom_module_info { 2599 MLXSW_SP_EEPROM_MODULE_INFO_ID, 2600 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID, 2601 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2602 }; 2603 2604 static int mlxsw_sp_get_module_info(struct net_device *netdev, 2605 struct ethtool_modinfo *modinfo) 2606 { 2607 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2608 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE]; 2609 u8 module_rev_id, module_id; 2610 unsigned int read_size; 2611 int err; 2612 2613 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, 2614 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2615 module_info, &read_size); 2616 if (err) 2617 return err; 2618 2619 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE) 2620 return -EIO; 2621 2622 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; 2623 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; 2624 2625 switch (module_id) { 2626 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: 2627 modinfo->type = ETH_MODULE_SFF_8436; 2628 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2629 break; 2630 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: 2631 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: 2632 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || 2633 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { 2634 modinfo->type = ETH_MODULE_SFF_8636; 2635 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2636 } else { 2637 modinfo->type = ETH_MODULE_SFF_8436; 2638 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2639 } 2640 break; 2641 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: 2642 modinfo->type = ETH_MODULE_SFF_8472; 2643 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2644 break; 2645 default: 2646 return -EINVAL; 2647 } 2648 2649 return 0; 2650 } 2651 2652 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 
2653 struct ethtool_eeprom *ee, 2654 u8 *data) 2655 { 2656 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2657 int offset = ee->offset; 2658 unsigned int read_size; 2659 int i = 0; 2660 int err; 2661 2662 if (!ee->len) 2663 return -EINVAL; 2664 2665 memset(data, 0, ee->len); 2666 2667 while (i < ee->len) { 2668 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset, 2669 ee->len - i, data + i, 2670 &read_size); 2671 if (err) { 2672 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n"); 2673 return err; 2674 } 2675 2676 i += read_size; 2677 offset += read_size; 2678 } 2679 2680 return 0; 2681 } 2682 2683 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 2684 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 2685 .get_link = ethtool_op_get_link, 2686 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 2687 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 2688 .get_strings = mlxsw_sp_port_get_strings, 2689 .set_phys_id = mlxsw_sp_port_set_phys_id, 2690 .get_ethtool_stats = mlxsw_sp_port_get_stats, 2691 .get_sset_count = mlxsw_sp_port_get_sset_count, 2692 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 2693 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 2694 .flash_device = mlxsw_sp_flash_device, 2695 .get_module_info = mlxsw_sp_get_module_info, 2696 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 2697 }; 2698 2699 static int 2700 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 2701 { 2702 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2703 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 2704 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2705 u32 eth_proto_admin; 2706 2707 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 2708 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2709 eth_proto_admin, mlxsw_sp_port->link.autoneg); 2710 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2711 } 2712 2713 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 2714 enum 
mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 2715 bool dwrr, u8 dwrr_weight) 2716 { 2717 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2718 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2719 2720 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2721 next_index); 2722 mlxsw_reg_qeec_de_set(qeec_pl, true); 2723 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 2724 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 2725 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2726 } 2727 2728 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 2729 enum mlxsw_reg_qeec_hr hr, u8 index, 2730 u8 next_index, u32 maxrate) 2731 { 2732 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2733 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2734 2735 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2736 next_index); 2737 mlxsw_reg_qeec_mase_set(qeec_pl, true); 2738 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 2739 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2740 } 2741 2742 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 2743 u8 switch_prio, u8 tclass) 2744 { 2745 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2746 char qtct_pl[MLXSW_REG_QTCT_LEN]; 2747 2748 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 2749 tclass); 2750 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 2751 } 2752 2753 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 2754 { 2755 int err, i; 2756 2757 /* Setup the elements hierarcy, so that each TC is linked to 2758 * one subgroup, which are all member in the same group. 
2759 */ 2760 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2761 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 2762 0); 2763 if (err) 2764 return err; 2765 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2766 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2767 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 2768 0, false, 0); 2769 if (err) 2770 return err; 2771 } 2772 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2773 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2774 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 2775 false, 0); 2776 if (err) 2777 return err; 2778 2779 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2780 MLXSW_REG_QEEC_HIERARCY_TC, 2781 i + 8, i, 2782 false, 0); 2783 if (err) 2784 return err; 2785 } 2786 2787 /* Make sure the max shaper is disabled in all hierarchies that 2788 * support it. 2789 */ 2790 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2791 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 2792 MLXSW_REG_QEEC_MAS_DIS); 2793 if (err) 2794 return err; 2795 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2796 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2797 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 2798 i, 0, 2799 MLXSW_REG_QEEC_MAS_DIS); 2800 if (err) 2801 return err; 2802 } 2803 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2804 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2805 MLXSW_REG_QEEC_HIERARCY_TC, 2806 i, i, 2807 MLXSW_REG_QEEC_MAS_DIS); 2808 if (err) 2809 return err; 2810 2811 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2812 MLXSW_REG_QEEC_HIERARCY_TC, 2813 i + 8, i, 2814 MLXSW_REG_QEEC_MAS_DIS); 2815 if (err) 2816 return err; 2817 } 2818 2819 /* Map all priorities to traffic class 0. 
*/ 2820 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2821 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 2822 if (err) 2823 return err; 2824 } 2825 2826 return 0; 2827 } 2828 2829 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 2830 bool enable) 2831 { 2832 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2833 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 2834 2835 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 2836 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 2837 } 2838 2839 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2840 bool split, u8 module, u8 width, u8 lane) 2841 { 2842 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 2843 struct mlxsw_sp_port *mlxsw_sp_port; 2844 struct net_device *dev; 2845 int err; 2846 2847 err = mlxsw_core_port_init(mlxsw_sp->core, local_port); 2848 if (err) { 2849 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 2850 local_port); 2851 return err; 2852 } 2853 2854 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 2855 if (!dev) { 2856 err = -ENOMEM; 2857 goto err_alloc_etherdev; 2858 } 2859 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 2860 mlxsw_sp_port = netdev_priv(dev); 2861 mlxsw_sp_port->dev = dev; 2862 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 2863 mlxsw_sp_port->local_port = local_port; 2864 mlxsw_sp_port->pvid = 1; 2865 mlxsw_sp_port->split = split; 2866 mlxsw_sp_port->mapping.module = module; 2867 mlxsw_sp_port->mapping.width = width; 2868 mlxsw_sp_port->mapping.lane = lane; 2869 mlxsw_sp_port->link.autoneg = 1; 2870 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 2871 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 2872 2873 mlxsw_sp_port->pcpu_stats = 2874 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 2875 if (!mlxsw_sp_port->pcpu_stats) { 2876 err = -ENOMEM; 2877 goto err_alloc_stats; 2878 } 2879 2880 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 2881 GFP_KERNEL); 2882 if 
(!mlxsw_sp_port->sample) { 2883 err = -ENOMEM; 2884 goto err_alloc_sample; 2885 } 2886 2887 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 2888 &update_stats_cache); 2889 2890 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 2891 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 2892 2893 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane); 2894 if (err) { 2895 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 2896 mlxsw_sp_port->local_port); 2897 goto err_port_module_map; 2898 } 2899 2900 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 2901 if (err) { 2902 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 2903 mlxsw_sp_port->local_port); 2904 goto err_port_swid_set; 2905 } 2906 2907 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 2908 if (err) { 2909 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 2910 mlxsw_sp_port->local_port); 2911 goto err_dev_addr_init; 2912 } 2913 2914 netif_carrier_off(dev); 2915 2916 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 2917 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 2918 dev->hw_features |= NETIF_F_HW_TC; 2919 2920 dev->min_mtu = 0; 2921 dev->max_mtu = ETH_MAX_MTU; 2922 2923 /* Each packet needs to have a Tx header (metadata) on top all other 2924 * headers. 
2925 */ 2926 dev->needed_headroom = MLXSW_TXHDR_LEN; 2927 2928 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 2929 if (err) { 2930 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 2931 mlxsw_sp_port->local_port); 2932 goto err_port_system_port_mapping_set; 2933 } 2934 2935 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 2936 if (err) { 2937 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 2938 mlxsw_sp_port->local_port); 2939 goto err_port_speed_by_width_set; 2940 } 2941 2942 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 2943 if (err) { 2944 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 2945 mlxsw_sp_port->local_port); 2946 goto err_port_mtu_set; 2947 } 2948 2949 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2950 if (err) 2951 goto err_port_admin_status_set; 2952 2953 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 2954 if (err) { 2955 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 2956 mlxsw_sp_port->local_port); 2957 goto err_port_buffers_init; 2958 } 2959 2960 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 2961 if (err) { 2962 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 2963 mlxsw_sp_port->local_port); 2964 goto err_port_ets_init; 2965 } 2966 2967 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 2968 if (err) { 2969 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 2970 mlxsw_sp_port->local_port); 2971 goto err_port_tc_mc_mode; 2972 } 2973 2974 /* ETS and buffers must be initialized before DCB. 
*/ 2975 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 2976 if (err) { 2977 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 2978 mlxsw_sp_port->local_port); 2979 goto err_port_dcb_init; 2980 } 2981 2982 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 2983 if (err) { 2984 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 2985 mlxsw_sp_port->local_port); 2986 goto err_port_fids_init; 2987 } 2988 2989 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 2990 if (err) { 2991 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 2992 mlxsw_sp_port->local_port); 2993 goto err_port_qdiscs_init; 2994 } 2995 2996 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 2997 if (IS_ERR(mlxsw_sp_port_vlan)) { 2998 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 2999 mlxsw_sp_port->local_port); 3000 err = PTR_ERR(mlxsw_sp_port_vlan); 3001 goto err_port_vlan_get; 3002 } 3003 3004 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 3005 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3006 err = register_netdev(dev); 3007 if (err) { 3008 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3009 mlxsw_sp_port->local_port); 3010 goto err_register_netdev; 3011 } 3012 3013 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3014 mlxsw_sp_port, dev, module + 1, 3015 mlxsw_sp_port->split, lane / width); 3016 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3017 return 0; 3018 3019 err_register_netdev: 3020 mlxsw_sp->ports[local_port] = NULL; 3021 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3022 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 3023 err_port_vlan_get: 3024 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3025 err_port_qdiscs_init: 3026 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3027 err_port_fids_init: 3028 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3029 err_port_dcb_init: 3030 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3031 err_port_tc_mc_mode: 3032 
err_port_ets_init: 3033 err_port_buffers_init: 3034 err_port_admin_status_set: 3035 err_port_mtu_set: 3036 err_port_speed_by_width_set: 3037 err_port_system_port_mapping_set: 3038 err_dev_addr_init: 3039 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3040 err_port_swid_set: 3041 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3042 err_port_module_map: 3043 kfree(mlxsw_sp_port->sample); 3044 err_alloc_sample: 3045 free_percpu(mlxsw_sp_port->pcpu_stats); 3046 err_alloc_stats: 3047 free_netdev(dev); 3048 err_alloc_etherdev: 3049 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3050 return err; 3051 } 3052 3053 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3054 { 3055 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3056 3057 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3058 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3059 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3060 mlxsw_sp->ports[local_port] = NULL; 3061 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3062 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 3063 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3064 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3065 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3066 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3067 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3068 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3069 kfree(mlxsw_sp_port->sample); 3070 free_percpu(mlxsw_sp_port->pcpu_stats); 3071 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3072 free_netdev(mlxsw_sp_port->dev); 3073 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3074 } 3075 3076 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3077 { 3078 return mlxsw_sp->ports[local_port] != NULL; 3079 } 3080 3081 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3082 { 3083 int i; 3084 3085 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); 
i++) 3086 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3087 mlxsw_sp_port_remove(mlxsw_sp, i); 3088 kfree(mlxsw_sp->port_to_module); 3089 kfree(mlxsw_sp->ports); 3090 } 3091 3092 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3093 { 3094 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3095 u8 module, width, lane; 3096 size_t alloc_size; 3097 int i; 3098 int err; 3099 3100 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3101 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3102 if (!mlxsw_sp->ports) 3103 return -ENOMEM; 3104 3105 mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int), 3106 GFP_KERNEL); 3107 if (!mlxsw_sp->port_to_module) { 3108 err = -ENOMEM; 3109 goto err_port_to_module_alloc; 3110 } 3111 3112 for (i = 1; i < max_ports; i++) { 3113 /* Mark as invalid */ 3114 mlxsw_sp->port_to_module[i] = -1; 3115 3116 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3117 &width, &lane); 3118 if (err) 3119 goto err_port_module_info_get; 3120 if (!width) 3121 continue; 3122 mlxsw_sp->port_to_module[i] = module; 3123 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3124 module, width, lane); 3125 if (err) 3126 goto err_port_create; 3127 } 3128 return 0; 3129 3130 err_port_create: 3131 err_port_module_info_get: 3132 for (i--; i >= 1; i--) 3133 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3134 mlxsw_sp_port_remove(mlxsw_sp, i); 3135 kfree(mlxsw_sp->port_to_module); 3136 err_port_to_module_alloc: 3137 kfree(mlxsw_sp->ports); 3138 return err; 3139 } 3140 3141 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3142 { 3143 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3144 3145 return local_port - offset; 3146 } 3147 3148 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3149 u8 module, unsigned int count) 3150 { 3151 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3152 int err, i; 3153 3154 for (i = 0; i < count; i++) { 3155 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, 
/* (continuation of mlxsw_sp_port_split_create(): finish the per-sub-port
 * create call; on failure, tear down the sub-ports created so far)
 */
							     true, module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	/* Unwind only the ports this loop managed to create. */
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	return err;
}

/* Re-create the original (full-width) ports after a split was undone or
 * after split creation failed. Best-effort: the return value of
 * mlxsw_sp_port_create() is deliberately not propagated here.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		/* Negative entry means no module was ever mapped here. */
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

/* devlink port_split op: split @local_port into @count (2 or 4) sub-ports.
 * Validates the request, removes the ports occupying the cluster, then
 * creates the split ports; on failure the original ports are restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
		return -EINVAL;
	}

	/* Only a full-width port can be split (already-split ports cannot). */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack,
				   "Port cannot be split further");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Remove the existing ports before re-creating them split. */
	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort restore of the pre-split ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

/* devlink port_unsplit op: collapse the split ports in @local_port's cluster
 * back into the original full-width port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Width 1 means the port was split by four, otherwise by two. */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

/* PUDE (port up/down event) handler: reflect the hardware operational state
 * onto the netdev carrier.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

/* Rx trap listener (no offload mark): account the packet in per-CPU stats
 * and hand it to the network stack. (continues below)
 */
static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);
	/* (continuation of mlxsw_sp_rx_listener_no_mark_func()) */
	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

/* Rx trap listener that marks the skb as already forwarded in hardware
 * before delegating to the plain listener.
 */
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* As above, but additionally sets the multicast-route forward mark. */
static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_mr_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* Rx listener for sampled packets: report the packet to the psample group
 * configured on the ingress port, then consume the skb.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	/* Honor the configured truncation length, if any. */
	size = mlxsw_sp_port->sample->truncate ?
		       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

/* Helpers to declare Rx trap listeners with/without offload marks. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Table of all traps/events registered by this driver (continues below). */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	/* (continuation of mlxsw_sp_listener[]) */
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
};

/* Configure a QPCR policer per CPU trap group, rate-limiting traffic trapped
 * to the CPU. Groups not listed in the switch keep their defaults.
 * (continues below)
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			/* (continuation of mlxsw_sp_cpu_policers_set()) */
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			/* IP2ME is policed in bytes rather than packets. */
			is_bytes = true;
			rate = 4 * 1024;
			burst_size = 4;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Bind each CPU trap group to a policer and assign its priority and traffic
 * class via the HTGT register. Groups not listed keep their defaults.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* Policer index equals the trap group index by convention
		 * (matches mlxsw_sp_cpu_policers_set() above).
		 */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			/* Events are not rate-limited. */
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Initialize trap handling: program policers and trap groups, then register
 * every listener from mlxsw_sp_listener[]. Unregisters already-registered
 * listeners on failure.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &mlxsw_sp_listener[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister all trap listeners registered by mlxsw_sp_traps_init(). */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
}

/* Configure the LAG hash fields and allocate the per-LAG upper-device
 * bookkeeping array (continues below).
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp
			     *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	/* Hash on the full L2/L3/L4 tuple for LAG member selection. */
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

/* Free the LAG bookkeeping array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Set up the EMAD trap group so register access works before the full trap
 * configuration is applied.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Main driver init: bring up every subsystem in dependency order, unwinding
 * in reverse order via the goto ladder on any failure.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init wrapper: select SP1 firmware and per-ASIC ops, then run
 * the common init (continues below).
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	/* (continuation of mlxsw_sp1_init()) */
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Spectrum-2 init wrapper: select SP2 per-ASIC ops (no firmware requirement
 * is set here, unlike SP1), then run the common init.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Tear down everything set up by mlxsw_sp_init(), in reverse order. */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Spectrum-1 device configuration profile; includes explicit KVD sizing
 * (hash single/double split ratio and linear size).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 3,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Spectrum-2 device configuration profile; unlike SP1, no KVD sizing is
 * specified here.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 3,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Fill devlink size-parameter structs for the KVD resource and its linear /
 * hash-double / hash-single partitions, bounded by the device-reported
 * minimum sizes (continues below).
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  /* (continuation of
					   * mlxsw_sp_resource_size_params_prepare())
					   */
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the KVD devlink resource tree for Spectrum-1: the top-level KVD
 * plus its linear, hash-double and hash-single children, sized from the
 * SP1 profile's parts ratio.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the remaining (hash) space by the profile's parts ratio,
	 * rounded down to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}

/* Spectrum-2 registers no devlink resources. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return 0;
}

/* Resolve the actual KVD partition sizes: prefer user-provided sizes from
 * devlink; fall back to the profile's linear size and parts ratio. Validates
 * the result against device minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* mlxsw core driver descriptor for Spectrum-1 (continues below). */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	=
		/* (continuation of mlxsw_sp1_driver) */
		mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
};

/* mlxsw core driver descriptor for Spectrum-2; same ops as SP1 except for
 * init, resources and profile, and no kvd_sizes_get.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};

/* Return true if @dev is one of this driver's port netdevs. */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* netdev_walk_all_lower_dev() callback: stop the walk (return 1) on the
 * first mlxsw port found and store it through @data.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw port under @dev (or @dev itself); NULL if none.
 * Caller must hold RTNL (non-RCU lower-dev walk).
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Resolve the mlxsw_sp instance reachable under @dev; NULL if unrelated. */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(); caller must be in an RCU
 * read-side critical section.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Like the RCU find, but takes a reference on the port's netdev; release
 * with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

/* Drop the reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Create a LAG in hardware via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy a hardware LAG via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add a port to a LAG collector at @port_index via the SLCOR register. */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove a port from a LAG collector. */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection (traffic distribution) for a LAG member port. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable collection for a LAG member port. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Look up the LAG index used for @lag_dev, or pick the first free one.
 * Returns -EBUSY when all LAG entries are in use. (Definition continues
 * past this chunk.)
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id
= free_lag_id; 4328 return 0; 4329 } 4330 4331 static bool 4332 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4333 struct net_device *lag_dev, 4334 struct netdev_lag_upper_info *lag_upper_info, 4335 struct netlink_ext_ack *extack) 4336 { 4337 u16 lag_id; 4338 4339 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 4340 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 4341 return false; 4342 } 4343 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4344 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4345 return false; 4346 } 4347 return true; 4348 } 4349 4350 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4351 u16 lag_id, u8 *p_port_index) 4352 { 4353 u64 max_lag_members; 4354 int i; 4355 4356 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4357 MAX_LAG_MEMBERS); 4358 for (i = 0; i < max_lag_members; i++) { 4359 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4360 *p_port_index = i; 4361 return 0; 4362 } 4363 } 4364 return -EBUSY; 4365 } 4366 4367 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 4368 struct net_device *lag_dev) 4369 { 4370 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4371 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 4372 struct mlxsw_sp_upper *lag; 4373 u16 lag_id; 4374 u8 port_index; 4375 int err; 4376 4377 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 4378 if (err) 4379 return err; 4380 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4381 if (!lag->ref_count) { 4382 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 4383 if (err) 4384 return err; 4385 lag->dev = lag_dev; 4386 } 4387 4388 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4389 if (err) 4390 return err; 4391 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 4392 if (err) 4393 goto err_col_port_add; 4394 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); 4395 if (err) 4396 goto err_col_port_enable; 4397 4398 
mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 4399 mlxsw_sp_port->local_port); 4400 mlxsw_sp_port->lag_id = lag_id; 4401 mlxsw_sp_port->lagged = 1; 4402 lag->ref_count++; 4403 4404 /* Port is no longer usable as a router interface */ 4405 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1); 4406 if (mlxsw_sp_port_vlan->fid) 4407 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 4408 4409 return 0; 4410 4411 err_col_port_enable: 4412 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4413 err_col_port_add: 4414 if (!lag->ref_count) 4415 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4416 return err; 4417 } 4418 4419 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4420 struct net_device *lag_dev) 4421 { 4422 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4423 u16 lag_id = mlxsw_sp_port->lag_id; 4424 struct mlxsw_sp_upper *lag; 4425 4426 if (!mlxsw_sp_port->lagged) 4427 return; 4428 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4429 WARN_ON(lag->ref_count == 0); 4430 4431 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); 4432 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4433 4434 /* Any VLANs configured on the port are no longer valid */ 4435 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 4436 4437 if (lag->ref_count == 1) 4438 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4439 4440 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4441 mlxsw_sp_port->local_port); 4442 mlxsw_sp_port->lagged = 0; 4443 lag->ref_count--; 4444 4445 mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 4446 /* Make sure untagged frames are allowed to ingress */ 4447 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 4448 } 4449 4450 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4451 u16 lag_id) 4452 { 4453 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4454 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4455 4456 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4457 mlxsw_sp_port->local_port); 4458 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4459 } 4460 4461 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4462 u16 lag_id) 4463 { 4464 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4465 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4466 4467 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4468 mlxsw_sp_port->local_port); 4469 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4470 } 4471 4472 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, 4473 bool lag_tx_enabled) 4474 { 4475 if (lag_tx_enabled) 4476 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, 4477 mlxsw_sp_port->lag_id); 4478 else 4479 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4480 mlxsw_sp_port->lag_id); 4481 } 4482 4483 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4484 struct netdev_lag_lower_state_info *info) 4485 { 4486 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); 4487 } 4488 4489 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4490 bool enable) 4491 { 4492 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4493 enum mlxsw_reg_spms_state spms_state; 4494 char *spms_pl; 4495 u16 vid; 4496 int err; 4497 4498 spms_state = enable ? 
MLXSW_REG_SPMS_STATE_FORWARDING : 4499 MLXSW_REG_SPMS_STATE_DISCARDING; 4500 4501 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4502 if (!spms_pl) 4503 return -ENOMEM; 4504 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4505 4506 for (vid = 0; vid < VLAN_N_VID; vid++) 4507 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4508 4509 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4510 kfree(spms_pl); 4511 return err; 4512 } 4513 4514 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4515 { 4516 u16 vid = 1; 4517 int err; 4518 4519 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4520 if (err) 4521 return err; 4522 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4523 if (err) 4524 goto err_port_stp_set; 4525 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4526 true, false); 4527 if (err) 4528 goto err_port_vlan_set; 4529 4530 for (; vid <= VLAN_N_VID - 1; vid++) { 4531 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4532 vid, false); 4533 if (err) 4534 goto err_vid_learning_set; 4535 } 4536 4537 return 0; 4538 4539 err_vid_learning_set: 4540 for (vid--; vid >= 1; vid--) 4541 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4542 err_port_vlan_set: 4543 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4544 err_port_stp_set: 4545 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4546 return err; 4547 } 4548 4549 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4550 { 4551 u16 vid; 4552 4553 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4554 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4555 vid, true); 4556 4557 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4558 false, false); 4559 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4560 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4561 } 4562 4563 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 4564 struct net_device *dev, 4565 unsigned long event, void *ptr) 4566 { 4567 struct 
netdev_notifier_changeupper_info *info; 4568 struct mlxsw_sp_port *mlxsw_sp_port; 4569 struct netlink_ext_ack *extack; 4570 struct net_device *upper_dev; 4571 struct mlxsw_sp *mlxsw_sp; 4572 int err = 0; 4573 4574 mlxsw_sp_port = netdev_priv(dev); 4575 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4576 info = ptr; 4577 extack = netdev_notifier_info_to_extack(&info->info); 4578 4579 switch (event) { 4580 case NETDEV_PRECHANGEUPPER: 4581 upper_dev = info->upper_dev; 4582 if (!is_vlan_dev(upper_dev) && 4583 !netif_is_lag_master(upper_dev) && 4584 !netif_is_bridge_master(upper_dev) && 4585 !netif_is_ovs_master(upper_dev) && 4586 !netif_is_macvlan(upper_dev)) { 4587 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4588 return -EINVAL; 4589 } 4590 if (!info->linking) 4591 break; 4592 if (netdev_has_any_upper_dev(upper_dev) && 4593 (!netif_is_bridge_master(upper_dev) || 4594 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4595 upper_dev))) { 4596 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4597 return -EINVAL; 4598 } 4599 if (netif_is_lag_master(upper_dev) && 4600 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4601 info->upper_info, extack)) 4602 return -EINVAL; 4603 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4604 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4605 return -EINVAL; 4606 } 4607 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4608 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4609 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4610 return -EINVAL; 4611 } 4612 if (netif_is_macvlan(upper_dev) && 4613 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { 4614 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4615 return -EOPNOTSUPP; 4616 } 4617 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4618 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this 
device has a VLAN"); 4619 return -EINVAL; 4620 } 4621 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4622 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4623 return -EINVAL; 4624 } 4625 if (is_vlan_dev(upper_dev) && 4626 vlan_dev_vlan_id(upper_dev) == 1) { 4627 NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic"); 4628 return -EINVAL; 4629 } 4630 break; 4631 case NETDEV_CHANGEUPPER: 4632 upper_dev = info->upper_dev; 4633 if (netif_is_bridge_master(upper_dev)) { 4634 if (info->linking) 4635 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4636 lower_dev, 4637 upper_dev, 4638 extack); 4639 else 4640 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4641 lower_dev, 4642 upper_dev); 4643 } else if (netif_is_lag_master(upper_dev)) { 4644 if (info->linking) 4645 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4646 upper_dev); 4647 else 4648 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4649 upper_dev); 4650 } else if (netif_is_ovs_master(upper_dev)) { 4651 if (info->linking) 4652 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4653 else 4654 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4655 } else if (netif_is_macvlan(upper_dev)) { 4656 if (!info->linking) 4657 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4658 } 4659 break; 4660 } 4661 4662 return err; 4663 } 4664 4665 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 4666 unsigned long event, void *ptr) 4667 { 4668 struct netdev_notifier_changelowerstate_info *info; 4669 struct mlxsw_sp_port *mlxsw_sp_port; 4670 int err; 4671 4672 mlxsw_sp_port = netdev_priv(dev); 4673 info = ptr; 4674 4675 switch (event) { 4676 case NETDEV_CHANGELOWERSTATE: 4677 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 4678 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 4679 info->lower_state_info); 4680 if (err) 4681 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 4682 } 4683 break; 4684 } 4685 4686 return 0; 4687 } 4688 4689 
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 4690 struct net_device *port_dev, 4691 unsigned long event, void *ptr) 4692 { 4693 switch (event) { 4694 case NETDEV_PRECHANGEUPPER: 4695 case NETDEV_CHANGEUPPER: 4696 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 4697 event, ptr); 4698 case NETDEV_CHANGELOWERSTATE: 4699 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 4700 ptr); 4701 } 4702 4703 return 0; 4704 } 4705 4706 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 4707 unsigned long event, void *ptr) 4708 { 4709 struct net_device *dev; 4710 struct list_head *iter; 4711 int ret; 4712 4713 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4714 if (mlxsw_sp_port_dev_check(dev)) { 4715 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 4716 ptr); 4717 if (ret) 4718 return ret; 4719 } 4720 } 4721 4722 return 0; 4723 } 4724 4725 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 4726 struct net_device *dev, 4727 unsigned long event, void *ptr, 4728 u16 vid) 4729 { 4730 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4731 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4732 struct netdev_notifier_changeupper_info *info = ptr; 4733 struct netlink_ext_ack *extack; 4734 struct net_device *upper_dev; 4735 int err = 0; 4736 4737 extack = netdev_notifier_info_to_extack(&info->info); 4738 4739 switch (event) { 4740 case NETDEV_PRECHANGEUPPER: 4741 upper_dev = info->upper_dev; 4742 if (!netif_is_bridge_master(upper_dev) && 4743 !netif_is_macvlan(upper_dev)) { 4744 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4745 return -EINVAL; 4746 } 4747 if (!info->linking) 4748 break; 4749 if (netdev_has_any_upper_dev(upper_dev) && 4750 (!netif_is_bridge_master(upper_dev) || 4751 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4752 upper_dev))) { 4753 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4754 
return -EINVAL; 4755 } 4756 if (netif_is_macvlan(upper_dev) && 4757 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 4758 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4759 return -EOPNOTSUPP; 4760 } 4761 break; 4762 case NETDEV_CHANGEUPPER: 4763 upper_dev = info->upper_dev; 4764 if (netif_is_bridge_master(upper_dev)) { 4765 if (info->linking) 4766 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4767 vlan_dev, 4768 upper_dev, 4769 extack); 4770 else 4771 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4772 vlan_dev, 4773 upper_dev); 4774 } else if (netif_is_macvlan(upper_dev)) { 4775 if (!info->linking) 4776 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4777 } else { 4778 err = -EINVAL; 4779 WARN_ON(1); 4780 } 4781 break; 4782 } 4783 4784 return err; 4785 } 4786 4787 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 4788 struct net_device *lag_dev, 4789 unsigned long event, 4790 void *ptr, u16 vid) 4791 { 4792 struct net_device *dev; 4793 struct list_head *iter; 4794 int ret; 4795 4796 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4797 if (mlxsw_sp_port_dev_check(dev)) { 4798 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 4799 event, ptr, 4800 vid); 4801 if (ret) 4802 return ret; 4803 } 4804 } 4805 4806 return 0; 4807 } 4808 4809 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 4810 unsigned long event, void *ptr) 4811 { 4812 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 4813 u16 vid = vlan_dev_vlan_id(vlan_dev); 4814 4815 if (mlxsw_sp_port_dev_check(real_dev)) 4816 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 4817 event, ptr, vid); 4818 else if (netif_is_lag_master(real_dev)) 4819 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 4820 real_dev, event, 4821 ptr, vid); 4822 4823 return 0; 4824 } 4825 4826 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 4827 unsigned long event, void *ptr) 4828 { 4829 struct mlxsw_sp 
*mlxsw_sp = mlxsw_sp_lower_get(br_dev); 4830 struct netdev_notifier_changeupper_info *info = ptr; 4831 struct netlink_ext_ack *extack; 4832 struct net_device *upper_dev; 4833 4834 if (!mlxsw_sp) 4835 return 0; 4836 4837 extack = netdev_notifier_info_to_extack(&info->info); 4838 4839 switch (event) { 4840 case NETDEV_PRECHANGEUPPER: 4841 upper_dev = info->upper_dev; 4842 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 4843 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4844 return -EOPNOTSUPP; 4845 } 4846 if (!info->linking) 4847 break; 4848 if (netif_is_macvlan(upper_dev) && 4849 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { 4850 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4851 return -EOPNOTSUPP; 4852 } 4853 break; 4854 case NETDEV_CHANGEUPPER: 4855 upper_dev = info->upper_dev; 4856 if (info->linking) 4857 break; 4858 if (is_vlan_dev(upper_dev)) 4859 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 4860 if (netif_is_macvlan(upper_dev)) 4861 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4862 break; 4863 } 4864 4865 return 0; 4866 } 4867 4868 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 4869 unsigned long event, void *ptr) 4870 { 4871 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 4872 struct netdev_notifier_changeupper_info *info = ptr; 4873 struct netlink_ext_ack *extack; 4874 4875 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 4876 return 0; 4877 4878 extack = netdev_notifier_info_to_extack(&info->info); 4879 4880 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 4881 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4882 4883 return -EOPNOTSUPP; 4884 } 4885 4886 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 4887 { 4888 struct netdev_notifier_changeupper_info *info = ptr; 4889 4890 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 4891 return false; 4892 return 
netif_is_l3_master(info->upper_dev); 4893 } 4894 4895 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 4896 unsigned long event, void *ptr) 4897 { 4898 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4899 struct mlxsw_sp_span_entry *span_entry; 4900 struct mlxsw_sp *mlxsw_sp; 4901 int err = 0; 4902 4903 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 4904 if (event == NETDEV_UNREGISTER) { 4905 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 4906 if (span_entry) 4907 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 4908 } 4909 mlxsw_sp_span_respin(mlxsw_sp); 4910 4911 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 4912 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 4913 event, ptr); 4914 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 4915 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 4916 event, ptr); 4917 else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) 4918 err = mlxsw_sp_netdevice_router_port_event(dev); 4919 else if (mlxsw_sp_is_vrf_event(event, ptr)) 4920 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 4921 else if (mlxsw_sp_port_dev_check(dev)) 4922 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 4923 else if (netif_is_lag_master(dev)) 4924 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 4925 else if (is_vlan_dev(dev)) 4926 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 4927 else if (netif_is_bridge_master(dev)) 4928 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 4929 else if (netif_is_macvlan(dev)) 4930 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 4931 4932 return notifier_from_errno(err); 4933 } 4934 4935 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 4936 .notifier_call = mlxsw_sp_inetaddr_valid_event, 4937 }; 4938 4939 static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { 4940 .notifier_call = mlxsw_sp_inetaddr_event, 4941 }; 4942 4943 static struct notifier_block 
mlxsw_sp_inet6addr_valid_nb __read_mostly = { 4944 .notifier_call = mlxsw_sp_inet6addr_valid_event, 4945 }; 4946 4947 static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { 4948 .notifier_call = mlxsw_sp_inet6addr_event, 4949 }; 4950 4951 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 4952 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 4953 {0, }, 4954 }; 4955 4956 static struct pci_driver mlxsw_sp1_pci_driver = { 4957 .name = mlxsw_sp1_driver_name, 4958 .id_table = mlxsw_sp1_pci_id_table, 4959 }; 4960 4961 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 4962 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 4963 {0, }, 4964 }; 4965 4966 static struct pci_driver mlxsw_sp2_pci_driver = { 4967 .name = mlxsw_sp2_driver_name, 4968 .id_table = mlxsw_sp2_pci_id_table, 4969 }; 4970 4971 static int __init mlxsw_sp_module_init(void) 4972 { 4973 int err; 4974 4975 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4976 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 4977 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4978 register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 4979 4980 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 4981 if (err) 4982 goto err_sp1_core_driver_register; 4983 4984 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 4985 if (err) 4986 goto err_sp2_core_driver_register; 4987 4988 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 4989 if (err) 4990 goto err_sp1_pci_driver_register; 4991 4992 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 4993 if (err) 4994 goto err_sp2_pci_driver_register; 4995 4996 return 0; 4997 4998 err_sp2_pci_driver_register: 4999 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5000 err_sp1_pci_driver_register: 5001 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5002 err_sp2_core_driver_register: 5003 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5004 err_sp1_core_driver_register: 5005 
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 5006 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5007 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 5008 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5009 return err; 5010 } 5011 5012 static void __exit mlxsw_sp_module_exit(void) 5013 { 5014 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5015 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5016 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5017 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5018 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 5019 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5020 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 5021 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5022 } 5023 5024 module_init(mlxsw_sp_module_init); 5025 module_exit(mlxsw_sp_module_exit); 5026 5027 MODULE_LICENSE("Dual BSD/GPL"); 5028 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5029 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 5030 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5031 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5032 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5033