1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <net/switchdev.h> 27 #include <net/pkt_cls.h> 28 #include <net/netevent.h> 29 #include <net/addrconf.h> 30 31 #include "spectrum.h" 32 #include "pci.h" 33 #include "core.h" 34 #include "core_env.h" 35 #include "reg.h" 36 #include "port.h" 37 #include "trap.h" 38 #include "txheader.h" 39 #include "spectrum_cnt.h" 40 #include "spectrum_dpipe.h" 41 #include "spectrum_acl_flex_actions.h" 42 #include "spectrum_span.h" 43 #include "spectrum_ptp.h" 44 #include "spectrum_trap.h" 45 #include "../mlxfw/mlxfw.h" 46 47 #define MLXSW_SP1_FWREV_MAJOR 13 48 #define MLXSW_SP1_FWREV_MINOR 2007 49 #define MLXSW_SP1_FWREV_SUBMINOR 1168 50 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 51 52 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 53 .major = MLXSW_SP1_FWREV_MAJOR, 54 .minor = MLXSW_SP1_FWREV_MINOR, 55 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 56 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 57 }; 58 59 #define MLXSW_SP1_FW_FILENAME \ 60 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 61 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 62 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 63 64 #define MLXSW_SP2_FWREV_MAJOR 29 65 #define MLXSW_SP2_FWREV_MINOR 2007 66 #define MLXSW_SP2_FWREV_SUBMINOR 1168 67 68 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 69 .major = MLXSW_SP2_FWREV_MAJOR, 70 .minor = MLXSW_SP2_FWREV_MINOR, 71 .subminor = MLXSW_SP2_FWREV_SUBMINOR, 72 }; 73 74 #define MLXSW_SP2_FW_FILENAME \ 75 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 76 "." __stringify(MLXSW_SP2_FWREV_MINOR) \ 77 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" 78 79 #define MLXSW_SP3_FWREV_MAJOR 30 80 #define MLXSW_SP3_FWREV_MINOR 2007 81 #define MLXSW_SP3_FWREV_SUBMINOR 1168 82 83 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { 84 .major = MLXSW_SP3_FWREV_MAJOR, 85 .minor = MLXSW_SP3_FWREV_MINOR, 86 .subminor = MLXSW_SP3_FWREV_SUBMINOR, 87 }; 88 89 #define MLXSW_SP3_FW_FILENAME \ 90 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \ 91 "." __stringify(MLXSW_SP3_FWREV_MINOR) \ 92 "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2" 93 94 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 95 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 96 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 97 98 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 99 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 100 }; 101 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 102 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 103 }; 104 105 /* tx_hdr_version 106 * Tx header version. 107 * Must be set to 1. 108 */ 109 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 110 111 /* tx_hdr_ctl 112 * Packet control type. 113 * 0 - Ethernet control (e.g. EMADs, LACP) 114 * 1 - Ethernet data 115 */ 116 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 117 118 /* tx_hdr_proto 119 * Packet protocol type. Must be set to 1 (Ethernet). 120 */ 121 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 122 123 /* tx_hdr_rx_is_router 124 * Packet is sent from the router. Valid for data packets only. 
125 */ 126 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 127 128 /* tx_hdr_fid_valid 129 * Indicates if the 'fid' field is valid and should be used for 130 * forwarding lookup. Valid for data packets only. 131 */ 132 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 133 134 /* tx_hdr_swid 135 * Switch partition ID. Must be set to 0. 136 */ 137 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 138 139 /* tx_hdr_control_tclass 140 * Indicates if the packet should use the control TClass and not one 141 * of the data TClasses. 142 */ 143 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 144 145 /* tx_hdr_etclass 146 * Egress TClass to be used on the egress device on the egress port. 147 */ 148 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 149 150 /* tx_hdr_port_mid 151 * Destination local port for unicast packets. 152 * Destination multicast ID for multicast packets. 153 * 154 * Control packets are directed to a specific egress port, while data 155 * packets are transmitted through the CPU port (0) into the switch partition, 156 * where forwarding rules are applied. 157 */ 158 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 159 160 /* tx_hdr_fid 161 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 162 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 163 * Valid for data packets only. 
164 */ 165 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 166 167 /* tx_hdr_type 168 * 0 - Data packets 169 * 6 - Control packets 170 */ 171 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 172 173 struct mlxsw_sp_mlxfw_dev { 174 struct mlxfw_dev mlxfw_dev; 175 struct mlxsw_sp *mlxsw_sp; 176 }; 177 178 struct mlxsw_sp_span_ops { 179 u32 (*buffsize_get)(int mtu, u32 speed); 180 }; 181 182 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 183 u16 component_index, u32 *p_max_size, 184 u8 *p_align_bits, u16 *p_max_write_size) 185 { 186 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 187 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 188 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 189 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 190 int err; 191 192 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 193 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 194 if (err) 195 return err; 196 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 197 p_max_write_size); 198 199 *p_align_bits = max_t(u8, *p_align_bits, 2); 200 *p_max_write_size = min_t(u16, *p_max_write_size, 201 MLXSW_REG_MCDA_MAX_DATA_LEN); 202 return 0; 203 } 204 205 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 206 { 207 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 208 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 209 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 210 char mcc_pl[MLXSW_REG_MCC_LEN]; 211 u8 control_state; 212 int err; 213 214 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 215 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 216 if (err) 217 return err; 218 219 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 220 if (control_state != MLXFW_FSM_STATE_IDLE) 221 return -EBUSY; 222 223 mlxsw_reg_mcc_pack(mcc_pl, 224 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 225 0, *fwhandle, 0); 226 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 227 } 228 229 static int 
mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 230 u32 fwhandle, u16 component_index, 231 u32 component_size) 232 { 233 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 234 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 235 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 236 char mcc_pl[MLXSW_REG_MCC_LEN]; 237 238 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 239 component_index, fwhandle, component_size); 240 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 241 } 242 243 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 244 u32 fwhandle, u8 *data, u16 size, 245 u32 offset) 246 { 247 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 248 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 249 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 250 char mcda_pl[MLXSW_REG_MCDA_LEN]; 251 252 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 253 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 254 } 255 256 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 257 u32 fwhandle, u16 component_index) 258 { 259 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 260 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 261 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 262 char mcc_pl[MLXSW_REG_MCC_LEN]; 263 264 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 265 component_index, fwhandle, 0); 266 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 267 } 268 269 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 270 { 271 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 272 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 273 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 274 char mcc_pl[MLXSW_REG_MCC_LEN]; 275 276 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 277 fwhandle, 0); 278 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 279 } 280 281 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 282 enum mlxfw_fsm_state *fsm_state, 283 enum mlxfw_fsm_state_err *fsm_state_err) 284 { 285 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 286 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 287 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 288 char mcc_pl[MLXSW_REG_MCC_LEN]; 289 u8 control_state; 290 u8 error_code; 291 int err; 292 293 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 294 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 295 if (err) 296 return err; 297 298 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 299 *fsm_state = control_state; 300 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 301 MLXFW_FSM_STATE_ERR_MAX); 302 return 0; 303 } 304 305 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 306 { 307 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 308 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 309 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 310 char mcc_pl[MLXSW_REG_MCC_LEN]; 311 312 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 313 fwhandle, 0); 314 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 315 } 316 317 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 318 { 319 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 320 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 321 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 322 char mcc_pl[MLXSW_REG_MCC_LEN]; 323 324 mlxsw_reg_mcc_pack(mcc_pl, 325 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 326 fwhandle, 0); 327 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 328 } 329 330 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 331 .component_query = mlxsw_sp_component_query, 332 .fsm_lock = mlxsw_sp_fsm_lock, 333 
.fsm_component_update = mlxsw_sp_fsm_component_update, 334 .fsm_block_download = mlxsw_sp_fsm_block_download, 335 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 336 .fsm_activate = mlxsw_sp_fsm_activate, 337 .fsm_query_state = mlxsw_sp_fsm_query_state, 338 .fsm_cancel = mlxsw_sp_fsm_cancel, 339 .fsm_release = mlxsw_sp_fsm_release, 340 }; 341 342 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 343 const struct firmware *firmware, 344 struct netlink_ext_ack *extack) 345 { 346 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 347 .mlxfw_dev = { 348 .ops = &mlxsw_sp_mlxfw_dev_ops, 349 .psid = mlxsw_sp->bus_info->psid, 350 .psid_size = strlen(mlxsw_sp->bus_info->psid), 351 .devlink = priv_to_devlink(mlxsw_sp->core), 352 }, 353 .mlxsw_sp = mlxsw_sp 354 }; 355 int err; 356 357 mlxsw_core_fw_flash_start(mlxsw_sp->core); 358 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 359 firmware, extack); 360 mlxsw_core_fw_flash_end(mlxsw_sp->core); 361 362 return err; 363 } 364 365 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 366 { 367 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 368 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 369 const char *fw_filename = mlxsw_sp->fw_filename; 370 union devlink_param_value value; 371 const struct firmware *firmware; 372 int err; 373 374 /* Don't check if driver does not require it */ 375 if (!req_rev || !fw_filename) 376 return 0; 377 378 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 379 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 380 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 381 &value); 382 if (err) 383 return err; 384 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 385 return 0; 386 387 /* Validate driver & FW are compatible */ 388 if (rev->major != req_rev->major) { 389 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 390 rev->major, req_rev->major); 391 return -EINVAL; 
392 } 393 if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) 394 return 0; 395 396 dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n", 397 rev->major, rev->minor, rev->subminor, req_rev->major, 398 req_rev->minor, req_rev->subminor); 399 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 400 fw_filename); 401 402 err = request_firmware_direct(&firmware, fw_filename, 403 mlxsw_sp->bus_info->dev); 404 if (err) { 405 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 406 fw_filename); 407 return err; 408 } 409 410 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 411 release_firmware(firmware); 412 if (err) 413 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 414 415 /* On FW flash success, tell the caller FW reset is needed 416 * if current FW supports it. 417 */ 418 if (rev->minor >= req_rev->can_reset_minor) 419 return err ? err : -EAGAIN; 420 else 421 return 0; 422 } 423 424 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 425 const char *file_name, const char *component, 426 struct netlink_ext_ack *extack) 427 { 428 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 429 const struct firmware *firmware; 430 int err; 431 432 if (component) 433 return -EOPNOTSUPP; 434 435 err = request_firmware_direct(&firmware, file_name, 436 mlxsw_sp->bus_info->dev); 437 if (err) 438 return err; 439 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 440 release_firmware(firmware); 441 442 return err; 443 } 444 445 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 446 unsigned int counter_index, u64 *packets, 447 u64 *bytes) 448 { 449 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 450 int err; 451 452 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 453 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 454 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 455 if (err) 456 
return err; 457 if (packets) 458 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 459 if (bytes) 460 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 461 return 0; 462 } 463 464 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 465 unsigned int counter_index) 466 { 467 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 468 469 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 470 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 471 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 472 } 473 474 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 475 unsigned int *p_counter_index) 476 { 477 int err; 478 479 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 480 p_counter_index); 481 if (err) 482 return err; 483 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 484 if (err) 485 goto err_counter_clear; 486 return 0; 487 488 err_counter_clear: 489 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 490 *p_counter_index); 491 return err; 492 } 493 494 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 495 unsigned int counter_index) 496 { 497 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 498 counter_index); 499 } 500 501 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 502 const struct mlxsw_tx_info *tx_info) 503 { 504 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 505 506 memset(txhdr, 0, MLXSW_TXHDR_LEN); 507 508 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 509 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 510 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 511 mlxsw_tx_hdr_swid_set(txhdr, 0); 512 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 513 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 514 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 515 } 516 517 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 518 { 519 switch (state) { 520 case BR_STATE_FORWARDING: 521 return 
MLXSW_REG_SPMS_STATE_FORWARDING; 522 case BR_STATE_LEARNING: 523 return MLXSW_REG_SPMS_STATE_LEARNING; 524 case BR_STATE_LISTENING: /* fall-through */ 525 case BR_STATE_DISABLED: /* fall-through */ 526 case BR_STATE_BLOCKING: 527 return MLXSW_REG_SPMS_STATE_DISCARDING; 528 default: 529 BUG(); 530 } 531 } 532 533 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 534 u8 state) 535 { 536 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 537 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 538 char *spms_pl; 539 int err; 540 541 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 542 if (!spms_pl) 543 return -ENOMEM; 544 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 545 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 546 547 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 548 kfree(spms_pl); 549 return err; 550 } 551 552 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 553 { 554 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 555 int err; 556 557 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 558 if (err) 559 return err; 560 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 561 return 0; 562 } 563 564 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 565 bool is_up) 566 { 567 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 568 char paos_pl[MLXSW_REG_PAOS_LEN]; 569 570 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 571 is_up ? 
MLXSW_PORT_ADMIN_STATUS_UP : 572 MLXSW_PORT_ADMIN_STATUS_DOWN); 573 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 574 } 575 576 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 577 unsigned char *addr) 578 { 579 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 580 char ppad_pl[MLXSW_REG_PPAD_LEN]; 581 582 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 583 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 584 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 585 } 586 587 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 588 { 589 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 590 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 591 592 ether_addr_copy(addr, mlxsw_sp->base_mac); 593 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 594 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 595 } 596 597 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 598 { 599 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 600 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 601 int max_mtu; 602 int err; 603 604 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 605 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 606 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 607 if (err) 608 return err; 609 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 610 611 if (mtu > max_mtu) 612 return -EINVAL; 613 614 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 615 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 616 } 617 618 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 619 { 620 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 621 char pspa_pl[MLXSW_REG_PSPA_LEN]; 622 623 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 624 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 625 } 626 627 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 
bool enable) 628 { 629 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 630 char svpe_pl[MLXSW_REG_SVPE_LEN]; 631 632 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 633 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 634 } 635 636 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 637 bool learn_enable) 638 { 639 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 640 char *spvmlr_pl; 641 int err; 642 643 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 644 if (!spvmlr_pl) 645 return -ENOMEM; 646 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 647 learn_enable); 648 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 649 kfree(spvmlr_pl); 650 return err; 651 } 652 653 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 654 u16 vid) 655 { 656 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 657 char spvid_pl[MLXSW_REG_SPVID_LEN]; 658 659 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 660 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 661 } 662 663 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 664 bool allow) 665 { 666 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 667 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 668 669 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 670 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 671 } 672 673 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 674 { 675 int err; 676 677 if (!vid) { 678 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 679 if (err) 680 return err; 681 } else { 682 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 683 if (err) 684 return err; 685 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 686 if (err) 687 goto err_port_allow_untagged_set; 688 } 689 690 mlxsw_sp_port->pvid = vid; 691 return 0; 692 693 
err_port_allow_untagged_set: 694 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 695 return err; 696 } 697 698 static int 699 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 700 { 701 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 702 char sspr_pl[MLXSW_REG_SSPR_LEN]; 703 704 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 705 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 706 } 707 708 static int 709 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, 710 struct mlxsw_sp_port_mapping *port_mapping) 711 { 712 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 713 bool separate_rxtx; 714 u8 module; 715 u8 width; 716 int err; 717 int i; 718 719 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 720 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 721 if (err) 722 return err; 723 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 724 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 725 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 726 727 if (width && !is_power_of_2(width)) { 728 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n", 729 local_port); 730 return -EINVAL; 731 } 732 733 for (i = 0; i < width; i++) { 734 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 735 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 736 local_port); 737 return -EINVAL; 738 } 739 if (separate_rxtx && 740 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 741 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 742 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 743 local_port); 744 return -EINVAL; 745 } 746 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) { 747 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 748 local_port); 749 return -EINVAL; 750 } 751 } 752 753 port_mapping->module = 
module; 754 port_mapping->width = width; 755 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 756 return 0; 757 } 758 759 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) 760 { 761 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; 762 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 763 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 764 int i; 765 766 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 767 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 768 for (i = 0; i < port_mapping->width; i++) { 769 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 770 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 771 } 772 773 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 774 } 775 776 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 777 { 778 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 779 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 780 781 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 782 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 783 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 784 } 785 786 static int mlxsw_sp_port_open(struct net_device *dev) 787 { 788 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 789 int err; 790 791 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 792 if (err) 793 return err; 794 netif_start_queue(dev); 795 return 0; 796 } 797 798 static int mlxsw_sp_port_stop(struct net_device *dev) 799 { 800 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 801 802 netif_stop_queue(dev); 803 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 804 } 805 806 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 807 struct net_device *dev) 808 { 809 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 810 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 811 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 812 const struct mlxsw_tx_info tx_info = { 813 
.local_port = mlxsw_sp_port->local_port, 814 .is_emad = false, 815 }; 816 u64 len; 817 int err; 818 819 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 820 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 821 dev_kfree_skb_any(skb); 822 return NETDEV_TX_OK; 823 } 824 825 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 826 827 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 828 return NETDEV_TX_BUSY; 829 830 if (eth_skb_pad(skb)) { 831 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 832 return NETDEV_TX_OK; 833 } 834 835 mlxsw_sp_txhdr_construct(skb, &tx_info); 836 /* TX header is consumed by HW on the way so we shouldn't count its 837 * bytes as being sent. 838 */ 839 len = skb->len - MLXSW_TXHDR_LEN; 840 841 /* Due to a race we might fail here because of a full queue. In that 842 * unlikely case we simply drop the packet. 843 */ 844 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 845 846 if (!err) { 847 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 848 u64_stats_update_begin(&pcpu_stats->syncp); 849 pcpu_stats->tx_packets++; 850 pcpu_stats->tx_bytes += len; 851 u64_stats_update_end(&pcpu_stats->syncp); 852 } else { 853 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 854 dev_kfree_skb_any(skb); 855 } 856 return NETDEV_TX_OK; 857 } 858 859 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 860 { 861 } 862 863 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 864 { 865 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 866 struct sockaddr *addr = p; 867 int err; 868 869 if (!is_valid_ether_addr(addr->sa_data)) 870 return -EADDRNOTAVAIL; 871 872 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 873 if (err) 874 return err; 875 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 876 return 0; 877 } 878 879 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 880 int mtu) 881 { 882 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 883 } 884 885 #define 
MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 886 887 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 888 u16 delay) 889 { 890 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 891 BITS_PER_BYTE)); 892 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 893 mtu); 894 } 895 896 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 897 * Assumes 100m cable and maximum MTU. 898 */ 899 #define MLXSW_SP_PAUSE_DELAY 58752 900 901 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 902 u16 delay, bool pfc, bool pause) 903 { 904 if (pfc) 905 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 906 else if (pause) 907 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 908 else 909 return 0; 910 } 911 912 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 913 bool lossy) 914 { 915 if (lossy) 916 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 917 else 918 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 919 thres); 920 } 921 922 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 923 u8 *prio_tc, bool pause_en, 924 struct ieee_pfc *my_pfc) 925 { 926 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 927 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 928 u16 delay = !!my_pfc ? 
my_pfc->delay : 0; 929 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 930 u32 taken_headroom_cells = 0; 931 u32 max_headroom_cells; 932 int i, j, err; 933 934 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 935 936 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 937 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 938 if (err) 939 return err; 940 941 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 942 bool configure = false; 943 bool pfc = false; 944 u16 thres_cells; 945 u16 delay_cells; 946 u16 total_cells; 947 bool lossy; 948 949 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 950 if (prio_tc[j] == i) { 951 pfc = pfc_en & BIT(j); 952 configure = true; 953 break; 954 } 955 } 956 957 if (!configure) 958 continue; 959 960 lossy = !(pfc || pause_en); 961 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 962 thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells); 963 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 964 pfc, pause_en); 965 delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells); 966 total_cells = thres_cells + delay_cells; 967 968 taken_headroom_cells += total_cells; 969 if (taken_headroom_cells > max_headroom_cells) 970 return -ENOBUFS; 971 972 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 973 thres_cells, lossy); 974 } 975 976 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 977 } 978 979 int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 980 int mtu, bool pause_en) 981 { 982 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 983 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 984 struct ieee_pfc *my_pfc; 985 u8 *prio_tc; 986 987 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 988 my_pfc = dcb_en ? 
mlxsw_sp_port->dcb.pfc : NULL; 989 990 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 991 pause_en, my_pfc); 992 } 993 994 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 995 { 996 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 997 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 998 int err; 999 1000 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 1001 if (err) 1002 return err; 1003 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 1004 if (err) 1005 goto err_span_port_mtu_update; 1006 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 1007 if (err) 1008 goto err_port_mtu_set; 1009 dev->mtu = mtu; 1010 return 0; 1011 1012 err_port_mtu_set: 1013 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 1014 err_span_port_mtu_update: 1015 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1016 return err; 1017 } 1018 1019 static int 1020 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 1021 struct rtnl_link_stats64 *stats) 1022 { 1023 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1024 struct mlxsw_sp_port_pcpu_stats *p; 1025 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1026 u32 tx_dropped = 0; 1027 unsigned int start; 1028 int i; 1029 1030 for_each_possible_cpu(i) { 1031 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 1032 do { 1033 start = u64_stats_fetch_begin_irq(&p->syncp); 1034 rx_packets = p->rx_packets; 1035 rx_bytes = p->rx_bytes; 1036 tx_packets = p->tx_packets; 1037 tx_bytes = p->tx_bytes; 1038 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1039 1040 stats->rx_packets += rx_packets; 1041 stats->rx_bytes += rx_bytes; 1042 stats->tx_packets += tx_packets; 1043 stats->tx_bytes += tx_bytes; 1044 /* tx_dropped is u32, updated without syncp protection. 
		 */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* Only CPU-hit ("software") extended statistics are exposed. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group/priority for the port into @ppcnt_pl. */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill @stats from the IEEE 802.3 counter group of the PPCNT register. */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Gather extended (ECN/WRED/per-TC/per-prio) counters. Each PPCNT query
 * is best effort: a failed query leaves the corresponding xstats fields
 * untouched rather than failing the whole collection.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Periodic worker that refreshes the cached HW statistics, so that
 * ndo_get_stats64 can be served without register access.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	/* Re-arm the periodic refresh regardless of carrier state. */
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program VLAN membership/untagged state for [vid_begin, vid_end] via the
 * SPVM register. The register payload is heap-allocated; presumably it is
 * too large for the stack — confirm against MLXSW_REG_SPVM_LEN.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Apply a VLAN range in chunks of at most MLXSW_REG_SPVM_REC_MAX_COUNT
 * records per SPVM transaction.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLANs on the port; the default VLAN is only destroyed when
 * @flush_default is set.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the {port, VLAN} from whichever user currently holds it:
 * a bridge port takes precedence, otherwise a router FID.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a {port, VLAN} entry and program HW membership. Returns the new
 * entry, ERR_PTR(-EEXIST) if one already exists, or another ERR_PTR on
 * failure.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the HW membership programmed above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* ndo_setup_tc: dispatch TC offload requests to the per-qdisc handlers. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		/* Refuse to turn the feature off while offloaded filters
		 * are still installed on either flow block.
		 */
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	/* Take the port administratively down while toggling physical
	 * loopback, and bring it back up afterwards if it was running.
	 */
	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Toggle a single netdev feature through its handler and mirror the new
 * state in dev->features on success.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		/* Restore the cached feature set if any toggle failed. */
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	/* Copy the (possibly adjusted) config back to user space. */
	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Reset the port's hardware timestamping configuration to all-disabled. */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to supported speeds. */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

/* Read the port's current operational speed via the PTYS register. */
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

/* Configure one element of the port's egress scheduling (QEEC) hierarchy. */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the max shaper (rate/burst) on one QEEC hierarchy element. */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the min shaper (guaranteed rate) on one QEEC hierarchy element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map a switch priority to a traffic class via the QTCT register. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TC i+8 — presumably the multicast companion of TC i —
		 * is linked to the same subgroup; confirm against the
		 * Spectrum scheduling model.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable/disable separate multicast traffic classes (QTCTM). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create and register one front-panel port: core port, netdev, buffers,
 * scheduling, DCB, FIDs, qdiscs, VLANs and periodic stats collection.
 * @split_base_local_port is non-zero iff this is a split port.
 * On failure, everything initialized so far is unwound in reverse order.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	struct net_device *dev;
	bool splittable;
	int err;

	/* A port is further splittable only if it spans multiple lanes and
	 * is not itself the result of a split.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / lanes,
				   splittable, lanes,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Start with VLAN membership cleared for the whole VID space. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);
	INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
			  mlxsw_sp_span_speed_update_work);

	/* Publish the port before registering the netdev, so callbacks
	 * triggered by registration can already look it up.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down one front-panel port, mirroring mlxsw_sp_port_create(). */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop all deferred work before dismantling the port. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 1982 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1983 mlxsw_sp->ports[local_port] = NULL; 1984 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 1985 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1986 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1987 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1988 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1989 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1990 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 1991 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 1992 free_percpu(mlxsw_sp_port->pcpu_stats); 1993 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 1994 free_netdev(mlxsw_sp_port->dev); 1995 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1996 } 1997 1998 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 1999 { 2000 struct mlxsw_sp_port *mlxsw_sp_port; 2001 int err; 2002 2003 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 2004 if (!mlxsw_sp_port) 2005 return -ENOMEM; 2006 2007 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 2008 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 2009 2010 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 2011 mlxsw_sp_port, 2012 mlxsw_sp->base_mac, 2013 sizeof(mlxsw_sp->base_mac)); 2014 if (err) { 2015 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 2016 goto err_core_cpu_port_init; 2017 } 2018 2019 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 2020 return 0; 2021 2022 err_core_cpu_port_init: 2023 kfree(mlxsw_sp_port); 2024 return err; 2025 } 2026 2027 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 2028 { 2029 struct mlxsw_sp_port *mlxsw_sp_port = 2030 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 2031 2032 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 2033 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 2034 kfree(mlxsw_sp_port); 2035 } 2036 2037 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2038 { 2039 return 
mlxsw_sp->ports[local_port] != NULL; 2040 } 2041 2042 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 2043 { 2044 int i; 2045 2046 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 2047 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2048 mlxsw_sp_port_remove(mlxsw_sp, i); 2049 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2050 kfree(mlxsw_sp->ports); 2051 mlxsw_sp->ports = NULL; 2052 } 2053 2054 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 2055 { 2056 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2057 struct mlxsw_sp_port_mapping *port_mapping; 2058 size_t alloc_size; 2059 int i; 2060 int err; 2061 2062 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 2063 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 2064 if (!mlxsw_sp->ports) 2065 return -ENOMEM; 2066 2067 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 2068 if (err) 2069 goto err_cpu_port_create; 2070 2071 for (i = 1; i < max_ports; i++) { 2072 port_mapping = mlxsw_sp->port_mapping[i]; 2073 if (!port_mapping) 2074 continue; 2075 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); 2076 if (err) 2077 goto err_port_create; 2078 } 2079 return 0; 2080 2081 err_port_create: 2082 for (i--; i >= 1; i--) 2083 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2084 mlxsw_sp_port_remove(mlxsw_sp, i); 2085 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2086 err_cpu_port_create: 2087 kfree(mlxsw_sp->ports); 2088 mlxsw_sp->ports = NULL; 2089 return err; 2090 } 2091 2092 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 2093 { 2094 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2095 struct mlxsw_sp_port_mapping port_mapping; 2096 int i; 2097 int err; 2098 2099 mlxsw_sp->port_mapping = kcalloc(max_ports, 2100 sizeof(struct mlxsw_sp_port_mapping *), 2101 GFP_KERNEL); 2102 if (!mlxsw_sp->port_mapping) 2103 return -ENOMEM; 2104 2105 for (i = 1; i < max_ports; i++) { 2106 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 2107 if (err) 2108 goto 
err_port_module_info_get;
		/* Width of zero means no module is mapped to this local port;
		 * leave the port_mapping[] slot NULL.
		 */
		if (!port_mapping.width)
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	/* Free the mappings duplicated so far (slots may be NULL;
	 * kfree(NULL) is a no-op), then the array itself.
	 */
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}

/* Free all per-port module mappings and the mapping array. */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
}

/* Return the first local port of the cluster containing local_port.
 * Local ports are 1-based and grouped into clusters of max_width
 * consecutive ports, hence the (local_port - 1) bias.
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
{
	u8 offset = (local_port - 1) % max_width;

	return local_port - offset;
}

/* Create 'count' split ports starting at base_port, spaced 'offset'
 * local ports apart. Each split port gets an equal share of the parent
 * port's lanes (width / count), with the lane base advanced per port.
 * On failure, ports created so far are removed. Returns 0 or -errno.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the
gap and recreate them. */ 2181 for (i = 0; i < count * offset; i++) { 2182 port_mapping = mlxsw_sp->port_mapping[base_port + i]; 2183 if (!port_mapping) 2184 continue; 2185 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping); 2186 } 2187 } 2188 2189 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, 2190 unsigned int count, 2191 unsigned int max_width) 2192 { 2193 enum mlxsw_res_id local_ports_in_x_res_id; 2194 int split_width = max_width / count; 2195 2196 if (split_width == 1) 2197 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X; 2198 else if (split_width == 2) 2199 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X; 2200 else if (split_width == 4) 2201 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X; 2202 else 2203 return -EINVAL; 2204 2205 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id)) 2206 return -EINVAL; 2207 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); 2208 } 2209 2210 static struct mlxsw_sp_port * 2211 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2212 { 2213 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 2214 return mlxsw_sp->ports[local_port]; 2215 return NULL; 2216 } 2217 2218 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 2219 unsigned int count, 2220 struct netlink_ext_ack *extack) 2221 { 2222 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2223 struct mlxsw_sp_port_mapping port_mapping; 2224 struct mlxsw_sp_port *mlxsw_sp_port; 2225 int max_width; 2226 u8 base_port; 2227 int offset; 2228 int i; 2229 int err; 2230 2231 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2232 if (!mlxsw_sp_port) { 2233 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2234 local_port); 2235 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2236 return -EINVAL; 2237 } 2238 2239 max_width = mlxsw_core_module_max_width(mlxsw_core, 2240 
mlxsw_sp_port->mapping.module); 2241 if (max_width < 0) { 2242 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 2243 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 2244 return max_width; 2245 } 2246 2247 /* Split port with non-max cannot be split. */ 2248 if (mlxsw_sp_port->mapping.width != max_width) { 2249 netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n"); 2250 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); 2251 return -EINVAL; 2252 } 2253 2254 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 2255 if (offset < 0) { 2256 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 2257 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 2258 return -EINVAL; 2259 } 2260 2261 /* Only in case max split is being done, the local port and 2262 * base port may differ. 2263 */ 2264 base_port = count == max_width ? 2265 mlxsw_sp_cluster_base_port_get(local_port, max_width) : 2266 local_port; 2267 2268 for (i = 0; i < count * offset; i++) { 2269 /* Expect base port to exist and also the one in the middle in 2270 * case of maximal split count. 
2271 */ 2272 if (i == 0 || (count == max_width && i == count / 2)) 2273 continue; 2274 2275 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { 2276 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2277 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 2278 return -EINVAL; 2279 } 2280 } 2281 2282 port_mapping = mlxsw_sp_port->mapping; 2283 2284 for (i = 0; i < count; i++) 2285 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 2286 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 2287 2288 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, 2289 count, offset); 2290 if (err) { 2291 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2292 goto err_port_split_create; 2293 } 2294 2295 return 0; 2296 2297 err_port_split_create: 2298 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 2299 return err; 2300 } 2301 2302 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 2303 struct netlink_ext_ack *extack) 2304 { 2305 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2306 struct mlxsw_sp_port *mlxsw_sp_port; 2307 unsigned int count; 2308 int max_width; 2309 u8 base_port; 2310 int offset; 2311 int i; 2312 2313 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2314 if (!mlxsw_sp_port) { 2315 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2316 local_port); 2317 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2318 return -EINVAL; 2319 } 2320 2321 if (!mlxsw_sp_port->split) { 2322 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 2323 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2324 return -EINVAL; 2325 } 2326 2327 max_width = mlxsw_core_module_max_width(mlxsw_core, 2328 mlxsw_sp_port->mapping.module); 2329 if (max_width < 0) { 2330 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 2331 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 2332 
return max_width; 2333 } 2334 2335 count = max_width / mlxsw_sp_port->mapping.width; 2336 2337 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 2338 if (WARN_ON(offset < 0)) { 2339 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 2340 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 2341 return -EINVAL; 2342 } 2343 2344 base_port = mlxsw_sp_port->split_base_local_port; 2345 2346 for (i = 0; i < count; i++) 2347 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 2348 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 2349 2350 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 2351 2352 return 0; 2353 } 2354 2355 static void 2356 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 2357 { 2358 int i; 2359 2360 for (i = 0; i < TC_MAX_QUEUE; i++) 2361 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 2362 } 2363 2364 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2365 char *pude_pl, void *priv) 2366 { 2367 struct mlxsw_sp *mlxsw_sp = priv; 2368 struct mlxsw_sp_port *mlxsw_sp_port; 2369 enum mlxsw_reg_pude_oper_status status; 2370 u8 local_port; 2371 2372 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2373 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2374 if (!mlxsw_sp_port) 2375 return; 2376 2377 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2378 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2379 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2380 netif_carrier_on(mlxsw_sp_port->dev); 2381 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 2382 mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0); 2383 } else { 2384 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2385 netif_carrier_off(mlxsw_sp_port->dev); 2386 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 2387 } 2388 } 2389 2390 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 2391 char *mtpptr_pl, bool ingress) 2392 { 2393 u8 local_port; 2394 
u8 num_rec; 2395 int i; 2396 2397 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 2398 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 2399 for (i = 0; i < num_rec; i++) { 2400 u8 domain_number; 2401 u8 message_type; 2402 u16 sequence_id; 2403 u64 timestamp; 2404 2405 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 2406 &domain_number, &sequence_id, 2407 ×tamp); 2408 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 2409 message_type, domain_number, 2410 sequence_id, timestamp); 2411 } 2412 } 2413 2414 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 2415 char *mtpptr_pl, void *priv) 2416 { 2417 struct mlxsw_sp *mlxsw_sp = priv; 2418 2419 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 2420 } 2421 2422 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 2423 char *mtpptr_pl, void *priv) 2424 { 2425 struct mlxsw_sp *mlxsw_sp = priv; 2426 2427 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 2428 } 2429 2430 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2431 u8 local_port, void *priv) 2432 { 2433 struct mlxsw_sp *mlxsw_sp = priv; 2434 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2435 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2436 2437 if (unlikely(!mlxsw_sp_port)) { 2438 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2439 local_port); 2440 return; 2441 } 2442 2443 skb->dev = mlxsw_sp_port->dev; 2444 2445 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2446 u64_stats_update_begin(&pcpu_stats->syncp); 2447 pcpu_stats->rx_packets++; 2448 pcpu_stats->rx_bytes += skb->len; 2449 u64_stats_update_end(&pcpu_stats->syncp); 2450 2451 skb->protocol = eth_type_trans(skb, skb->dev); 2452 netif_receive_skb(skb); 2453 } 2454 2455 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 2456 void *priv) 2457 { 2458 skb->offload_fwd_mark = 1; 2459 return 
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2460 } 2461 2462 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2463 u8 local_port, void *priv) 2464 { 2465 skb->offload_l3_fwd_mark = 1; 2466 skb->offload_fwd_mark = 1; 2467 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2468 } 2469 2470 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2471 u8 local_port) 2472 { 2473 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2474 } 2475 2476 void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2477 u8 local_port) 2478 { 2479 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2480 struct mlxsw_sp_port_sample *sample; 2481 u32 size; 2482 2483 if (unlikely(!mlxsw_sp_port)) { 2484 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 2485 local_port); 2486 goto out; 2487 } 2488 2489 rcu_read_lock(); 2490 sample = rcu_dereference(mlxsw_sp_port->sample); 2491 if (!sample) 2492 goto out_unlock; 2493 size = sample->truncate ? 
sample->trunc_size : skb->len; 2494 psample_sample_packet(sample->psample_group, skb, size, 2495 mlxsw_sp_port->dev->ifindex, 0, sample->rate); 2496 out_unlock: 2497 rcu_read_unlock(); 2498 out: 2499 consume_skb(skb); 2500 } 2501 2502 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2503 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2504 _is_ctrl, SP_##_trap_group, DISCARD) 2505 2506 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2507 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2508 _is_ctrl, SP_##_trap_group, DISCARD) 2509 2510 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2511 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2512 _is_ctrl, SP_##_trap_group, DISCARD) 2513 2514 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2515 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2516 2517 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2518 /* Events */ 2519 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2520 /* L2 traps */ 2521 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2522 /* L3 traps */ 2523 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2524 false), 2525 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2526 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2527 false), 2528 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2529 ROUTER_EXP, false), 2530 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2531 ROUTER_EXP, false), 2532 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2533 ROUTER_EXP, false), 2534 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2535 ROUTER_EXP, false), 2536 /* Multicast Router Traps */ 2537 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2538 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2539 /* NVE traps */ 2540 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, 
NEIGH_DISCOVERY, false), 2541 }; 2542 2543 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2544 /* Events */ 2545 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2546 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2547 }; 2548 2549 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2550 { 2551 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2552 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2553 enum mlxsw_reg_qpcr_ir_units ir_units; 2554 int max_cpu_policers; 2555 bool is_bytes; 2556 u8 burst_size; 2557 u32 rate; 2558 int i, err; 2559 2560 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2561 return -EIO; 2562 2563 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2564 2565 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2566 for (i = 0; i < max_cpu_policers; i++) { 2567 is_bytes = false; 2568 switch (i) { 2569 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2570 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2571 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2572 rate = 1024; 2573 burst_size = 7; 2574 break; 2575 default: 2576 continue; 2577 } 2578 2579 __set_bit(i, mlxsw_sp->trap->policers_usage); 2580 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2581 burst_size); 2582 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2583 if (err) 2584 return err; 2585 } 2586 2587 return 0; 2588 } 2589 2590 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2591 { 2592 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2593 enum mlxsw_reg_htgt_trap_group i; 2594 int max_cpu_policers; 2595 int max_trap_groups; 2596 u8 priority, tc; 2597 u16 policer_id; 2598 int err; 2599 2600 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2601 return -EIO; 2602 2603 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2604 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2605 2606 for (i = 0; i < max_trap_groups; i++) { 2607 policer_id = 
i; 2608 switch (i) { 2609 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2610 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2611 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2612 priority = 1; 2613 tc = 1; 2614 break; 2615 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2616 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2617 tc = MLXSW_REG_HTGT_DEFAULT_TC; 2618 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2619 break; 2620 default: 2621 continue; 2622 } 2623 2624 if (max_cpu_policers <= policer_id && 2625 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2626 return -EIO; 2627 2628 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2629 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2630 if (err) 2631 return err; 2632 } 2633 2634 return 0; 2635 } 2636 2637 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp, 2638 const struct mlxsw_listener listeners[], 2639 size_t listeners_count) 2640 { 2641 int i; 2642 int err; 2643 2644 for (i = 0; i < listeners_count; i++) { 2645 err = mlxsw_core_trap_register(mlxsw_sp->core, 2646 &listeners[i], 2647 mlxsw_sp); 2648 if (err) 2649 goto err_listener_register; 2650 2651 } 2652 return 0; 2653 2654 err_listener_register: 2655 for (i--; i >= 0; i--) { 2656 mlxsw_core_trap_unregister(mlxsw_sp->core, 2657 &listeners[i], 2658 mlxsw_sp); 2659 } 2660 return err; 2661 } 2662 2663 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp, 2664 const struct mlxsw_listener listeners[], 2665 size_t listeners_count) 2666 { 2667 int i; 2668 2669 for (i = 0; i < listeners_count; i++) { 2670 mlxsw_core_trap_unregister(mlxsw_sp->core, 2671 &listeners[i], 2672 mlxsw_sp); 2673 } 2674 } 2675 2676 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2677 { 2678 struct mlxsw_sp_trap *trap; 2679 u64 max_policers; 2680 int err; 2681 2682 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2683 return -EIO; 2684 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2685 trap = kzalloc(struct_size(trap, 
policers_usage, 2686 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2687 if (!trap) 2688 return -ENOMEM; 2689 trap->max_policers = max_policers; 2690 mlxsw_sp->trap = trap; 2691 2692 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2693 if (err) 2694 goto err_cpu_policers_set; 2695 2696 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2697 if (err) 2698 goto err_trap_groups_set; 2699 2700 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener, 2701 ARRAY_SIZE(mlxsw_sp_listener)); 2702 if (err) 2703 goto err_traps_register; 2704 2705 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners, 2706 mlxsw_sp->listeners_count); 2707 if (err) 2708 goto err_extra_traps_init; 2709 2710 return 0; 2711 2712 err_extra_traps_init: 2713 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2714 ARRAY_SIZE(mlxsw_sp_listener)); 2715 err_traps_register: 2716 err_trap_groups_set: 2717 err_cpu_policers_set: 2718 kfree(trap); 2719 return err; 2720 } 2721 2722 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2723 { 2724 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners, 2725 mlxsw_sp->listeners_count); 2726 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2727 ARRAY_SIZE(mlxsw_sp_listener)); 2728 kfree(mlxsw_sp->trap); 2729 } 2730 2731 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2732 2733 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2734 { 2735 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2736 u32 seed; 2737 int err; 2738 2739 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2740 MLXSW_SP_LAG_SEED_INIT); 2741 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2742 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2743 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2744 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2745 MLXSW_REG_SLCR_LAG_HASH_SIP | 2746 MLXSW_REG_SLCR_LAG_HASH_DIP | 2747 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2748 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2749 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2750 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2751 if (err) 
2752 return err; 2753 2754 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 2755 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2756 return -EIO; 2757 2758 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 2759 sizeof(struct mlxsw_sp_upper), 2760 GFP_KERNEL); 2761 if (!mlxsw_sp->lags) 2762 return -ENOMEM; 2763 2764 return 0; 2765 } 2766 2767 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2768 { 2769 kfree(mlxsw_sp->lags); 2770 } 2771 2772 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 2773 { 2774 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2775 2776 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 2777 MLXSW_REG_HTGT_INVALID_POLICER, 2778 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2779 MLXSW_REG_HTGT_DEFAULT_TC); 2780 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2781 } 2782 2783 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2784 .clock_init = mlxsw_sp1_ptp_clock_init, 2785 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2786 .init = mlxsw_sp1_ptp_init, 2787 .fini = mlxsw_sp1_ptp_fini, 2788 .receive = mlxsw_sp1_ptp_receive, 2789 .transmitted = mlxsw_sp1_ptp_transmitted, 2790 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2791 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 2792 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2793 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2794 .get_stats_count = mlxsw_sp1_get_stats_count, 2795 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2796 .get_stats = mlxsw_sp1_get_stats, 2797 }; 2798 2799 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2800 .clock_init = mlxsw_sp2_ptp_clock_init, 2801 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2802 .init = mlxsw_sp2_ptp_init, 2803 .fini = mlxsw_sp2_ptp_fini, 2804 .receive = mlxsw_sp2_ptp_receive, 2805 .transmitted = mlxsw_sp2_ptp_transmitted, 2806 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2807 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2808 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2809 .get_ts_info = 
mlxsw_sp2_ptp_get_ts_info, 2810 .get_stats_count = mlxsw_sp2_get_stats_count, 2811 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2812 .get_stats = mlxsw_sp2_get_stats, 2813 }; 2814 2815 static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) 2816 { 2817 return mtu * 5 / 2; 2818 } 2819 2820 static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { 2821 .buffsize_get = mlxsw_sp1_span_buffsize_get, 2822 }; 2823 2824 #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 2825 #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 2826 2827 static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor) 2828 { 2829 return 3 * mtu + buffer_factor * speed / 1000; 2830 } 2831 2832 static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) 2833 { 2834 int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR; 2835 2836 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 2837 } 2838 2839 static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { 2840 .buffsize_get = mlxsw_sp2_span_buffsize_get, 2841 }; 2842 2843 static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) 2844 { 2845 int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; 2846 2847 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 2848 } 2849 2850 static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { 2851 .buffsize_get = mlxsw_sp3_span_buffsize_get, 2852 }; 2853 2854 u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed) 2855 { 2856 u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu); 2857 2858 return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; 2859 } 2860 2861 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2862 unsigned long event, void *ptr); 2863 2864 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2865 const struct mlxsw_bus_info *mlxsw_bus_info, 2866 struct netlink_ext_ack *extack) 2867 { 2868 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2869 int err; 2870 2871 mlxsw_sp->core = mlxsw_core; 2872 
mlxsw_sp->bus_info = mlxsw_bus_info; 2873 2874 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 2875 if (err) 2876 return err; 2877 2878 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 2879 2880 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2881 if (err) { 2882 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 2883 return err; 2884 } 2885 2886 err = mlxsw_sp_kvdl_init(mlxsw_sp); 2887 if (err) { 2888 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 2889 return err; 2890 } 2891 2892 err = mlxsw_sp_fids_init(mlxsw_sp); 2893 if (err) { 2894 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 2895 goto err_fids_init; 2896 } 2897 2898 err = mlxsw_sp_traps_init(mlxsw_sp); 2899 if (err) { 2900 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 2901 goto err_traps_init; 2902 } 2903 2904 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 2905 if (err) { 2906 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 2907 goto err_devlink_traps_init; 2908 } 2909 2910 err = mlxsw_sp_buffers_init(mlxsw_sp); 2911 if (err) { 2912 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 2913 goto err_buffers_init; 2914 } 2915 2916 err = mlxsw_sp_lag_init(mlxsw_sp); 2917 if (err) { 2918 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 2919 goto err_lag_init; 2920 } 2921 2922 /* Initialize SPAN before router and switchdev, so that those components 2923 * can call mlxsw_sp_span_respin(). 
2924 */ 2925 err = mlxsw_sp_span_init(mlxsw_sp); 2926 if (err) { 2927 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 2928 goto err_span_init; 2929 } 2930 2931 err = mlxsw_sp_switchdev_init(mlxsw_sp); 2932 if (err) { 2933 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 2934 goto err_switchdev_init; 2935 } 2936 2937 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 2938 if (err) { 2939 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 2940 goto err_counter_pool_init; 2941 } 2942 2943 err = mlxsw_sp_afa_init(mlxsw_sp); 2944 if (err) { 2945 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 2946 goto err_afa_init; 2947 } 2948 2949 err = mlxsw_sp_nve_init(mlxsw_sp); 2950 if (err) { 2951 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 2952 goto err_nve_init; 2953 } 2954 2955 err = mlxsw_sp_acl_init(mlxsw_sp); 2956 if (err) { 2957 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 2958 goto err_acl_init; 2959 } 2960 2961 err = mlxsw_sp_router_init(mlxsw_sp, extack); 2962 if (err) { 2963 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 2964 goto err_router_init; 2965 } 2966 2967 if (mlxsw_sp->bus_info->read_frc_capable) { 2968 /* NULL is a valid return value from clock_init */ 2969 mlxsw_sp->clock = 2970 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 2971 mlxsw_sp->bus_info->dev); 2972 if (IS_ERR(mlxsw_sp->clock)) { 2973 err = PTR_ERR(mlxsw_sp->clock); 2974 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 2975 goto err_ptp_clock_init; 2976 } 2977 } 2978 2979 if (mlxsw_sp->clock) { 2980 /* NULL is a valid return value from ptp_ops->init */ 2981 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 2982 if (IS_ERR(mlxsw_sp->ptp_state)) { 2983 err = PTR_ERR(mlxsw_sp->ptp_state); 2984 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 2985 goto err_ptp_init; 2986 } 2987 } 2988 2989 /* Initialize netdevice notifier after router and SPAN 
is initialized, 2990 * so that the event handler can use router structures and call SPAN 2991 * respin. 2992 */ 2993 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 2994 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2995 &mlxsw_sp->netdevice_nb); 2996 if (err) { 2997 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 2998 goto err_netdev_notifier; 2999 } 3000 3001 err = mlxsw_sp_dpipe_init(mlxsw_sp); 3002 if (err) { 3003 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 3004 goto err_dpipe_init; 3005 } 3006 3007 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 3008 if (err) { 3009 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 3010 goto err_port_module_info_init; 3011 } 3012 3013 err = mlxsw_sp_ports_create(mlxsw_sp); 3014 if (err) { 3015 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3016 goto err_ports_create; 3017 } 3018 3019 return 0; 3020 3021 err_ports_create: 3022 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3023 err_port_module_info_init: 3024 mlxsw_sp_dpipe_fini(mlxsw_sp); 3025 err_dpipe_init: 3026 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3027 &mlxsw_sp->netdevice_nb); 3028 err_netdev_notifier: 3029 if (mlxsw_sp->clock) 3030 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3031 err_ptp_init: 3032 if (mlxsw_sp->clock) 3033 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3034 err_ptp_clock_init: 3035 mlxsw_sp_router_fini(mlxsw_sp); 3036 err_router_init: 3037 mlxsw_sp_acl_fini(mlxsw_sp); 3038 err_acl_init: 3039 mlxsw_sp_nve_fini(mlxsw_sp); 3040 err_nve_init: 3041 mlxsw_sp_afa_fini(mlxsw_sp); 3042 err_afa_init: 3043 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3044 err_counter_pool_init: 3045 mlxsw_sp_switchdev_fini(mlxsw_sp); 3046 err_switchdev_init: 3047 mlxsw_sp_span_fini(mlxsw_sp); 3048 err_span_init: 3049 mlxsw_sp_lag_fini(mlxsw_sp); 3050 err_lag_init: 3051 mlxsw_sp_buffers_fini(mlxsw_sp); 3052 err_buffers_init: 3053 
mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3054 err_devlink_traps_init: 3055 mlxsw_sp_traps_fini(mlxsw_sp); 3056 err_traps_init: 3057 mlxsw_sp_fids_fini(mlxsw_sp); 3058 err_fids_init: 3059 mlxsw_sp_kvdl_fini(mlxsw_sp); 3060 return err; 3061 } 3062 3063 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 3064 const struct mlxsw_bus_info *mlxsw_bus_info, 3065 struct netlink_ext_ack *extack) 3066 { 3067 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3068 3069 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; 3070 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; 3071 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 3072 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 3073 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 3074 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 3075 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 3076 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 3077 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 3078 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 3079 mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; 3080 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 3081 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 3082 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 3083 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 3084 mlxsw_sp->listeners = mlxsw_sp1_listener; 3085 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 3086 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 3087 3088 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3089 } 3090 3091 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 3092 const struct mlxsw_bus_info *mlxsw_bus_info, 3093 struct netlink_ext_ack *extack) 3094 { 3095 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3096 3097 mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev; 3098 mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME; 3099 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3100 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3101 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3102 mlxsw_sp->mr_tcam_ops = 
&mlxsw_sp2_mr_tcam_ops; 3103 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3104 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3105 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3106 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3107 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 3108 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3109 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3110 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3111 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 3112 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 3113 3114 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3115 } 3116 3117 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 3118 const struct mlxsw_bus_info *mlxsw_bus_info, 3119 struct netlink_ext_ack *extack) 3120 { 3121 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3122 3123 mlxsw_sp->req_rev = &mlxsw_sp3_fw_rev; 3124 mlxsw_sp->fw_filename = MLXSW_SP3_FW_FILENAME; 3125 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3126 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3127 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3128 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3129 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3130 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3131 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3132 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3133 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 3134 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3135 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3136 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3137 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3138 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3139 3140 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3141 } 3142 3143 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3144 { 3145 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3146 3147 mlxsw_sp_ports_remove(mlxsw_sp); 3148 
mlxsw_sp_port_module_info_fini(mlxsw_sp); 3149 mlxsw_sp_dpipe_fini(mlxsw_sp); 3150 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3151 &mlxsw_sp->netdevice_nb); 3152 if (mlxsw_sp->clock) { 3153 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3154 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3155 } 3156 mlxsw_sp_router_fini(mlxsw_sp); 3157 mlxsw_sp_acl_fini(mlxsw_sp); 3158 mlxsw_sp_nve_fini(mlxsw_sp); 3159 mlxsw_sp_afa_fini(mlxsw_sp); 3160 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3161 mlxsw_sp_switchdev_fini(mlxsw_sp); 3162 mlxsw_sp_span_fini(mlxsw_sp); 3163 mlxsw_sp_lag_fini(mlxsw_sp); 3164 mlxsw_sp_buffers_fini(mlxsw_sp); 3165 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3166 mlxsw_sp_traps_fini(mlxsw_sp); 3167 mlxsw_sp_fids_fini(mlxsw_sp); 3168 mlxsw_sp_kvdl_fini(mlxsw_sp); 3169 } 3170 3171 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 3172 * 802.1Q FIDs 3173 */ 3174 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 3175 VLAN_VID_MASK - 1) 3176 3177 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3178 .used_max_mid = 1, 3179 .max_mid = MLXSW_SP_MID_MAX, 3180 .used_flood_tables = 1, 3181 .used_flood_mode = 1, 3182 .flood_mode = 3, 3183 .max_fid_flood_tables = 3, 3184 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3185 .used_max_ib_mc = 1, 3186 .max_ib_mc = 0, 3187 .used_max_pkey = 1, 3188 .max_pkey = 0, 3189 .used_kvd_sizes = 1, 3190 .kvd_hash_single_parts = 59, 3191 .kvd_hash_double_parts = 41, 3192 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3193 .swid_config = { 3194 { 3195 .used_type = 1, 3196 .type = MLXSW_PORT_SWID_TYPE_ETH, 3197 } 3198 }, 3199 }; 3200 3201 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3202 .used_max_mid = 1, 3203 .max_mid = MLXSW_SP_MID_MAX, 3204 .used_flood_tables = 1, 3205 .used_flood_mode = 1, 3206 .flood_mode = 3, 3207 .max_fid_flood_tables = 3, 3208 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3209 .used_max_ib_mc = 1, 3210 
.max_ib_mc = 0, 3211 .used_max_pkey = 1, 3212 .max_pkey = 0, 3213 .swid_config = { 3214 { 3215 .used_type = 1, 3216 .type = MLXSW_PORT_SWID_TYPE_ETH, 3217 } 3218 }, 3219 }; 3220 3221 static void 3222 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3223 struct devlink_resource_size_params *kvd_size_params, 3224 struct devlink_resource_size_params *linear_size_params, 3225 struct devlink_resource_size_params *hash_double_size_params, 3226 struct devlink_resource_size_params *hash_single_size_params) 3227 { 3228 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3229 KVD_SINGLE_MIN_SIZE); 3230 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3231 KVD_DOUBLE_MIN_SIZE); 3232 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3233 u32 linear_size_min = 0; 3234 3235 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3236 MLXSW_SP_KVD_GRANULARITY, 3237 DEVLINK_RESOURCE_UNIT_ENTRY); 3238 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3239 kvd_size - single_size_min - 3240 double_size_min, 3241 MLXSW_SP_KVD_GRANULARITY, 3242 DEVLINK_RESOURCE_UNIT_ENTRY); 3243 devlink_resource_size_params_init(hash_double_size_params, 3244 double_size_min, 3245 kvd_size - single_size_min - 3246 linear_size_min, 3247 MLXSW_SP_KVD_GRANULARITY, 3248 DEVLINK_RESOURCE_UNIT_ENTRY); 3249 devlink_resource_size_params_init(hash_single_size_params, 3250 single_size_min, 3251 kvd_size - double_size_min - 3252 linear_size_min, 3253 MLXSW_SP_KVD_GRANULARITY, 3254 DEVLINK_RESOURCE_UNIT_ENTRY); 3255 } 3256 3257 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3258 { 3259 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3260 struct devlink_resource_size_params hash_single_size_params; 3261 struct devlink_resource_size_params hash_double_size_params; 3262 struct devlink_resource_size_params linear_size_params; 3263 struct devlink_resource_size_params kvd_size_params; 3264 u32 kvd_size, single_size, 
double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	/* Top-level KVD resource, sized from the queried device capability. */
	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	/* Linear partition defaults to the profile-provided size. */
	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the remaining (hash) space between double and single entry
	 * partitions according to the profile's parts ratio, rounded down
	 * to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	/* Single-entry hash partition gets whatever is left. */
	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* Spectrum-2 and later expose only the monolithic KVD devlink resource;
 * unlike Spectrum-1 there are no linear/hash child partitions here.
 */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	/* min == max: the KVD size is not resizable on these ASICs. */
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

/* Register the SPAN (mirroring agents) devlink resource, sized by the
 * device's MAX_SPAN capability.
 */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

/* Register all Spectrum-1 devlink resources (KVD tree, SPAN, counters);
 * on any partial failure every resource registered so far is torn down.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* As mlxsw_sp1_resources_register(), but with the flat Spectrum-2 KVD. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
if (err) 3392 return err; 3393 3394 err = mlxsw_sp_resources_span_register(mlxsw_core); 3395 if (err) 3396 goto err_resources_span_register; 3397 3398 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3399 if (err) 3400 goto err_resources_counter_register; 3401 3402 return 0; 3403 3404 err_resources_counter_register: 3405 err_resources_span_register: 3406 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 3407 return err; 3408 } 3409 3410 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3411 const struct mlxsw_config_profile *profile, 3412 u64 *p_single_size, u64 *p_double_size, 3413 u64 *p_linear_size) 3414 { 3415 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3416 u32 double_size; 3417 int err; 3418 3419 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3420 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 3421 return -EIO; 3422 3423 /* The hash part is what left of the kvd without the 3424 * linear part. It is split to the single size and 3425 * double size by the parts ratio from the profile. 3426 * Both sizes must be a multiplications of the 3427 * granularity from the profile. In case the user 3428 * provided the sizes they are obtained via devlink. 
3429 */ 3430 err = devlink_resource_size_get(devlink, 3431 MLXSW_SP_RESOURCE_KVD_LINEAR, 3432 p_linear_size); 3433 if (err) 3434 *p_linear_size = profile->kvd_linear_size; 3435 3436 err = devlink_resource_size_get(devlink, 3437 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3438 p_double_size); 3439 if (err) { 3440 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3441 *p_linear_size; 3442 double_size *= profile->kvd_hash_double_parts; 3443 double_size /= profile->kvd_hash_double_parts + 3444 profile->kvd_hash_single_parts; 3445 *p_double_size = rounddown(double_size, 3446 MLXSW_SP_KVD_GRANULARITY); 3447 } 3448 3449 err = devlink_resource_size_get(devlink, 3450 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3451 p_single_size); 3452 if (err) 3453 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3454 *p_double_size - *p_linear_size; 3455 3456 /* Check results are legal. */ 3457 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3458 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3459 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3460 return -EIO; 3461 3462 return 0; 3463 } 3464 3465 static int 3466 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, 3467 union devlink_param_value val, 3468 struct netlink_ext_ack *extack) 3469 { 3470 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && 3471 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { 3472 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); 3473 return -EINVAL; 3474 } 3475 3476 return 0; 3477 } 3478 3479 static const struct devlink_param mlxsw_sp_devlink_params[] = { 3480 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, 3481 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 3482 NULL, NULL, 3483 mlxsw_sp_devlink_param_fw_load_policy_validate), 3484 }; 3485 3486 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) 3487 { 3488 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3489 union 
devlink_param_value value; 3490 int err; 3491 3492 err = devlink_params_register(devlink, mlxsw_sp_devlink_params, 3493 ARRAY_SIZE(mlxsw_sp_devlink_params)); 3494 if (err) 3495 return err; 3496 3497 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; 3498 devlink_param_driverinit_value_set(devlink, 3499 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 3500 value); 3501 return 0; 3502 } 3503 3504 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) 3505 { 3506 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3507 mlxsw_sp_devlink_params, 3508 ARRAY_SIZE(mlxsw_sp_devlink_params)); 3509 } 3510 3511 static int 3512 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 3513 struct devlink_param_gset_ctx *ctx) 3514 { 3515 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3516 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3517 3518 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 3519 return 0; 3520 } 3521 3522 static int 3523 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 3524 struct devlink_param_gset_ctx *ctx) 3525 { 3526 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3527 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3528 3529 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 3530 } 3531 3532 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 3533 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3534 "acl_region_rehash_interval", 3535 DEVLINK_PARAM_TYPE_U32, 3536 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3537 mlxsw_sp_params_acl_region_rehash_intrvl_get, 3538 mlxsw_sp_params_acl_region_rehash_intrvl_set, 3539 NULL), 3540 }; 3541 3542 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 3543 { 3544 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3545 union devlink_param_value value; 3546 int err; 3547 3548 err = mlxsw_sp_params_register(mlxsw_core); 3549 if (err) 
3550 return err; 3551 3552 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 3553 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3554 if (err) 3555 goto err_devlink_params_register; 3556 3557 value.vu32 = 0; 3558 devlink_param_driverinit_value_set(devlink, 3559 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3560 value); 3561 return 0; 3562 3563 err_devlink_params_register: 3564 mlxsw_sp_params_unregister(mlxsw_core); 3565 return err; 3566 } 3567 3568 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 3569 { 3570 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3571 mlxsw_sp2_devlink_params, 3572 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3573 mlxsw_sp_params_unregister(mlxsw_core); 3574 } 3575 3576 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3577 struct sk_buff *skb, u8 local_port) 3578 { 3579 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3580 3581 skb_pull(skb, MLXSW_TXHDR_LEN); 3582 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3583 } 3584 3585 static struct mlxsw_driver mlxsw_sp1_driver = { 3586 .kind = mlxsw_sp1_driver_name, 3587 .priv_size = sizeof(struct mlxsw_sp), 3588 .init = mlxsw_sp1_init, 3589 .fini = mlxsw_sp_fini, 3590 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3591 .port_split = mlxsw_sp_port_split, 3592 .port_unsplit = mlxsw_sp_port_unsplit, 3593 .sb_pool_get = mlxsw_sp_sb_pool_get, 3594 .sb_pool_set = mlxsw_sp_sb_pool_set, 3595 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3596 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3597 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3598 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3599 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3600 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3601 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3602 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3603 .flash_update = mlxsw_sp_flash_update, 3604 .trap_init = mlxsw_sp_trap_init, 3605 
.trap_fini = mlxsw_sp_trap_fini, 3606 .trap_action_set = mlxsw_sp_trap_action_set, 3607 .trap_group_init = mlxsw_sp_trap_group_init, 3608 .trap_group_set = mlxsw_sp_trap_group_set, 3609 .trap_policer_init = mlxsw_sp_trap_policer_init, 3610 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3611 .trap_policer_set = mlxsw_sp_trap_policer_set, 3612 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3613 .txhdr_construct = mlxsw_sp_txhdr_construct, 3614 .resources_register = mlxsw_sp1_resources_register, 3615 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3616 .params_register = mlxsw_sp_params_register, 3617 .params_unregister = mlxsw_sp_params_unregister, 3618 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3619 .txhdr_len = MLXSW_TXHDR_LEN, 3620 .profile = &mlxsw_sp1_config_profile, 3621 .res_query_enabled = true, 3622 }; 3623 3624 static struct mlxsw_driver mlxsw_sp2_driver = { 3625 .kind = mlxsw_sp2_driver_name, 3626 .priv_size = sizeof(struct mlxsw_sp), 3627 .init = mlxsw_sp2_init, 3628 .fini = mlxsw_sp_fini, 3629 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3630 .port_split = mlxsw_sp_port_split, 3631 .port_unsplit = mlxsw_sp_port_unsplit, 3632 .sb_pool_get = mlxsw_sp_sb_pool_get, 3633 .sb_pool_set = mlxsw_sp_sb_pool_set, 3634 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3635 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3636 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3637 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3638 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3639 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3640 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3641 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3642 .flash_update = mlxsw_sp_flash_update, 3643 .trap_init = mlxsw_sp_trap_init, 3644 .trap_fini = mlxsw_sp_trap_fini, 3645 .trap_action_set = mlxsw_sp_trap_action_set, 3646 .trap_group_init = mlxsw_sp_trap_group_init, 3647 .trap_group_set = mlxsw_sp_trap_group_set, 3648 .trap_policer_init = 
mlxsw_sp_trap_policer_init, 3649 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3650 .trap_policer_set = mlxsw_sp_trap_policer_set, 3651 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3652 .txhdr_construct = mlxsw_sp_txhdr_construct, 3653 .resources_register = mlxsw_sp2_resources_register, 3654 .params_register = mlxsw_sp2_params_register, 3655 .params_unregister = mlxsw_sp2_params_unregister, 3656 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3657 .txhdr_len = MLXSW_TXHDR_LEN, 3658 .profile = &mlxsw_sp2_config_profile, 3659 .res_query_enabled = true, 3660 }; 3661 3662 static struct mlxsw_driver mlxsw_sp3_driver = { 3663 .kind = mlxsw_sp3_driver_name, 3664 .priv_size = sizeof(struct mlxsw_sp), 3665 .init = mlxsw_sp3_init, 3666 .fini = mlxsw_sp_fini, 3667 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3668 .port_split = mlxsw_sp_port_split, 3669 .port_unsplit = mlxsw_sp_port_unsplit, 3670 .sb_pool_get = mlxsw_sp_sb_pool_get, 3671 .sb_pool_set = mlxsw_sp_sb_pool_set, 3672 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3673 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3674 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3675 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3676 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3677 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3678 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3679 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3680 .flash_update = mlxsw_sp_flash_update, 3681 .trap_init = mlxsw_sp_trap_init, 3682 .trap_fini = mlxsw_sp_trap_fini, 3683 .trap_action_set = mlxsw_sp_trap_action_set, 3684 .trap_group_init = mlxsw_sp_trap_group_init, 3685 .trap_group_set = mlxsw_sp_trap_group_set, 3686 .trap_policer_init = mlxsw_sp_trap_policer_init, 3687 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3688 .trap_policer_set = mlxsw_sp_trap_policer_set, 3689 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3690 .txhdr_construct = 
mlxsw_sp_txhdr_construct, 3691 .resources_register = mlxsw_sp2_resources_register, 3692 .params_register = mlxsw_sp2_params_register, 3693 .params_unregister = mlxsw_sp2_params_unregister, 3694 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3695 .txhdr_len = MLXSW_TXHDR_LEN, 3696 .profile = &mlxsw_sp2_config_profile, 3697 .res_query_enabled = true, 3698 }; 3699 3700 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3701 { 3702 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3703 } 3704 3705 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 3706 { 3707 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 3708 int ret = 0; 3709 3710 if (mlxsw_sp_port_dev_check(lower_dev)) { 3711 *p_mlxsw_sp_port = netdev_priv(lower_dev); 3712 ret = 1; 3713 } 3714 3715 return ret; 3716 } 3717 3718 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 3719 { 3720 struct mlxsw_sp_port *mlxsw_sp_port; 3721 3722 if (mlxsw_sp_port_dev_check(dev)) 3723 return netdev_priv(dev); 3724 3725 mlxsw_sp_port = NULL; 3726 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 3727 3728 return mlxsw_sp_port; 3729 } 3730 3731 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3732 { 3733 struct mlxsw_sp_port *mlxsw_sp_port; 3734 3735 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3736 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(); caller is expected to
 * hold the RCU read lock for the duration of the lookup.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Find the mlxsw_sp port at or below @dev and take a reference on its
 * netdev. Release with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make the port leave every bridge reachable through @lag_dev: the
 * bridge @lag_dev itself is enslaved to, and the bridges that upper
 * devices of the LAG are enslaved to.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy a hardware LAG previously created by mlxsw_sp_lag_create(). */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add the port to LAG @lag_id's collector at member slot @port_index
 * (SLCOR register).
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port from LAG @lag_id's collector. */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection for the port on LAG @lag_id (per the SLCOR register
 * "col_enable" operation).
 */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable collection for the port on LAG @lag_id. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Map @lag_dev to a LAG index: reuse the index already bound to it, or
 * hand out the first free one. Returns -EBUSY when all MAX_LAG indices
 * are in use.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if
(lag->ref_count) { 3862 if (lag->dev == lag_dev) { 3863 *p_lag_id = i; 3864 return 0; 3865 } 3866 } else if (free_lag_id < 0) { 3867 free_lag_id = i; 3868 } 3869 } 3870 if (free_lag_id < 0) 3871 return -EBUSY; 3872 *p_lag_id = free_lag_id; 3873 return 0; 3874 } 3875 3876 static bool 3877 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 3878 struct net_device *lag_dev, 3879 struct netdev_lag_upper_info *lag_upper_info, 3880 struct netlink_ext_ack *extack) 3881 { 3882 u16 lag_id; 3883 3884 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 3885 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 3886 return false; 3887 } 3888 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 3889 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 3890 return false; 3891 } 3892 return true; 3893 } 3894 3895 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 3896 u16 lag_id, u8 *p_port_index) 3897 { 3898 u64 max_lag_members; 3899 int i; 3900 3901 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 3902 MAX_LAG_MEMBERS); 3903 for (i = 0; i < max_lag_members; i++) { 3904 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 3905 *p_port_index = i; 3906 return 0; 3907 } 3908 } 3909 return -EBUSY; 3910 } 3911 3912 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 3913 struct net_device *lag_dev) 3914 { 3915 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3916 struct mlxsw_sp_upper *lag; 3917 u16 lag_id; 3918 u8 port_index; 3919 int err; 3920 3921 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 3922 if (err) 3923 return err; 3924 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 3925 if (!lag->ref_count) { 3926 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 3927 if (err) 3928 return err; 3929 lag->dev = lag_dev; 3930 } 3931 3932 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 3933 if (err) 3934 return err; 3935 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, 
port_index); 3936 if (err) 3937 goto err_col_port_add; 3938 3939 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 3940 mlxsw_sp_port->local_port); 3941 mlxsw_sp_port->lag_id = lag_id; 3942 mlxsw_sp_port->lagged = 1; 3943 lag->ref_count++; 3944 3945 /* Port is no longer usable as a router interface */ 3946 if (mlxsw_sp_port->default_vlan->fid) 3947 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 3948 3949 return 0; 3950 3951 err_col_port_add: 3952 if (!lag->ref_count) 3953 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 3954 return err; 3955 } 3956 3957 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 3958 struct net_device *lag_dev) 3959 { 3960 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3961 u16 lag_id = mlxsw_sp_port->lag_id; 3962 struct mlxsw_sp_upper *lag; 3963 3964 if (!mlxsw_sp_port->lagged) 3965 return; 3966 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 3967 WARN_ON(lag->ref_count == 0); 3968 3969 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 3970 3971 /* Any VLANs configured on the port are no longer valid */ 3972 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 3973 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 3974 /* Make the LAG and its directly linked uppers leave bridges they 3975 * are memeber in 3976 */ 3977 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 3978 3979 if (lag->ref_count == 1) 3980 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 3981 3982 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 3983 mlxsw_sp_port->local_port); 3984 mlxsw_sp_port->lagged = 0; 3985 lag->ref_count--; 3986 3987 /* Make sure untagged frames are allowed to ingress */ 3988 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3989 } 3990 3991 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 3992 u16 lag_id) 3993 { 3994 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3995 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3996 3997 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, 
lag_id, 3998 mlxsw_sp_port->local_port); 3999 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4000 } 4001 4002 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4003 u16 lag_id) 4004 { 4005 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4006 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4007 4008 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4009 mlxsw_sp_port->local_port); 4010 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4011 } 4012 4013 static int 4014 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 4015 { 4016 int err; 4017 4018 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 4019 mlxsw_sp_port->lag_id); 4020 if (err) 4021 return err; 4022 4023 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4024 if (err) 4025 goto err_dist_port_add; 4026 4027 return 0; 4028 4029 err_dist_port_add: 4030 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4031 return err; 4032 } 4033 4034 static int 4035 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 4036 { 4037 int err; 4038 4039 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4040 mlxsw_sp_port->lag_id); 4041 if (err) 4042 return err; 4043 4044 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 4045 mlxsw_sp_port->lag_id); 4046 if (err) 4047 goto err_col_port_disable; 4048 4049 return 0; 4050 4051 err_col_port_disable: 4052 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4053 return err; 4054 } 4055 4056 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4057 struct netdev_lag_lower_state_info *info) 4058 { 4059 if (info->tx_enabled) 4060 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 4061 else 4062 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4063 } 4064 4065 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4066 bool enable) 4067 { 4068 struct mlxsw_sp *mlxsw_sp = 
		mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is heap-allocated; presumably too large for the
	 * stack — NOTE(review): confirm against MLXSW_REG_SPMS_LEN.
	 */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	/* Apply the same state to all 4K VLANs in one register write */
	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for enslavement to an OVS master: switch to virtual
 * port mode, set all VLANs to forwarding, open VLANs 1..4094 and disable
 * learning on every VLAN. Each step is unwound in reverse on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only for the VIDs already processed */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order (best effort; errors
 * from the individual teardown steps are ignored).
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* Return true if more than one VxLAN device is a lower of the bridge. */
static bool
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* In a VLAN-aware bridge every VxLAN device must map to a distinct VLAN.
 * Return false if two VxLAN lowers resolve to the same mapped VID.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		/* Devices without a mapped VID do not conflict */
		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Validate that a bridge with VxLAN lowers is in a configuration the
 * device can offload. Sets an extack message and returns false otherwise.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle NETDEV_PRECHANGEUPPER (validation, may veto) and
 * NETDEV_CHANGEUPPER (apply) for a physical port: enslavement to bridge,
 * LAG, OVS master, VLAN or macvlan uppers.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Veto any upper type the device cannot offload */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Remaining checks only apply when linking, not unlinking */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				/* Stop TX/RX on the LAG before leaving it */
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Handle NETDEV_CHANGELOWERSTATE for a LAG member port: reflect the new
 * lower state into the device. The error is only logged, never returned,
 * since the stack does not veto this event.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch a netdevice event for a physical port to the matching handler. */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Propagate an event on a LAG device to each of its mlxsw member ports,
 * stopping at the first error.
 */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle [PRE]CHANGEUPPER for a VLAN device on top of an mlxsw port:
 * only bridge and macvlan uppers are supported.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							 upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed anything else */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Propagate an event on a VLAN device over a LAG to each mlxsw member
 * port of the LAG, stopping at the first error.
 */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle [PRE]CHANGEUPPER for a VLAN device on top of a bridge: only
 * macvlan uppers (on top of a router interface) are supported.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Ignore bridges with no mlxsw lowers */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Dispatch an event on a VLAN device according to its real device type. */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle [PRE]CHANGEUPPER for a bridge device: only VLAN and macvlan
 * uppers are supported; on unlink, tear down the associated RIF state.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Veto any upper placed on a macvlan device; macvlans themselves only
 * exist on top of router interfaces and cannot be stacked further.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* Return true if the event is an enslavement to / release from a VRF. */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events on a VxLAN device: join/leave an offloaded bridge on
 * CHANGEUPPER, and on PRE_UP / DOWN when the device is brought up or
 * down while already enslaved.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges without mlxsw lowers */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* Join is deferred to NETDEV_PRE_UP if not running */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Top-level netdevice notifier callback: invalidate/respin SPAN state,
 * then dispatch the event by device type. Note the VxLAN check is an
 * independent "if" (not "else if"): a VxLAN device may also be an IPinIP
 * underlay, and both handlers must run.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Validators for IPv4/IPv6 address addition, registered at module init */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX,
		     PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Register address validators, the three core drivers (SP1/SP2/SP3) and
 * their PCI drivers. Each failure unwinds every earlier registration in
 * reverse order via the goto ladder.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

/* Tear down everything registered in mlxsw_sp_module_init(), in reverse
 * order of registration.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);