1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <linux/refcount.h> 27 #include <linux/rhashtable.h> 28 #include <net/switchdev.h> 29 #include <net/pkt_cls.h> 30 #include <net/netevent.h> 31 #include <net/addrconf.h> 32 33 #include "spectrum.h" 34 #include "pci.h" 35 #include "core.h" 36 #include "core_env.h" 37 #include "reg.h" 38 #include "port.h" 39 #include "trap.h" 40 #include "txheader.h" 41 #include "spectrum_cnt.h" 42 #include "spectrum_dpipe.h" 43 #include "spectrum_acl_flex_actions.h" 44 #include "spectrum_span.h" 45 #include "spectrum_ptp.h" 46 #include "spectrum_trap.h" 47 48 #define MLXSW_SP_FWREV_MINOR 2010 49 #define MLXSW_SP_FWREV_SUBMINOR 1006 50 51 #define MLXSW_SP1_FWREV_MAJOR 13 52 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 53 54 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 55 .major = MLXSW_SP1_FWREV_MAJOR, 56 .minor = MLXSW_SP_FWREV_MINOR, 57 .subminor = MLXSW_SP_FWREV_SUBMINOR, 58 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 59 }; 60 61 #define MLXSW_SP1_FW_FILENAME \ 62 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 63 "." __stringify(MLXSW_SP_FWREV_MINOR) \ 64 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" 65 66 #define MLXSW_SP2_FWREV_MAJOR 29 67 68 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 69 .major = MLXSW_SP2_FWREV_MAJOR, 70 .minor = MLXSW_SP_FWREV_MINOR, 71 .subminor = MLXSW_SP_FWREV_SUBMINOR, 72 }; 73 74 #define MLXSW_SP2_FW_FILENAME \ 75 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 76 "." __stringify(MLXSW_SP_FWREV_MINOR) \ 77 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" 78 79 #define MLXSW_SP3_FWREV_MAJOR 30 80 81 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { 82 .major = MLXSW_SP3_FWREV_MAJOR, 83 .minor = MLXSW_SP_FWREV_MINOR, 84 .subminor = MLXSW_SP_FWREV_SUBMINOR, 85 }; 86 87 #define MLXSW_SP3_FW_FILENAME \ 88 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \ 89 "." __stringify(MLXSW_SP_FWREV_MINOR) \ 90 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" 91 92 #define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \ 93 "mellanox/lc_ini_bundle_" \ 94 __stringify(MLXSW_SP_FWREV_MINOR) "_" \ 95 __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin" 96 97 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 98 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 99 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 100 static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4"; 101 102 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 103 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 104 }; 105 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 106 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 107 }; 108 109 /* tx_hdr_version 110 * Tx header version. 111 * Must be set to 1. 112 */ 113 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 114 115 /* tx_hdr_ctl 116 * Packet control type. 117 * 0 - Ethernet control (e.g. EMADs, LACP) 118 * 1 - Ethernet data 119 */ 120 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 121 122 /* tx_hdr_proto 123 * Packet protocol type. Must be set to 1 (Ethernet). 124 */ 125 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 126 127 /* tx_hdr_rx_is_router 128 * Packet is sent from the router. Valid for data packets only. 129 */ 130 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 131 132 /* tx_hdr_fid_valid 133 * Indicates if the 'fid' field is valid and should be used for 134 * forwarding lookup. Valid for data packets only. 135 */ 136 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 137 138 /* tx_hdr_swid 139 * Switch partition ID. Must be set to 0. 140 */ 141 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 142 143 /* tx_hdr_control_tclass 144 * Indicates if the packet should use the control TClass and not one 145 * of the data TClasses. 146 */ 147 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 148 149 /* tx_hdr_etclass 150 * Egress TClass to be used on the egress device on the egress port. 151 */ 152 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 153 154 /* tx_hdr_port_mid 155 * Destination local port for unicast packets. 156 * Destination multicast ID for multicast packets. 157 * 158 * Control packets are directed to a specific egress port, while data 159 * packets are transmitted through the CPU port (0) into the switch partition, 160 * where forwarding rules are applied. 161 */ 162 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 163 164 /* tx_hdr_fid 165 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 166 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 167 * Valid for data packets only. 168 */ 169 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 170 171 /* tx_hdr_type 172 * 0 - Data packets 173 * 6 - Control packets 174 */ 175 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 176 177 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 178 unsigned int counter_index, u64 *packets, 179 u64 *bytes) 180 { 181 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 182 int err; 183 184 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 185 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 186 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 187 if (err) 188 return err; 189 if (packets) 190 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 191 if (bytes) 192 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 193 return 0; 194 } 195 196 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 197 unsigned int counter_index) 198 { 199 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 200 201 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 202 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 203 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 204 } 205 206 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 207 unsigned int *p_counter_index) 208 { 209 int err; 210 211 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 212 p_counter_index); 213 if (err) 214 return err; 215 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 216 if (err) 217 goto err_counter_clear; 218 return 0; 219 220 err_counter_clear: 221 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 222 *p_counter_index); 223 return err; 224 } 225 226 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 227 unsigned int counter_index) 228 { 229 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 230 counter_index); 231 } 232 233 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 234 const struct mlxsw_tx_info *tx_info) 235 { 236 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 237 238 memset(txhdr, 0, MLXSW_TXHDR_LEN); 239 240 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 241 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 242 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 243 mlxsw_tx_hdr_swid_set(txhdr, 0); 244 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 245 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 246 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 247 } 248 249 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 250 { 251 switch (state) { 252 case BR_STATE_FORWARDING: 253 return MLXSW_REG_SPMS_STATE_FORWARDING; 254 case BR_STATE_LEARNING: 255 return MLXSW_REG_SPMS_STATE_LEARNING; 256 case BR_STATE_LISTENING: 257 case BR_STATE_DISABLED: 258 case BR_STATE_BLOCKING: 259 return MLXSW_REG_SPMS_STATE_DISCARDING; 260 default: 261 BUG(); 262 } 263 } 264 265 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 266 u8 state) 267 { 268 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 269 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 270 char *spms_pl; 271 int err; 272 273 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 274 if (!spms_pl) 275 return -ENOMEM; 276 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 277 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 278 279 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 280 kfree(spms_pl); 281 return err; 282 } 283 284 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 285 { 286 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 287 int err; 288 289 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 290 if (err) 291 return err; 292 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 293 return 0; 294 } 295 296 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 297 bool is_up) 298 { 299 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 300 char paos_pl[MLXSW_REG_PAOS_LEN]; 301 302 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 303 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 304 MLXSW_PORT_ADMIN_STATUS_DOWN); 305 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 306 } 307 308 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 309 const unsigned char *addr) 310 { 311 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 312 char ppad_pl[MLXSW_REG_PPAD_LEN]; 313 314 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 315 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 316 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 317 } 318 319 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 320 { 321 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 322 323 eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac, 324 mlxsw_sp_port->local_port); 325 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, 326 mlxsw_sp_port->dev->dev_addr); 327 } 328 329 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu) 330 { 331 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 332 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 333 int err; 334 335 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 336 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 337 if (err) 338 return err; 339 340 *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 341 return 0; 342 } 343 344 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 345 { 346 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 347 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 348 349 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 350 if (mtu > mlxsw_sp_port->max_mtu) 351 return -EINVAL; 352 353 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 354 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 355 } 356 357 static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, 358 u16 local_port, u8 swid) 359 { 360 char pspa_pl[MLXSW_REG_PSPA_LEN]; 361 362 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); 363 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 364 } 365 366 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 367 { 368 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 369 char svpe_pl[MLXSW_REG_SVPE_LEN]; 370 371 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 372 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 373 } 374 375 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 376 bool learn_enable) 377 { 378 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 379 char *spvmlr_pl; 380 int err; 381 382 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 383 if (!spvmlr_pl) 384 return -ENOMEM; 385 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 386 learn_enable); 387 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 388 kfree(spvmlr_pl); 389 return err; 390 } 391 392 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type) 393 { 394 switch (ethtype) { 395 case ETH_P_8021Q: 396 *p_sver_type = 0; 397 break; 398 case ETH_P_8021AD: 399 *p_sver_type = 1; 400 break; 401 default: 402 return -EINVAL; 403 } 404 405 return 0; 406 } 407 408 int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port, 409 u16 ethtype) 410 { 411 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 412 char spevet_pl[MLXSW_REG_SPEVET_LEN]; 413 u8 sver_type; 414 int err; 415 416 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type); 417 if (err) 418 return err; 419 420 mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type); 421 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl); 422 } 423 424 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 425 u16 vid, u16 ethtype) 426 { 427 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 428 char spvid_pl[MLXSW_REG_SPVID_LEN]; 429 u8 sver_type; 430 int err; 431 432 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type); 433 if (err) 434 return err; 435 436 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid, 437 sver_type); 438 439 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 440 } 441 442 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 443 bool allow) 444 { 445 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 446 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 447 448 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 449 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 450 } 451 452 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 453 u16 ethtype) 454 { 455 int err; 456 457 if (!vid) { 458 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 459 if (err) 460 return err; 461 } else { 462 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype); 463 if (err) 464 return err; 465 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 466 if (err) 467 goto err_port_allow_untagged_set; 468 } 469 470 mlxsw_sp_port->pvid = vid; 471 return 0; 472 473 err_port_allow_untagged_set: 474 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype); 475 return err; 476 } 477 478 static int 479 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 480 { 481 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 482 char sspr_pl[MLXSW_REG_SSPR_LEN]; 483 484 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 485 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 486 } 487 488 static int 489 mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp, 490 u16 local_port, char *pmlp_pl, 491 struct mlxsw_sp_port_mapping *port_mapping) 492 { 493 bool separate_rxtx; 494 u8 first_lane; 495 u8 slot_index; 496 u8 module; 497 u8 width; 498 int i; 499 500 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 501 slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0); 502 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 503 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 504 first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 505 506 if (width && !is_power_of_2(width)) { 507 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n", 508 local_port); 509 return -EINVAL; 510 } 511 512 for (i = 0; i < width; i++) { 513 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 514 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 515 local_port); 516 return -EINVAL; 517 } 518 if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) { 519 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n", 520 local_port); 521 return -EINVAL; 522 } 523 if (separate_rxtx && 524 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 525 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 526 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 527 local_port); 528 return -EINVAL; 529 } 530 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) { 531 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 532 local_port); 533 return -EINVAL; 534 } 535 } 536 537 port_mapping->module = module; 538 port_mapping->slot_index = slot_index; 539 port_mapping->width = width; 540 port_mapping->module_width = width; 541 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 542 return 0; 543 } 544 545 static int 546 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, 547 struct mlxsw_sp_port_mapping *port_mapping) 548 { 549 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 550 int err; 551 552 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 553 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 554 if (err) 555 return err; 556 return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, 557 pmlp_pl, port_mapping); 558 } 559 560 static int 561 mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, 562 const struct mlxsw_sp_port_mapping *port_mapping) 563 { 564 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 565 int i, err; 566 567 mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index, 568 port_mapping->module); 569 570 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 571 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 572 for (i = 0; i < port_mapping->width; i++) { 573 mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i, 574 port_mapping->slot_index); 575 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 576 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 577 } 578 579 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 580 if (err) 581 goto err_pmlp_write; 582 return 0; 583 584 err_pmlp_write: 585 mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index, 586 port_mapping->module); 587 return err; 588 } 589 590 static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port, 591 u8 slot_index, u8 module) 592 { 593 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 594 595 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 596 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 597 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 598 mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module); 599 } 600 601 static int mlxsw_sp_port_open(struct net_device *dev) 602 { 603 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 604 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 605 int err; 606 607 err = mlxsw_env_module_port_up(mlxsw_sp->core, 608 mlxsw_sp_port->mapping.slot_index, 609 mlxsw_sp_port->mapping.module); 610 if (err) 611 return err; 612 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 613 if (err) 614 goto err_port_admin_status_set; 615 netif_start_queue(dev); 616 return 0; 617 618 err_port_admin_status_set: 619 mlxsw_env_module_port_down(mlxsw_sp->core, 620 mlxsw_sp_port->mapping.slot_index, 621 mlxsw_sp_port->mapping.module); 622 return err; 623 } 624 625 static int mlxsw_sp_port_stop(struct net_device *dev) 626 { 627 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 628 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 629 630 netif_stop_queue(dev); 631 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 632 mlxsw_env_module_port_down(mlxsw_sp->core, 633 mlxsw_sp_port->mapping.slot_index, 634 mlxsw_sp_port->mapping.module); 635 return 0; 636 } 637 638 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 639 struct net_device *dev) 640 { 641 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 642 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 643 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 644 const struct mlxsw_tx_info tx_info = { 645 .local_port = mlxsw_sp_port->local_port, 646 .is_emad = false, 647 }; 648 u64 len; 649 int err; 650 651 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 652 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 653 dev_kfree_skb_any(skb); 654 return NETDEV_TX_OK; 655 } 656 657 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 658 659 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 660 return NETDEV_TX_BUSY; 661 662 if (eth_skb_pad(skb)) { 663 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 664 return NETDEV_TX_OK; 665 } 666 667 mlxsw_sp_txhdr_construct(skb, &tx_info); 668 /* TX header is consumed by HW on the way so we shouldn't count its 669 * bytes as being sent. 670 */ 671 len = skb->len - MLXSW_TXHDR_LEN; 672 673 /* Due to a race we might fail here because of a full queue. In that 674 * unlikely case we simply drop the packet. 675 */ 676 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 677 678 if (!err) { 679 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 680 u64_stats_update_begin(&pcpu_stats->syncp); 681 pcpu_stats->tx_packets++; 682 pcpu_stats->tx_bytes += len; 683 u64_stats_update_end(&pcpu_stats->syncp); 684 } else { 685 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 686 dev_kfree_skb_any(skb); 687 } 688 return NETDEV_TX_OK; 689 } 690 691 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 692 { 693 } 694 695 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 696 { 697 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 698 struct sockaddr *addr = p; 699 int err; 700 701 if (!is_valid_ether_addr(addr->sa_data)) 702 return -EADDRNOTAVAIL; 703 704 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 705 if (err) 706 return err; 707 eth_hw_addr_set(dev, addr->sa_data); 708 return 0; 709 } 710 711 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 712 { 713 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 714 struct mlxsw_sp_hdroom orig_hdroom; 715 struct mlxsw_sp_hdroom hdroom; 716 int err; 717 718 orig_hdroom = *mlxsw_sp_port->hdroom; 719 720 hdroom = orig_hdroom; 721 hdroom.mtu = mtu; 722 mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom); 723 724 err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom); 725 if (err) { 726 netdev_err(dev, "Failed to configure port's headroom\n"); 727 return err; 728 } 729 730 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 731 if (err) 732 goto err_port_mtu_set; 733 dev->mtu = mtu; 734 return 0; 735 736 err_port_mtu_set: 737 mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom); 738 return err; 739 } 740 741 static int 742 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 743 struct rtnl_link_stats64 *stats) 744 { 745 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 746 struct mlxsw_sp_port_pcpu_stats *p; 747 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 748 u32 tx_dropped = 0; 749 unsigned int start; 750 int i; 751 752 for_each_possible_cpu(i) { 753 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 754 do { 755 start = u64_stats_fetch_begin_irq(&p->syncp); 756 rx_packets = p->rx_packets; 757 rx_bytes = p->rx_bytes; 758 tx_packets = p->tx_packets; 759 tx_bytes = p->tx_bytes; 760 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 761 762 stats->rx_packets += rx_packets; 763 stats->rx_bytes += rx_bytes; 764 stats->tx_packets += tx_packets; 765 stats->tx_bytes += tx_bytes; 766 /* tx_dropped is u32, updated without syncp protection. */ 767 tx_dropped += p->tx_dropped; 768 } 769 stats->tx_dropped = tx_dropped; 770 return 0; 771 } 772 773 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 774 { 775 switch (attr_id) { 776 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 777 return true; 778 } 779 780 return false; 781 } 782 783 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 784 void *sp) 785 { 786 switch (attr_id) { 787 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 788 return mlxsw_sp_port_get_sw_stats64(dev, sp); 789 } 790 791 return -EINVAL; 792 } 793 794 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 795 int prio, char *ppcnt_pl) 796 { 797 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 798 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 799 800 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 801 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 802 } 803 804 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 805 struct rtnl_link_stats64 *stats) 806 { 807 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 808 int err; 809 810 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 811 0, ppcnt_pl); 812 if (err) 813 goto out; 814 815 stats->tx_packets = 816 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 817 stats->rx_packets = 818 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 819 stats->tx_bytes = 820 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 821 stats->rx_bytes = 822 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 823 stats->multicast = 824 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 825 826 stats->rx_crc_errors = 827 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 828 stats->rx_frame_errors = 829 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 830 831 stats->rx_length_errors = ( 832 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 833 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 834 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 835 836 stats->rx_errors = (stats->rx_crc_errors + 837 stats->rx_frame_errors + stats->rx_length_errors); 838 839 out: 840 return err; 841 } 842 843 static void 844 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 845 struct mlxsw_sp_port_xstats *xstats) 846 { 847 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 848 int err, i; 849 850 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 851 ppcnt_pl); 852 if (!err) 853 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 854 855 for (i = 0; i < TC_MAX_QUEUE; i++) { 856 err = mlxsw_sp_port_get_stats_raw(dev, 857 MLXSW_REG_PPCNT_TC_CONG_CNT, 858 i, ppcnt_pl); 859 if (err) 860 goto tc_cnt; 861 862 xstats->wred_drop[i] = 863 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 864 xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl); 865 866 tc_cnt: 867 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 868 i, ppcnt_pl); 869 if (err) 870 continue; 871 872 xstats->backlog[i] = 873 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 874 xstats->tail_drop[i] = 875 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 876 } 877 878 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 879 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 880 i, ppcnt_pl); 881 if (err) 882 continue; 883 884 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 885 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 886 } 887 } 888 889 static void update_stats_cache(struct work_struct *work) 890 { 891 struct mlxsw_sp_port *mlxsw_sp_port = 892 container_of(work, struct mlxsw_sp_port, 893 periodic_hw_stats.update_dw.work); 894 895 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 896 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as 897 * necessary when port goes down. 898 */ 899 goto out; 900 901 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 902 &mlxsw_sp_port->periodic_hw_stats.stats); 903 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 904 &mlxsw_sp_port->periodic_hw_stats.xstats); 905 906 out: 907 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 908 MLXSW_HW_STATS_UPDATE_TIME); 909 } 910 911 /* Return the stats from a cache that is updated periodically, 912 * as this function might get called in an atomic context. 913 */ 914 static void 915 mlxsw_sp_port_get_stats64(struct net_device *dev, 916 struct rtnl_link_stats64 *stats) 917 { 918 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 919 920 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 921 } 922 923 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 924 u16 vid_begin, u16 vid_end, 925 bool is_member, bool untagged) 926 { 927 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 928 char *spvm_pl; 929 int err; 930 931 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 932 if (!spvm_pl) 933 return -ENOMEM; 934 935 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 936 vid_end, is_member, untagged); 937 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 938 kfree(spvm_pl); 939 return err; 940 } 941 942 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 943 u16 vid_end, bool is_member, bool untagged) 944 { 945 u16 vid, vid_e; 946 int err; 947 948 for (vid = vid_begin; vid <= vid_end; 949 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 950 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 951 vid_end); 952 953 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 954 is_member, untagged); 955 if (err) 956 return err; 957 } 958 959 return 0; 960 } 961 962 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 963 bool flush_default) 964 { 965 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 966 967 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 968 &mlxsw_sp_port->vlans_list, list) { 969 if (!flush_default && 970 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 971 continue; 972 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 973 } 974 } 975 976 static void 977 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 978 { 979 if (mlxsw_sp_port_vlan->bridge_port) 980 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 981 else if (mlxsw_sp_port_vlan->fid) 982 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 983 } 984 985 struct mlxsw_sp_port_vlan * 986 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 987 { 988 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 989 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 990 int err; 991 992 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 993 if (mlxsw_sp_port_vlan) 994 return ERR_PTR(-EEXIST); 995 996 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 997 if (err) 998 return ERR_PTR(err); 999 1000 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1001 if (!mlxsw_sp_port_vlan) { 1002 err = -ENOMEM; 1003 goto err_port_vlan_alloc; 1004 } 1005 1006 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1007 mlxsw_sp_port_vlan->vid = vid; 1008 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1009 1010 return mlxsw_sp_port_vlan; 1011 1012 err_port_vlan_alloc: 1013 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1014 return ERR_PTR(err); 1015 } 1016 1017 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1018 { 1019 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1020 u16 vid = mlxsw_sp_port_vlan->vid; 1021 1022 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1023 list_del(&mlxsw_sp_port_vlan->list); 1024 kfree(mlxsw_sp_port_vlan); 1025 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1026 } 1027 1028 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1029 __be16 __always_unused proto, u16 vid) 1030 { 1031 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1032 1033 /* VLAN 0 is added to HW filter when device goes up, but it is 1034 * reserved in our case, so simply return. 1035 */ 1036 if (!vid) 1037 return 0; 1038 1039 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1040 } 1041 1042 static int mlxsw_sp_port_kill_vid(struct net_device *dev, 1043 __be16 __always_unused proto, u16 vid) 1044 { 1045 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1046 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1047 1048 /* VLAN 0 is removed from HW filter when device goes down, but 1049 * it is reserved in our case, so simply return. 1050 */ 1051 if (!vid) 1052 return 0; 1053 1054 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1055 if (!mlxsw_sp_port_vlan) 1056 return 0; 1057 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1058 1059 return 0; 1060 } 1061 1062 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1063 struct flow_block_offload *f) 1064 { 1065 switch (f->binder_type) { 1066 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: 1067 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true); 1068 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: 1069 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false); 1070 case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP: 1071 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f); 1072 case FLOW_BLOCK_BINDER_TYPE_RED_MARK: 1073 return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f); 1074 default: 1075 return -EOPNOTSUPP; 1076 } 1077 } 1078 1079 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1080 void *type_data) 1081 { 1082 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1083 1084 switch (type) { 1085 case TC_SETUP_BLOCK: 1086 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1087 case TC_SETUP_QDISC_RED: 1088 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1089 case TC_SETUP_QDISC_PRIO: 1090 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1091 case TC_SETUP_QDISC_ETS: 1092 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data); 1093 case TC_SETUP_QDISC_TBF: 1094 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data); 1095 case TC_SETUP_QDISC_FIFO: 1096 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data); 1097 default: 1098 return -EOPNOTSUPP; 1099 } 1100 } 1101 1102 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1103 { 1104 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1105 1106 if (!enable) { 1107 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) || 1108 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) { 1109 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1110 return -EINVAL; 1111 } 1112 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block); 1113 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block); 1114 } else { 1115 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block); 1116 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block); 1117 } 1118 return 0; 1119 } 1120 1121 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable) 1122 { 1123 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1124 char pplr_pl[MLXSW_REG_PPLR_LEN]; 1125 int err; 1126 1127 if (netif_running(dev)) 1128 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1129 1130 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable); 1131 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr), 1132 pplr_pl); 1133 1134 if (netif_running(dev)) 1135 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1136 1137 return err; 1138 } 1139 1140 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1141 1142 static int mlxsw_sp_handle_feature(struct net_device *dev, 1143 netdev_features_t wanted_features, 1144 netdev_features_t feature, 1145 mlxsw_sp_feature_handler feature_handler) 1146 { 1147 netdev_features_t changes = wanted_features ^ dev->features; 1148 bool enable = !!(wanted_features & feature); 1149 int err; 1150 1151 if (!(changes & feature)) 1152 return 0; 1153 1154 err = feature_handler(dev, enable); 1155 if (err) { 1156 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1157 enable ? "Enable" : "Disable", &feature, err); 1158 return err; 1159 } 1160 1161 if (enable) 1162 dev->features |= feature; 1163 else 1164 dev->features &= ~feature; 1165 1166 return 0; 1167 } 1168 static int mlxsw_sp_set_features(struct net_device *dev, 1169 netdev_features_t features) 1170 { 1171 netdev_features_t oper_features = dev->features; 1172 int err = 0; 1173 1174 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1175 mlxsw_sp_feature_hw_tc); 1176 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK, 1177 mlxsw_sp_feature_loopback); 1178 1179 if (err) { 1180 dev->features = oper_features; 1181 return -EINVAL; 1182 } 1183 1184 return 0; 1185 } 1186 1187 static struct devlink_port * 1188 mlxsw_sp_port_get_devlink_port(struct net_device *dev) 1189 { 1190 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1191 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1192 1193 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core, 1194 mlxsw_sp_port->local_port); 1195 } 1196 1197 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, 1198 struct ifreq *ifr) 1199 { 1200 struct hwtstamp_config config; 1201 int err; 1202 1203 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1204 return -EFAULT; 1205 1206 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, 1207 &config); 1208 if (err) 1209 return err; 1210 1211 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1212 return -EFAULT; 1213 1214 return 0; 1215 } 1216 1217 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, 1218 struct ifreq *ifr) 1219 { 1220 struct hwtstamp_config config; 1221 int err; 1222 1223 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, 1224 &config); 1225 if (err) 1226 return err; 1227 1228 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1229 return -EFAULT; 1230 1231 return 0; 1232 } 1233 1234 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) 1235 { 1236 struct hwtstamp_config config = {0}; 1237 1238 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1239 } 1240 1241 static int 1242 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1243 { 1244 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1245 1246 switch (cmd) { 1247 case SIOCSHWTSTAMP: 1248 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1249 case SIOCGHWTSTAMP: 1250 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1251 default: 1252 return -EOPNOTSUPP; 1253 } 1254 } 1255 1256 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1257 .ndo_open = mlxsw_sp_port_open, 1258 .ndo_stop = mlxsw_sp_port_stop, 1259 .ndo_start_xmit = mlxsw_sp_port_xmit, 1260 .ndo_setup_tc = mlxsw_sp_setup_tc, 1261 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1262 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1263 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1264 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1265 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1266 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1267 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1268 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1269 .ndo_set_features = mlxsw_sp_set_features, 1270 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1271 .ndo_eth_ioctl = mlxsw_sp_port_ioctl, 1272 }; 1273 1274 static int 1275 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) 1276 { 1277 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1278 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 1279 const struct mlxsw_sp_port_type_speed_ops *ops; 1280 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1281 u32 eth_proto_cap_masked; 1282 int err; 1283 1284 ops = mlxsw_sp->port_type_speed_ops; 1285 1286 /* Set advertised speeds to speeds supported by both the driver 1287 * and the device. 1288 */ 1289 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 1290 0, false); 1291 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1292 if (err) 1293 return err; 1294 1295 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 1296 ð_proto_admin, ð_proto_oper); 1297 eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap); 1298 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 1299 eth_proto_cap_masked, 1300 mlxsw_sp_port->link.autoneg); 1301 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1302 } 1303 1304 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed) 1305 { 1306 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; 1307 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1308 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1309 u32 eth_proto_oper; 1310 int err; 1311 1312 port_type_speed_ops = mlxsw_sp->port_type_speed_ops; 1313 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, 1314 mlxsw_sp_port->local_port, 0, 1315 false); 1316 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1317 if (err) 1318 return err; 1319 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL, 1320 ð_proto_oper); 1321 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper); 1322 return 0; 1323 } 1324 1325 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 1326 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 1327 bool dwrr, u8 dwrr_weight) 1328 { 1329 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1330 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1331 1332 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1333 next_index); 1334 mlxsw_reg_qeec_de_set(qeec_pl, true); 1335 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 1336 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 1337 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1338 } 1339 1340 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 1341 enum mlxsw_reg_qeec_hr hr, u8 index, 1342 u8 next_index, u32 maxrate, u8 burst_size) 1343 { 1344 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1345 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1346 1347 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1348 next_index); 1349 mlxsw_reg_qeec_mase_set(qeec_pl, true); 1350 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 1351 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size); 1352 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1353 } 1354 1355 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 1356 enum mlxsw_reg_qeec_hr hr, u8 index, 1357 u8 next_index, u32 minrate) 1358 { 1359 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1360 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1361 1362 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1363 next_index); 1364 mlxsw_reg_qeec_mise_set(qeec_pl, true); 1365 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 1366 1367 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1368 } 1369 1370 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 1371 u8 switch_prio, u8 tclass) 1372 { 1373 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1374 char qtct_pl[MLXSW_REG_QTCT_LEN]; 1375 1376 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 1377 tclass); 1378 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 1379 } 1380 1381 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 1382 { 1383 int err, i; 1384 1385 /* Setup the elements hierarcy, so that each TC is linked to 1386 * one subgroup, which are all member in the same group. 1387 */ 1388 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1389 MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0); 1390 if (err) 1391 return err; 1392 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1393 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1394 MLXSW_REG_QEEC_HR_SUBGROUP, i, 1395 0, false, 0); 1396 if (err) 1397 return err; 1398 } 1399 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1400 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1401 MLXSW_REG_QEEC_HR_TC, i, i, 1402 false, 0); 1403 if (err) 1404 return err; 1405 1406 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1407 MLXSW_REG_QEEC_HR_TC, 1408 i + 8, i, 1409 true, 100); 1410 if (err) 1411 return err; 1412 } 1413 1414 /* Make sure the max shaper is disabled in all hierarchies that support 1415 * it. Note that this disables ptps (PTP shaper), but that is intended 1416 * for the initial configuration. 1417 */ 1418 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1419 MLXSW_REG_QEEC_HR_PORT, 0, 0, 1420 MLXSW_REG_QEEC_MAS_DIS, 0); 1421 if (err) 1422 return err; 1423 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1424 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1425 MLXSW_REG_QEEC_HR_SUBGROUP, 1426 i, 0, 1427 MLXSW_REG_QEEC_MAS_DIS, 0); 1428 if (err) 1429 return err; 1430 } 1431 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1432 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1433 MLXSW_REG_QEEC_HR_TC, 1434 i, i, 1435 MLXSW_REG_QEEC_MAS_DIS, 0); 1436 if (err) 1437 return err; 1438 1439 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1440 MLXSW_REG_QEEC_HR_TC, 1441 i + 8, i, 1442 MLXSW_REG_QEEC_MAS_DIS, 0); 1443 if (err) 1444 return err; 1445 } 1446 1447 /* Configure the min shaper for multicast TCs. */ 1448 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1449 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 1450 MLXSW_REG_QEEC_HR_TC, 1451 i + 8, i, 1452 MLXSW_REG_QEEC_MIS_MIN); 1453 if (err) 1454 return err; 1455 } 1456 1457 /* Map all priorities to traffic class 0. */ 1458 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1459 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 1460 if (err) 1461 return err; 1462 } 1463 1464 return 0; 1465 } 1466 1467 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 1468 bool enable) 1469 { 1470 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1471 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 1472 1473 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 1474 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 1475 } 1476 1477 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port) 1478 { 1479 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1480 u8 slot_index = mlxsw_sp_port->mapping.slot_index; 1481 u8 module = mlxsw_sp_port->mapping.module; 1482 u64 overheat_counter; 1483 int err; 1484 1485 err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index, 1486 module, &overheat_counter); 1487 if (err) 1488 return err; 1489 1490 mlxsw_sp_port->module_overheat_initial_val = overheat_counter; 1491 return 0; 1492 } 1493 1494 int 1495 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, 1496 bool is_8021ad_tagged, 1497 bool is_8021q_tagged) 1498 { 1499 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1500 char spvc_pl[MLXSW_REG_SPVC_LEN]; 1501 1502 mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port, 1503 is_8021ad_tagged, is_8021q_tagged); 1504 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl); 1505 } 1506 1507 static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp, 1508 u16 local_port, u8 *port_number, 1509 u8 *split_port_subnumber, 1510 u8 *slot_index) 1511 { 1512 char pllp_pl[MLXSW_REG_PLLP_LEN]; 1513 int err; 1514 1515 mlxsw_reg_pllp_pack(pllp_pl, local_port); 1516 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl); 1517 if (err) 1518 return err; 1519 mlxsw_reg_pllp_unpack(pllp_pl, port_number, 1520 split_port_subnumber, slot_index); 1521 return 0; 1522 } 1523 1524 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, 1525 bool split, 1526 struct mlxsw_sp_port_mapping *port_mapping) 1527 { 1528 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1529 struct mlxsw_sp_port *mlxsw_sp_port; 1530 u32 lanes = port_mapping->width; 1531 u8 split_port_subnumber; 1532 struct net_device *dev; 1533 u8 port_number; 1534 u8 slot_index; 1535 bool splittable; 1536 int err; 1537 1538 err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping); 1539 if (err) { 1540 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 1541 local_port); 1542 return err; 1543 } 1544 1545 err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0); 1546 if (err) { 1547 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 1548 local_port); 1549 goto err_port_swid_set; 1550 } 1551 1552 err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number, 1553 &split_port_subnumber, &slot_index); 1554 if (err) { 1555 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n", 1556 local_port); 1557 goto err_port_label_info_get; 1558 } 1559 1560 splittable = lanes > 1 && !split; 1561 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index, 1562 port_number, split, split_port_subnumber, 1563 splittable, lanes, mlxsw_sp->base_mac, 1564 sizeof(mlxsw_sp->base_mac)); 1565 if (err) { 1566 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 1567 local_port); 1568 goto err_core_port_init; 1569 } 1570 1571 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 1572 if (!dev) { 1573 err = -ENOMEM; 1574 goto err_alloc_etherdev; 1575 } 1576 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 1577 dev_net_set(dev, mlxsw_sp_net(mlxsw_sp)); 1578 mlxsw_sp_port = netdev_priv(dev); 1579 mlxsw_sp_port->dev = dev; 1580 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1581 mlxsw_sp_port->local_port = local_port; 1582 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 1583 mlxsw_sp_port->split = split; 1584 mlxsw_sp_port->mapping = *port_mapping; 1585 mlxsw_sp_port->link.autoneg = 1; 1586 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 1587 1588 mlxsw_sp_port->pcpu_stats = 1589 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 1590 if (!mlxsw_sp_port->pcpu_stats) { 1591 err = -ENOMEM; 1592 goto err_alloc_stats; 1593 } 1594 1595 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1596 &update_stats_cache); 1597 1598 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 1599 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 1600 1601 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 1602 if (err) { 1603 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 1604 mlxsw_sp_port->local_port); 1605 goto err_dev_addr_init; 1606 } 1607 1608 netif_carrier_off(dev); 1609 1610 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 1611 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 1612 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 1613 1614 dev->min_mtu = 0; 1615 dev->max_mtu = ETH_MAX_MTU; 1616 1617 /* Each packet needs to have a Tx header (metadata) on top all other 1618 * headers. 1619 */ 1620 dev->needed_headroom = MLXSW_TXHDR_LEN; 1621 1622 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 1623 if (err) { 1624 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 1625 mlxsw_sp_port->local_port); 1626 goto err_port_system_port_mapping_set; 1627 } 1628 1629 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port); 1630 if (err) { 1631 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 1632 mlxsw_sp_port->local_port); 1633 goto err_port_speed_by_width_set; 1634 } 1635 1636 err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port, 1637 &mlxsw_sp_port->max_speed); 1638 if (err) { 1639 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n", 1640 mlxsw_sp_port->local_port); 1641 goto err_max_speed_get; 1642 } 1643 1644 err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu); 1645 if (err) { 1646 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n", 1647 mlxsw_sp_port->local_port); 1648 goto err_port_max_mtu_get; 1649 } 1650 1651 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 1652 if (err) { 1653 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 1654 mlxsw_sp_port->local_port); 1655 goto err_port_mtu_set; 1656 } 1657 1658 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1659 if (err) 1660 goto err_port_admin_status_set; 1661 1662 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 1663 if (err) { 1664 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 1665 mlxsw_sp_port->local_port); 1666 goto err_port_buffers_init; 1667 } 1668 1669 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 1670 if (err) { 1671 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 1672 mlxsw_sp_port->local_port); 1673 goto err_port_ets_init; 1674 } 1675 1676 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 1677 if (err) { 1678 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 1679 mlxsw_sp_port->local_port); 1680 goto err_port_tc_mc_mode; 1681 } 1682 1683 /* ETS and buffers must be initialized before DCB. */ 1684 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 1685 if (err) { 1686 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 1687 mlxsw_sp_port->local_port); 1688 goto err_port_dcb_init; 1689 } 1690 1691 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 1692 if (err) { 1693 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 1694 mlxsw_sp_port->local_port); 1695 goto err_port_fids_init; 1696 } 1697 1698 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 1699 if (err) { 1700 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 1701 mlxsw_sp_port->local_port); 1702 goto err_port_qdiscs_init; 1703 } 1704 1705 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false, 1706 false); 1707 if (err) { 1708 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n", 1709 mlxsw_sp_port->local_port); 1710 goto err_port_vlan_clear; 1711 } 1712 1713 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 1714 if (err) { 1715 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 1716 mlxsw_sp_port->local_port); 1717 goto err_port_nve_init; 1718 } 1719 1720 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, 1721 ETH_P_8021Q); 1722 if (err) { 1723 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 1724 mlxsw_sp_port->local_port); 1725 goto err_port_pvid_set; 1726 } 1727 1728 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 1729 MLXSW_SP_DEFAULT_VID); 1730 if (IS_ERR(mlxsw_sp_port_vlan)) { 1731 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 1732 mlxsw_sp_port->local_port); 1733 err = PTR_ERR(mlxsw_sp_port_vlan); 1734 goto err_port_vlan_create; 1735 } 1736 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 1737 1738 /* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat 1739 * only packets with 802.1q header as tagged packets. 1740 */ 1741 err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true); 1742 if (err) { 1743 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n", 1744 local_port); 1745 goto err_port_vlan_classification_set; 1746 } 1747 1748 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 1749 mlxsw_sp->ptp_ops->shaper_work); 1750 1751 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 1752 1753 err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port); 1754 if (err) { 1755 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n", 1756 mlxsw_sp_port->local_port); 1757 goto err_port_overheat_init_val_set; 1758 } 1759 1760 err = register_netdev(dev); 1761 if (err) { 1762 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 1763 mlxsw_sp_port->local_port); 1764 goto err_register_netdev; 1765 } 1766 1767 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 1768 mlxsw_sp_port, dev); 1769 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 1770 return 0; 1771 1772 err_register_netdev: 1773 err_port_overheat_init_val_set: 1774 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); 1775 err_port_vlan_classification_set: 1776 mlxsw_sp->ports[local_port] = NULL; 1777 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1778 err_port_vlan_create: 1779 err_port_pvid_set: 1780 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1781 err_port_nve_init: 1782 err_port_vlan_clear: 1783 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1784 err_port_qdiscs_init: 1785 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1786 err_port_fids_init: 1787 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1788 err_port_dcb_init: 1789 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1790 err_port_tc_mc_mode: 1791 err_port_ets_init: 1792 mlxsw_sp_port_buffers_fini(mlxsw_sp_port); 1793 err_port_buffers_init: 1794 err_port_admin_status_set: 1795 err_port_mtu_set: 1796 err_port_max_mtu_get: 1797 err_max_speed_get: 1798 err_port_speed_by_width_set: 1799 err_port_system_port_mapping_set: 1800 err_dev_addr_init: 1801 free_percpu(mlxsw_sp_port->pcpu_stats); 1802 err_alloc_stats: 1803 free_netdev(dev); 1804 err_alloc_etherdev: 1805 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1806 err_core_port_init: 1807 err_port_label_info_get: 1808 mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 1809 MLXSW_PORT_SWID_DISABLED_PORT); 1810 err_port_swid_set: 1811 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, 1812 port_mapping->slot_index, 1813 port_mapping->module); 1814 return err; 1815 } 1816 1817 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) 1818 { 1819 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1820 u8 slot_index = mlxsw_sp_port->mapping.slot_index; 1821 u8 module = mlxsw_sp_port->mapping.module; 1822 1823 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 1824 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 1825 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 1826 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 1827 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1828 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); 1829 mlxsw_sp->ports[local_port] = NULL; 1830 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 1831 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1832 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1833 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1834 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1835 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1836 mlxsw_sp_port_buffers_fini(mlxsw_sp_port); 1837 free_percpu(mlxsw_sp_port->pcpu_stats); 1838 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 1839 free_netdev(mlxsw_sp_port->dev); 1840 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1841 mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 1842 MLXSW_PORT_SWID_DISABLED_PORT); 1843 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module); 1844 } 1845 1846 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 1847 { 1848 struct mlxsw_sp_port *mlxsw_sp_port; 1849 int err; 1850 1851 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 1852 if (!mlxsw_sp_port) 1853 return -ENOMEM; 1854 1855 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1856 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 1857 1858 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 1859 mlxsw_sp_port, 1860 mlxsw_sp->base_mac, 1861 sizeof(mlxsw_sp->base_mac)); 1862 if (err) { 1863 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 1864 goto err_core_cpu_port_init; 1865 } 1866 1867 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 1868 return 0; 1869 1870 err_core_cpu_port_init: 1871 kfree(mlxsw_sp_port); 1872 return err; 1873 } 1874 1875 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 1876 { 1877 struct mlxsw_sp_port *mlxsw_sp_port = 1878 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 1879 1880 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 1881 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 1882 kfree(mlxsw_sp_port); 1883 } 1884 1885 static bool mlxsw_sp_local_port_valid(u16 local_port) 1886 { 1887 return local_port != MLXSW_PORT_CPU_PORT; 1888 } 1889 1890 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port) 1891 { 1892 if (!mlxsw_sp_local_port_valid(local_port)) 1893 return false; 1894 return mlxsw_sp->ports[local_port] != NULL; 1895 } 1896 1897 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp, 1898 u16 local_port, bool enable) 1899 { 1900 char pmecr_pl[MLXSW_REG_PMECR_LEN]; 1901 1902 mlxsw_reg_pmecr_pack(pmecr_pl, local_port, 1903 enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT : 1904 MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT); 1905 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl); 1906 } 1907 1908 struct mlxsw_sp_port_mapping_event { 1909 struct list_head list; 1910 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 1911 }; 1912 1913 static void mlxsw_sp_port_mapping_events_work(struct work_struct *work) 1914 { 1915 struct mlxsw_sp_port_mapping_event *event, *next_event; 1916 struct mlxsw_sp_port_mapping_events *events; 1917 struct mlxsw_sp_port_mapping port_mapping; 1918 struct mlxsw_sp *mlxsw_sp; 1919 struct devlink *devlink; 1920 LIST_HEAD(event_queue); 1921 u16 local_port; 1922 int err; 1923 1924 events = container_of(work, struct mlxsw_sp_port_mapping_events, work); 1925 mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events); 1926 devlink = priv_to_devlink(mlxsw_sp->core); 1927 1928 spin_lock_bh(&events->queue_lock); 1929 list_splice_init(&events->queue, &event_queue); 1930 spin_unlock_bh(&events->queue_lock); 1931 1932 list_for_each_entry_safe(event, next_event, &event_queue, list) { 1933 local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl); 1934 err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, 1935 event->pmlp_pl, &port_mapping); 1936 if (err) 1937 goto out; 1938 1939 if (WARN_ON_ONCE(!port_mapping.width)) 1940 goto out; 1941 1942 devl_lock(devlink); 1943 1944 if (!mlxsw_sp_port_created(mlxsw_sp, local_port)) 1945 mlxsw_sp_port_create(mlxsw_sp, local_port, 1946 false, &port_mapping); 1947 else 1948 WARN_ON_ONCE(1); 1949 1950 devl_unlock(devlink); 1951 1952 mlxsw_sp->port_mapping[local_port] = port_mapping; 1953 1954 out: 1955 kfree(event); 1956 } 1957 } 1958 1959 static void 1960 mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg, 1961 char *pmlp_pl, void *priv) 1962 { 1963 struct mlxsw_sp_port_mapping_events *events; 1964 struct mlxsw_sp_port_mapping_event *event; 1965 struct mlxsw_sp *mlxsw_sp = priv; 1966 u16 local_port; 1967 1968 local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl); 1969 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) 1970 return; 1971 1972 events = &mlxsw_sp->port_mapping_events; 1973 event = kmalloc(sizeof(*event), GFP_ATOMIC); 1974 if (!event) 1975 return; 1976 memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl)); 1977 spin_lock(&events->queue_lock); 1978 list_add_tail(&event->list, &events->queue); 1979 spin_unlock(&events->queue_lock); 1980 mlxsw_core_schedule_work(&events->work); 1981 } 1982 1983 static void 1984 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp) 1985 { 1986 struct mlxsw_sp_port_mapping_event *event, *next_event; 1987 struct mlxsw_sp_port_mapping_events *events; 1988 1989 events = &mlxsw_sp->port_mapping_events; 1990 1991 /* Caller needs to make sure that no new event is going to appear. */ 1992 cancel_work_sync(&events->work); 1993 list_for_each_entry_safe(event, next_event, &events->queue, list) { 1994 list_del(&event->list); 1995 kfree(event); 1996 } 1997 } 1998 1999 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 2000 { 2001 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2002 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); 2003 int i; 2004 2005 for (i = 1; i < max_ports; i++) 2006 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); 2007 /* Make sure all scheduled events are processed */ 2008 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); 2009 2010 devl_lock(devlink); 2011 for (i = 1; i < max_ports; i++) 2012 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2013 mlxsw_sp_port_remove(mlxsw_sp, i); 2014 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2015 devl_unlock(devlink); 2016 kfree(mlxsw_sp->ports); 2017 mlxsw_sp->ports = NULL; 2018 } 2019 2020 static void 2021 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core, 2022 bool (*selector)(void *priv, u16 local_port), 2023 void *priv) 2024 { 2025 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2026 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core); 2027 int i; 2028 2029 for (i = 1; i < max_ports; i++) 2030 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i)) 2031 mlxsw_sp_port_remove(mlxsw_sp, i); 2032 } 2033 2034 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 2035 { 2036 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2037 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); 2038 struct mlxsw_sp_port_mapping_events *events; 2039 struct mlxsw_sp_port_mapping *port_mapping; 2040 size_t alloc_size; 2041 int i; 2042 int err; 2043 2044 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 2045 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 2046 if (!mlxsw_sp->ports) 2047 return -ENOMEM; 2048 2049 events = &mlxsw_sp->port_mapping_events; 2050 INIT_LIST_HEAD(&events->queue); 2051 spin_lock_init(&events->queue_lock); 2052 INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work); 2053 2054 for (i = 1; i < max_ports; i++) { 2055 err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true); 2056 if (err) 2057 goto err_event_enable; 2058 } 2059 2060 devl_lock(devlink); 2061 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 2062 if (err) 2063 goto err_cpu_port_create; 2064 2065 for (i = 1; i < max_ports; i++) { 2066 port_mapping = &mlxsw_sp->port_mapping[i]; 2067 if (!port_mapping->width) 2068 continue; 2069 err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping); 2070 if (err) 2071 goto err_port_create; 2072 } 2073 devl_unlock(devlink); 2074 return 0; 2075 2076 err_port_create: 2077 for (i--; i >= 1; i--) 2078 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2079 mlxsw_sp_port_remove(mlxsw_sp, i); 2080 i = max_ports; 2081 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2082 err_cpu_port_create: 2083 devl_unlock(devlink); 2084 err_event_enable: 2085 for (i--; i >= 1; i--) 2086 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); 2087 /* Make sure all scheduled events are processed */ 2088 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); 2089 kfree(mlxsw_sp->ports); 2090 mlxsw_sp->ports = NULL; 2091 return err; 2092 } 2093 2094 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 2095 { 2096 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2097 struct mlxsw_sp_port_mapping *port_mapping; 2098 int i; 2099 int err; 2100 2101 mlxsw_sp->port_mapping = kcalloc(max_ports, 2102 sizeof(struct mlxsw_sp_port_mapping), 2103 GFP_KERNEL); 2104 if (!mlxsw_sp->port_mapping) 2105 return -ENOMEM; 2106 2107 for (i = 1; i < max_ports; i++) { 2108 port_mapping = &mlxsw_sp->port_mapping[i]; 2109 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping); 2110 if (err) 2111 goto err_port_module_info_get; 2112 } 2113 return 0; 2114 2115 err_port_module_info_get: 2116 kfree(mlxsw_sp->port_mapping); 2117 return err; 2118 } 2119 2120 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 2121 { 2122 kfree(mlxsw_sp->port_mapping); 2123 } 2124 2125 static int 2126 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, 2127 struct mlxsw_sp_port_mapping *port_mapping, 2128 unsigned int count, const char *pmtdb_pl) 2129 { 2130 struct mlxsw_sp_port_mapping split_port_mapping; 2131 int err, i; 2132 2133 split_port_mapping = *port_mapping; 2134 split_port_mapping.width /= count; 2135 for (i = 0; i < count; i++) { 2136 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2137 2138 if (!mlxsw_sp_local_port_valid(s_local_port)) 2139 continue; 2140 2141 err = mlxsw_sp_port_create(mlxsw_sp, s_local_port, 2142 true, &split_port_mapping); 2143 if (err) 2144 goto err_port_create; 2145 split_port_mapping.lane += split_port_mapping.width; 2146 } 2147 2148 return 0; 2149 2150 err_port_create: 2151 for (i--; i >= 0; i--) { 2152 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2153 2154 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2155 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2156 } 2157 return err; 2158 } 2159 2160 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 2161 unsigned int count, 2162 const char *pmtdb_pl) 2163 { 2164 struct mlxsw_sp_port_mapping *port_mapping; 2165 int i; 2166 2167 /* Go over original unsplit ports in the gap and recreate them. */ 2168 for (i = 0; i < count; i++) { 2169 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2170 2171 port_mapping = &mlxsw_sp->port_mapping[local_port]; 2172 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port)) 2173 continue; 2174 mlxsw_sp_port_create(mlxsw_sp, local_port, 2175 false, port_mapping); 2176 } 2177 } 2178 2179 static struct mlxsw_sp_port * 2180 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port) 2181 { 2182 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 2183 return mlxsw_sp->ports[local_port]; 2184 return NULL; 2185 } 2186 2187 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, 2188 unsigned int count, 2189 struct netlink_ext_ack *extack) 2190 { 2191 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2192 struct mlxsw_sp_port_mapping port_mapping; 2193 struct mlxsw_sp_port *mlxsw_sp_port; 2194 enum mlxsw_reg_pmtdb_status status; 2195 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2196 int i; 2197 int err; 2198 2199 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2200 if (!mlxsw_sp_port) { 2201 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2202 local_port); 2203 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2204 return -EINVAL; 2205 } 2206 2207 if (mlxsw_sp_port->split) { 2208 NL_SET_ERR_MSG_MOD(extack, "Port is already split"); 2209 return -EINVAL; 2210 } 2211 2212 mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, 2213 mlxsw_sp_port->mapping.module, 2214 mlxsw_sp_port->mapping.module_width / count, 2215 count); 2216 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2217 if (err) { 2218 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2219 return err; 2220 } 2221 2222 status = mlxsw_reg_pmtdb_status_get(pmtdb_pl); 2223 if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) { 2224 NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration"); 2225 return -EINVAL; 2226 } 2227 2228 port_mapping = mlxsw_sp_port->mapping; 2229 2230 for (i = 0; i < count; i++) { 2231 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2232 2233 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2234 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2235 } 2236 2237 err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping, 2238 count, pmtdb_pl); 2239 if (err) { 2240 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2241 goto err_port_split_create; 2242 } 2243 2244 return 0; 2245 2246 err_port_split_create: 2247 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2248 2249 return err; 2250 } 2251 2252 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port, 2253 struct netlink_ext_ack *extack) 2254 { 2255 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2256 struct mlxsw_sp_port *mlxsw_sp_port; 2257 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2258 unsigned int count; 2259 int i; 2260 int err; 2261 2262 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2263 if (!mlxsw_sp_port) { 2264 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2265 local_port); 2266 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2267 return -EINVAL; 2268 } 2269 2270 if (!mlxsw_sp_port->split) { 2271 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2272 return -EINVAL; 2273 } 2274 2275 count = mlxsw_sp_port->mapping.module_width / 2276 mlxsw_sp_port->mapping.width; 2277 2278 mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, 2279 mlxsw_sp_port->mapping.module, 2280 mlxsw_sp_port->mapping.module_width / count, 2281 count); 2282 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2283 if (err) { 2284 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2285 return err; 2286 } 2287 2288 for (i = 0; i < count; i++) { 2289 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2290 2291 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2292 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2293 } 2294 2295 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2296 2297 return 0; 2298 } 2299 2300 static void 2301 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 2302 { 2303 int i; 2304 2305 for (i = 0; i < TC_MAX_QUEUE; i++) 2306 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 2307 } 2308 2309 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2310 char *pude_pl, void *priv) 2311 { 2312 struct mlxsw_sp *mlxsw_sp = priv; 2313 struct mlxsw_sp_port *mlxsw_sp_port; 2314 enum mlxsw_reg_pude_oper_status status; 2315 u16 local_port; 2316 2317 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2318 2319 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) 2320 return; 2321 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2322 if (!mlxsw_sp_port) 2323 return; 2324 2325 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2326 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2327 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2328 netif_carrier_on(mlxsw_sp_port->dev); 2329 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 2330 } else { 2331 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2332 netif_carrier_off(mlxsw_sp_port->dev); 2333 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 2334 } 2335 } 2336 2337 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 2338 char *mtpptr_pl, bool ingress) 2339 { 2340 u16 local_port; 2341 u8 num_rec; 2342 int i; 2343 2344 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 2345 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 2346 for (i = 0; i < num_rec; i++) { 2347 u8 domain_number; 2348 u8 message_type; 2349 u16 sequence_id; 2350 u64 timestamp; 2351 2352 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 2353 &domain_number, &sequence_id, 2354 ×tamp); 2355 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 2356 message_type, domain_number, 2357 sequence_id, timestamp); 2358 } 2359 } 2360 2361 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 2362 char *mtpptr_pl, void *priv) 2363 { 2364 struct mlxsw_sp *mlxsw_sp = priv; 2365 2366 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 2367 } 2368 2369 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 2370 char *mtpptr_pl, void *priv) 2371 { 2372 struct mlxsw_sp *mlxsw_sp = priv; 2373 2374 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 2375 } 2376 2377 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2378 u16 local_port, void *priv) 2379 { 2380 struct mlxsw_sp *mlxsw_sp = priv; 2381 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2382 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2383 2384 if (unlikely(!mlxsw_sp_port)) { 2385 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2386 local_port); 2387 return; 2388 } 2389 2390 skb->dev = mlxsw_sp_port->dev; 2391 2392 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2393 u64_stats_update_begin(&pcpu_stats->syncp); 2394 pcpu_stats->rx_packets++; 2395 pcpu_stats->rx_bytes += skb->len; 2396 u64_stats_update_end(&pcpu_stats->syncp); 2397 2398 skb->protocol = eth_type_trans(skb, skb->dev); 2399 netif_receive_skb(skb); 2400 } 2401 2402 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port, 2403 void *priv) 2404 { 2405 skb->offload_fwd_mark = 1; 2406 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2407 } 2408 2409 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2410 u16 local_port, void *priv) 2411 { 2412 skb->offload_l3_fwd_mark = 1; 2413 skb->offload_fwd_mark = 1; 2414 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2415 } 2416 2417 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2418 u16 local_port) 2419 { 2420 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2421 } 2422 2423 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2424 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2425 _is_ctrl, SP_##_trap_group, DISCARD) 2426 2427 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2428 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2429 _is_ctrl, SP_##_trap_group, DISCARD) 2430 2431 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2432 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2433 _is_ctrl, SP_##_trap_group, DISCARD) 2434 2435 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2436 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2437 2438 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2439 /* Events */ 2440 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2441 /* L2 traps */ 2442 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2443 /* L3 traps */ 2444 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2445 false), 2446 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2447 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2448 false), 2449 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2450 ROUTER_EXP, false), 2451 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2452 ROUTER_EXP, false), 2453 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2454 ROUTER_EXP, false), 2455 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2456 ROUTER_EXP, false), 2457 /* Multicast Router Traps */ 2458 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2459 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2460 /* NVE traps */ 2461 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), 2462 }; 2463 2464 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2465 /* Events */ 2466 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2467 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2468 }; 2469 2470 static const struct mlxsw_listener mlxsw_sp2_listener[] = { 2471 /* Events */ 2472 MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE), 2473 }; 2474 2475 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2476 { 2477 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2478 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2479 enum mlxsw_reg_qpcr_ir_units ir_units; 2480 int max_cpu_policers; 2481 bool is_bytes; 2482 u8 burst_size; 2483 u32 rate; 2484 int i, err; 2485 2486 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2487 return -EIO; 2488 2489 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2490 2491 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2492 for (i = 0; i < max_cpu_policers; i++) { 2493 is_bytes = false; 2494 switch (i) { 2495 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2496 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2497 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2498 rate = 1024; 2499 burst_size = 7; 2500 break; 2501 default: 2502 continue; 2503 } 2504 2505 __set_bit(i, mlxsw_sp->trap->policers_usage); 2506 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2507 burst_size); 2508 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2509 if (err) 2510 return err; 2511 } 2512 2513 return 0; 2514 } 2515 2516 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2517 { 2518 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2519 enum mlxsw_reg_htgt_trap_group i; 2520 int max_cpu_policers; 2521 int max_trap_groups; 2522 u8 priority, tc; 2523 u16 policer_id; 2524 int err; 2525 2526 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2527 return -EIO; 2528 2529 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2530 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2531 2532 for (i = 0; i < max_trap_groups; i++) { 2533 policer_id = i; 2534 switch (i) { 2535 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2536 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2537 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2538 priority = 1; 2539 tc = 1; 2540 break; 2541 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2542 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2543 tc = MLXSW_REG_HTGT_DEFAULT_TC; 2544 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2545 break; 2546 default: 2547 continue; 2548 } 2549 2550 if (max_cpu_policers <= policer_id && 2551 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2552 return -EIO; 2553 2554 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2555 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2556 if (err) 2557 return err; 2558 } 2559 2560 return 0; 2561 } 2562 2563 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2564 { 2565 struct mlxsw_sp_trap *trap; 2566 u64 max_policers; 2567 int err; 2568 2569 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2570 return -EIO; 2571 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2572 trap = kzalloc(struct_size(trap, policers_usage, 2573 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2574 if (!trap) 2575 return -ENOMEM; 2576 trap->max_policers = max_policers; 2577 mlxsw_sp->trap = trap; 2578 2579 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2580 if (err) 2581 goto err_cpu_policers_set; 2582 2583 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2584 if (err) 2585 goto err_trap_groups_set; 2586 2587 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener, 2588 ARRAY_SIZE(mlxsw_sp_listener), 2589 mlxsw_sp); 2590 if (err) 2591 goto err_traps_register; 2592 2593 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners, 2594 mlxsw_sp->listeners_count, mlxsw_sp); 2595 if (err) 2596 goto err_extra_traps_init; 2597 2598 return 0; 2599 2600 err_extra_traps_init: 2601 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2602 ARRAY_SIZE(mlxsw_sp_listener), 2603 mlxsw_sp); 2604 err_traps_register: 2605 err_trap_groups_set: 2606 err_cpu_policers_set: 2607 kfree(trap); 2608 return err; 2609 } 2610 2611 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2612 { 2613 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners, 2614 mlxsw_sp->listeners_count, 2615 mlxsw_sp); 2616 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2617 ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp); 2618 kfree(mlxsw_sp->trap); 2619 } 2620 2621 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2622 2623 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2624 { 2625 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2626 u32 seed; 2627 int err; 2628 2629 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2630 MLXSW_SP_LAG_SEED_INIT); 2631 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2632 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2633 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2634 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2635 MLXSW_REG_SLCR_LAG_HASH_SIP | 2636 MLXSW_REG_SLCR_LAG_HASH_DIP | 2637 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2638 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2639 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2640 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2641 if (err) 2642 return err; 2643 2644 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 2645 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2646 return -EIO; 2647 2648 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 2649 sizeof(struct mlxsw_sp_upper), 2650 GFP_KERNEL); 2651 if (!mlxsw_sp->lags) 2652 return -ENOMEM; 2653 2654 return 0; 2655 } 2656 2657 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2658 { 2659 kfree(mlxsw_sp->lags); 2660 } 2661 2662 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2663 .clock_init = mlxsw_sp1_ptp_clock_init, 2664 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2665 .init = mlxsw_sp1_ptp_init, 2666 .fini = mlxsw_sp1_ptp_fini, 2667 .receive = mlxsw_sp1_ptp_receive, 2668 .transmitted = mlxsw_sp1_ptp_transmitted, 2669 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2670 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 2671 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2672 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2673 .get_stats_count = mlxsw_sp1_get_stats_count, 2674 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2675 .get_stats = mlxsw_sp1_get_stats, 2676 }; 2677 2678 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2679 .clock_init = mlxsw_sp2_ptp_clock_init, 2680 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2681 .init = mlxsw_sp2_ptp_init, 2682 .fini = mlxsw_sp2_ptp_fini, 2683 .receive = mlxsw_sp2_ptp_receive, 2684 .transmitted = mlxsw_sp2_ptp_transmitted, 2685 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2686 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2687 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2688 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2689 .get_stats_count = mlxsw_sp2_get_stats_count, 2690 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2691 .get_stats = mlxsw_sp2_get_stats, 2692 }; 2693 2694 struct mlxsw_sp_sample_trigger_node { 2695 struct mlxsw_sp_sample_trigger trigger; 2696 struct mlxsw_sp_sample_params params; 2697 struct rhash_head ht_node; 2698 struct rcu_head rcu; 2699 refcount_t refcount; 2700 }; 2701 2702 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { 2703 .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), 2704 .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), 2705 .key_len = sizeof(struct mlxsw_sp_sample_trigger), 2706 .automatic_shrinking = true, 2707 }; 2708 2709 static void 2710 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, 2711 const struct mlxsw_sp_sample_trigger *trigger) 2712 { 2713 memset(key, 0, sizeof(*key)); 2714 key->type = trigger->type; 2715 key->local_port = trigger->local_port; 2716 } 2717 2718 /* RCU read lock must be held */ 2719 struct mlxsw_sp_sample_params * 2720 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, 2721 const struct mlxsw_sp_sample_trigger *trigger) 2722 { 2723 struct mlxsw_sp_sample_trigger_node *trigger_node; 2724 struct mlxsw_sp_sample_trigger key; 2725 2726 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2727 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, 2728 mlxsw_sp_sample_trigger_ht_params); 2729 if (!trigger_node) 2730 return NULL; 2731 2732 return &trigger_node->params; 2733 } 2734 2735 static int 2736 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, 2737 const struct mlxsw_sp_sample_trigger *trigger, 2738 const struct mlxsw_sp_sample_params *params) 2739 { 2740 struct mlxsw_sp_sample_trigger_node *trigger_node; 2741 int err; 2742 2743 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); 2744 if (!trigger_node) 2745 return -ENOMEM; 2746 2747 trigger_node->trigger = *trigger; 2748 trigger_node->params = *params; 2749 refcount_set(&trigger_node->refcount, 1); 2750 2751 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, 2752 &trigger_node->ht_node, 2753 mlxsw_sp_sample_trigger_ht_params); 2754 if (err) 2755 goto err_rhashtable_insert; 2756 2757 return 0; 2758 2759 err_rhashtable_insert: 2760 kfree(trigger_node); 2761 return err; 2762 } 2763 2764 static void 2765 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, 2766 struct mlxsw_sp_sample_trigger_node *trigger_node) 2767 { 2768 rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, 2769 &trigger_node->ht_node, 2770 mlxsw_sp_sample_trigger_ht_params); 2771 kfree_rcu(trigger_node, rcu); 2772 } 2773 2774 int 2775 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, 2776 const struct mlxsw_sp_sample_trigger *trigger, 2777 const struct mlxsw_sp_sample_params *params, 2778 struct netlink_ext_ack *extack) 2779 { 2780 struct mlxsw_sp_sample_trigger_node *trigger_node; 2781 struct mlxsw_sp_sample_trigger key; 2782 2783 ASSERT_RTNL(); 2784 2785 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2786 2787 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2788 &key, 2789 mlxsw_sp_sample_trigger_ht_params); 2790 if (!trigger_node) 2791 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, 2792 params); 2793 2794 if (trigger_node->trigger.local_port) { 2795 NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port"); 2796 return -EINVAL; 2797 } 2798 2799 if (trigger_node->params.psample_group != params->psample_group || 2800 trigger_node->params.truncate != params->truncate || 2801 trigger_node->params.rate != params->rate || 2802 trigger_node->params.trunc_size != params->trunc_size) { 2803 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); 2804 return -EINVAL; 2805 } 2806 2807 refcount_inc(&trigger_node->refcount); 2808 2809 return 0; 2810 } 2811 2812 void 2813 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, 2814 const struct mlxsw_sp_sample_trigger *trigger) 2815 { 2816 struct mlxsw_sp_sample_trigger_node *trigger_node; 2817 struct mlxsw_sp_sample_trigger key; 2818 2819 ASSERT_RTNL(); 2820 2821 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2822 2823 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2824 &key, 2825 mlxsw_sp_sample_trigger_ht_params); 2826 if (!trigger_node) 2827 return; 2828 2829 if (!refcount_dec_and_test(&trigger_node->refcount)) 2830 return; 2831 2832 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); 2833 } 2834 2835 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2836 unsigned long event, void *ptr); 2837 2838 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96 2839 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128 2840 #define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789 2841 2842 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) 2843 { 2844 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 2845 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; 2846 mutex_init(&mlxsw_sp->parsing.lock); 2847 } 2848 2849 static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) 2850 { 2851 mutex_destroy(&mlxsw_sp->parsing.lock); 2852 } 2853 2854 struct mlxsw_sp_ipv6_addr_node { 2855 struct in6_addr key; 2856 struct rhash_head ht_node; 2857 u32 kvdl_index; 2858 refcount_t refcount; 2859 }; 2860 2861 static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = { 2862 .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key), 2863 .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node), 2864 .key_len = sizeof(struct in6_addr), 2865 .automatic_shrinking = true, 2866 }; 2867 2868 static int 2869 mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6, 2870 u32 *p_kvdl_index) 2871 { 2872 struct mlxsw_sp_ipv6_addr_node *node; 2873 char rips_pl[MLXSW_REG_RIPS_LEN]; 2874 int err; 2875 2876 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 2877 MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 2878 p_kvdl_index); 2879 if (err) 2880 return err; 2881 2882 mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6); 2883 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); 2884 if (err) 2885 goto err_rips_write; 2886 2887 node = kzalloc(sizeof(*node), GFP_KERNEL); 2888 if (!node) { 2889 err = -ENOMEM; 2890 goto err_node_alloc; 2891 } 2892 2893 node->key = *addr6; 2894 node->kvdl_index = *p_kvdl_index; 2895 refcount_set(&node->refcount, 1); 2896 2897 err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht, 2898 &node->ht_node, 2899 mlxsw_sp_ipv6_addr_ht_params); 2900 if (err) 2901 goto err_rhashtable_insert; 2902 2903 return 0; 2904 2905 err_rhashtable_insert: 2906 kfree(node); 2907 err_node_alloc: 2908 err_rips_write: 2909 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 2910 *p_kvdl_index); 2911 return err; 2912 } 2913 2914 static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp, 2915 struct mlxsw_sp_ipv6_addr_node *node) 2916 { 2917 u32 kvdl_index = node->kvdl_index; 2918 2919 rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node, 2920 mlxsw_sp_ipv6_addr_ht_params); 2921 kfree(node); 2922 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 2923 kvdl_index); 2924 } 2925 2926 int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp, 2927 const struct in6_addr *addr6, 2928 u32 *p_kvdl_index) 2929 { 2930 struct mlxsw_sp_ipv6_addr_node *node; 2931 int err = 0; 2932 2933 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 2934 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 2935 mlxsw_sp_ipv6_addr_ht_params); 2936 if (node) { 2937 refcount_inc(&node->refcount); 2938 *p_kvdl_index = node->kvdl_index; 2939 goto out_unlock; 2940 } 2941 2942 err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index); 2943 2944 out_unlock: 2945 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 2946 return err; 2947 } 2948 2949 void 2950 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6) 2951 { 2952 struct mlxsw_sp_ipv6_addr_node *node; 2953 2954 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 2955 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 2956 mlxsw_sp_ipv6_addr_ht_params); 2957 if (WARN_ON(!node)) 2958 goto out_unlock; 2959 2960 if (!refcount_dec_and_test(&node->refcount)) 2961 goto out_unlock; 2962 2963 mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node); 2964 2965 out_unlock: 2966 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 2967 } 2968 2969 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp) 2970 { 2971 int err; 2972 2973 err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht, 2974 &mlxsw_sp_ipv6_addr_ht_params); 2975 if (err) 2976 return err; 2977 2978 mutex_init(&mlxsw_sp->ipv6_addr_ht_lock); 2979 return 0; 2980 } 2981 2982 static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp) 2983 { 2984 mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock); 2985 rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht); 2986 } 2987 2988 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2989 const struct mlxsw_bus_info *mlxsw_bus_info, 2990 struct netlink_ext_ack *extack) 2991 { 2992 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2993 int err; 2994 2995 mlxsw_sp->core = mlxsw_core; 2996 mlxsw_sp->bus_info = mlxsw_bus_info; 2997 2998 mlxsw_sp_parsing_init(mlxsw_sp); 2999 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 3000 3001 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3002 if (err) { 3003 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 3004 return err; 3005 } 3006 3007 err = mlxsw_sp_kvdl_init(mlxsw_sp); 3008 if (err) { 3009 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 3010 return err; 3011 } 3012 3013 err = mlxsw_sp_fids_init(mlxsw_sp); 3014 if (err) { 3015 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 3016 goto err_fids_init; 3017 } 3018 3019 err = mlxsw_sp_policers_init(mlxsw_sp); 3020 if (err) { 3021 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); 3022 goto err_policers_init; 3023 } 3024 3025 err = mlxsw_sp_traps_init(mlxsw_sp); 3026 if (err) { 3027 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 3028 goto err_traps_init; 3029 } 3030 3031 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 3032 if (err) { 3033 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 3034 goto err_devlink_traps_init; 3035 } 3036 3037 err = mlxsw_sp_buffers_init(mlxsw_sp); 3038 if (err) { 3039 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 3040 goto err_buffers_init; 3041 } 3042 3043 err = mlxsw_sp_lag_init(mlxsw_sp); 3044 if (err) { 3045 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 3046 goto err_lag_init; 3047 } 3048 3049 /* Initialize SPAN before router and switchdev, so that those components 3050 * can call mlxsw_sp_span_respin(). 3051 */ 3052 err = mlxsw_sp_span_init(mlxsw_sp); 3053 if (err) { 3054 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3055 goto err_span_init; 3056 } 3057 3058 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3059 if (err) { 3060 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3061 goto err_switchdev_init; 3062 } 3063 3064 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 3065 if (err) { 3066 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 3067 goto err_counter_pool_init; 3068 } 3069 3070 err = mlxsw_sp_afa_init(mlxsw_sp); 3071 if (err) { 3072 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 3073 goto err_afa_init; 3074 } 3075 3076 err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp); 3077 if (err) { 3078 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n"); 3079 goto err_ipv6_addr_ht_init; 3080 } 3081 3082 err = mlxsw_sp_nve_init(mlxsw_sp); 3083 if (err) { 3084 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 3085 goto err_nve_init; 3086 } 3087 3088 err = mlxsw_sp_acl_init(mlxsw_sp); 3089 if (err) { 3090 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3091 goto err_acl_init; 3092 } 3093 3094 err = mlxsw_sp_router_init(mlxsw_sp, extack); 3095 if (err) { 3096 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 3097 goto err_router_init; 3098 } 3099 3100 if (mlxsw_sp->bus_info->read_frc_capable) { 3101 /* NULL is a valid return value from clock_init */ 3102 mlxsw_sp->clock = 3103 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 3104 mlxsw_sp->bus_info->dev); 3105 if (IS_ERR(mlxsw_sp->clock)) { 3106 err = PTR_ERR(mlxsw_sp->clock); 3107 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 3108 goto err_ptp_clock_init; 3109 } 3110 } 3111 3112 if (mlxsw_sp->clock) { 3113 /* NULL is a valid return value from ptp_ops->init */ 3114 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 3115 if (IS_ERR(mlxsw_sp->ptp_state)) { 3116 err = PTR_ERR(mlxsw_sp->ptp_state); 3117 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 3118 goto err_ptp_init; 3119 } 3120 } 3121 3122 /* Initialize netdevice notifier after SPAN is initialized, so that the 3123 * event handler can call SPAN respin. 3124 */ 3125 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 3126 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3127 &mlxsw_sp->netdevice_nb); 3128 if (err) { 3129 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 3130 goto err_netdev_notifier; 3131 } 3132 3133 err = mlxsw_sp_dpipe_init(mlxsw_sp); 3134 if (err) { 3135 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 3136 goto err_dpipe_init; 3137 } 3138 3139 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 3140 if (err) { 3141 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 3142 goto err_port_module_info_init; 3143 } 3144 3145 err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, 3146 &mlxsw_sp_sample_trigger_ht_params); 3147 if (err) { 3148 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); 3149 goto err_sample_trigger_init; 3150 } 3151 3152 err = mlxsw_sp_ports_create(mlxsw_sp); 3153 if (err) { 3154 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3155 goto err_ports_create; 3156 } 3157 3158 return 0; 3159 3160 err_ports_create: 3161 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3162 err_sample_trigger_init: 3163 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3164 err_port_module_info_init: 3165 mlxsw_sp_dpipe_fini(mlxsw_sp); 3166 err_dpipe_init: 3167 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3168 &mlxsw_sp->netdevice_nb); 3169 err_netdev_notifier: 3170 if (mlxsw_sp->clock) 3171 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3172 err_ptp_init: 3173 if (mlxsw_sp->clock) 3174 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3175 err_ptp_clock_init: 3176 mlxsw_sp_router_fini(mlxsw_sp); 3177 err_router_init: 3178 mlxsw_sp_acl_fini(mlxsw_sp); 3179 err_acl_init: 3180 mlxsw_sp_nve_fini(mlxsw_sp); 3181 err_nve_init: 3182 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3183 err_ipv6_addr_ht_init: 3184 mlxsw_sp_afa_fini(mlxsw_sp); 3185 err_afa_init: 3186 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3187 err_counter_pool_init: 3188 mlxsw_sp_switchdev_fini(mlxsw_sp); 3189 err_switchdev_init: 3190 mlxsw_sp_span_fini(mlxsw_sp); 3191 err_span_init: 3192 mlxsw_sp_lag_fini(mlxsw_sp); 3193 err_lag_init: 3194 mlxsw_sp_buffers_fini(mlxsw_sp); 3195 err_buffers_init: 3196 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3197 err_devlink_traps_init: 3198 mlxsw_sp_traps_fini(mlxsw_sp); 3199 err_traps_init: 3200 mlxsw_sp_policers_fini(mlxsw_sp); 3201 err_policers_init: 3202 mlxsw_sp_fids_fini(mlxsw_sp); 3203 err_fids_init: 3204 mlxsw_sp_kvdl_fini(mlxsw_sp); 3205 mlxsw_sp_parsing_fini(mlxsw_sp); 3206 return err; 3207 } 3208 3209 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 3210 const struct mlxsw_bus_info *mlxsw_bus_info, 3211 struct netlink_ext_ack *extack) 3212 { 3213 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3214 3215 mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; 3216 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 3217 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 3218 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 3219 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 3220 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 3221 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 3222 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 3223 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 3224 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 3225 mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; 3226 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 3227 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 3228 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 3229 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; 3230 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; 3231 mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; 3232 mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; 3233 mlxsw_sp->listeners = mlxsw_sp1_listener; 3234 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 3235 mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr; 3236 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 3237 3238 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3239 } 3240 3241 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 3242 const struct mlxsw_bus_info *mlxsw_bus_info, 3243 struct netlink_ext_ack *extack) 3244 { 3245 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3246 3247 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3248 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3249 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3250 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3251 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3252 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3253 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3254 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3255 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3256 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3257 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3258 mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; 3259 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3260 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3261 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 3262 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3263 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3264 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3265 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3266 mlxsw_sp->listeners = mlxsw_sp2_listener; 3267 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3268 mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr; 3269 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 3270 3271 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3272 } 3273 3274 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 3275 const struct mlxsw_bus_info *mlxsw_bus_info, 3276 struct netlink_ext_ack *extack) 3277 { 3278 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3279 3280 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3281 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3282 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3283 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3284 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3285 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3286 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3287 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3288 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3289 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3290 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3291 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3292 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3293 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3294 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3295 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3296 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3297 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3298 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3299 mlxsw_sp->listeners = mlxsw_sp2_listener; 3300 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3301 mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr; 3302 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3303 3304 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3305 } 3306 3307 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, 3308 const struct mlxsw_bus_info *mlxsw_bus_info, 3309 struct netlink_ext_ack *extack) 3310 { 3311 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3312 3313 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3314 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3315 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3316 mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops; 3317 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3318 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3319 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3320 mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops; 3321 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3322 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3323 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3324 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3325 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3326 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3327 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3328 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3329 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3330 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3331 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3332 mlxsw_sp->listeners = mlxsw_sp2_listener; 3333 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3334 mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr; 3335 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; 3336 3337 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3338 } 3339 3340 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3341 { 3342 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3343 3344 mlxsw_sp_ports_remove(mlxsw_sp); 3345 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3346 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3347 mlxsw_sp_dpipe_fini(mlxsw_sp); 3348 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3349 &mlxsw_sp->netdevice_nb); 3350 if (mlxsw_sp->clock) { 3351 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3352 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3353 } 3354 mlxsw_sp_router_fini(mlxsw_sp); 3355 mlxsw_sp_acl_fini(mlxsw_sp); 3356 mlxsw_sp_nve_fini(mlxsw_sp); 3357 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3358 mlxsw_sp_afa_fini(mlxsw_sp); 3359 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3360 mlxsw_sp_switchdev_fini(mlxsw_sp); 3361 mlxsw_sp_span_fini(mlxsw_sp); 3362 mlxsw_sp_lag_fini(mlxsw_sp); 3363 mlxsw_sp_buffers_fini(mlxsw_sp); 3364 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3365 mlxsw_sp_traps_fini(mlxsw_sp); 3366 mlxsw_sp_policers_fini(mlxsw_sp); 3367 mlxsw_sp_fids_fini(mlxsw_sp); 3368 mlxsw_sp_kvdl_fini(mlxsw_sp); 3369 mlxsw_sp_parsing_fini(mlxsw_sp); 3370 } 3371 3372 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 3373 * 802.1Q FIDs 3374 */ 3375 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 3376 VLAN_VID_MASK - 1) 3377 3378 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3379 .used_max_mid = 1, 3380 .max_mid = MLXSW_SP_MID_MAX, 3381 .used_flood_tables = 1, 3382 .used_flood_mode = 1, 3383 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED, 3384 .max_fid_flood_tables = 3, 3385 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3386 .used_max_ib_mc = 1, 3387 .max_ib_mc = 0, 3388 .used_max_pkey = 1, 3389 .max_pkey = 0, 3390 .used_kvd_sizes = 1, 3391 .kvd_hash_single_parts = 59, 3392 .kvd_hash_double_parts = 41, 3393 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3394 .swid_config = { 3395 { 3396 .used_type = 1, 3397 .type = MLXSW_PORT_SWID_TYPE_ETH, 3398 } 3399 }, 3400 }; 3401 3402 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3403 .used_max_mid = 1, 3404 .max_mid = MLXSW_SP_MID_MAX, 3405 .used_flood_tables = 1, 3406 .used_flood_mode = 1, 3407 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED, 3408 .max_fid_flood_tables = 3, 3409 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3410 .used_max_ib_mc = 1, 3411 .max_ib_mc = 0, 3412 .used_max_pkey = 1, 3413 .max_pkey = 0, 3414 .swid_config = { 3415 { 3416 .used_type = 1, 3417 .type = MLXSW_PORT_SWID_TYPE_ETH, 3418 } 3419 }, 3420 }; 3421 3422 static void 3423 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3424 struct devlink_resource_size_params *kvd_size_params, 3425 struct devlink_resource_size_params *linear_size_params, 3426 struct devlink_resource_size_params *hash_double_size_params, 3427 struct devlink_resource_size_params *hash_single_size_params) 3428 { 3429 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3430 KVD_SINGLE_MIN_SIZE); 3431 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3432 KVD_DOUBLE_MIN_SIZE); 3433 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3434 u32 linear_size_min = 0; 3435 3436 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3437 MLXSW_SP_KVD_GRANULARITY, 3438 DEVLINK_RESOURCE_UNIT_ENTRY); 3439 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3440 kvd_size - single_size_min - 3441 double_size_min, 3442 MLXSW_SP_KVD_GRANULARITY, 3443 DEVLINK_RESOURCE_UNIT_ENTRY); 3444 devlink_resource_size_params_init(hash_double_size_params, 3445 double_size_min, 3446 kvd_size - single_size_min - 3447 linear_size_min, 3448 MLXSW_SP_KVD_GRANULARITY, 3449 DEVLINK_RESOURCE_UNIT_ENTRY); 3450 devlink_resource_size_params_init(hash_single_size_params, 3451 single_size_min, 3452 kvd_size - double_size_min - 3453 linear_size_min, 3454 MLXSW_SP_KVD_GRANULARITY, 3455 DEVLINK_RESOURCE_UNIT_ENTRY); 3456 } 3457 3458 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3459 { 3460 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3461 struct devlink_resource_size_params hash_single_size_params; 3462 struct devlink_resource_size_params hash_double_size_params; 3463 struct devlink_resource_size_params linear_size_params; 3464 struct devlink_resource_size_params kvd_size_params; 3465 u32 kvd_size, single_size, double_size, linear_size; 3466 const struct mlxsw_config_profile *profile; 3467 int err; 3468 3469 profile = &mlxsw_sp1_config_profile; 3470 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3471 return -EIO; 3472 3473 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3474 &linear_size_params, 3475 &hash_double_size_params, 3476 &hash_single_size_params); 3477 3478 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3479 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3480 kvd_size, MLXSW_SP_RESOURCE_KVD, 3481 DEVLINK_RESOURCE_ID_PARENT_TOP, 3482 &kvd_size_params); 3483 if (err) 3484 return err; 3485 3486 linear_size = profile->kvd_linear_size; 3487 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 3488 linear_size, 3489 MLXSW_SP_RESOURCE_KVD_LINEAR, 3490 MLXSW_SP_RESOURCE_KVD, 3491 &linear_size_params); 3492 if (err) 3493 return err; 3494 3495 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 3496 if (err) 3497 return err; 3498 3499 double_size = kvd_size - linear_size; 3500 double_size *= profile->kvd_hash_double_parts; 3501 double_size /= profile->kvd_hash_double_parts + 3502 profile->kvd_hash_single_parts; 3503 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 3504 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 3505 double_size, 3506 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3507 MLXSW_SP_RESOURCE_KVD, 3508 &hash_double_size_params); 3509 if (err) 3510 return err; 3511 3512 single_size = kvd_size - double_size - linear_size; 3513 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 3514 single_size, 3515 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3516 MLXSW_SP_RESOURCE_KVD, 3517 &hash_single_size_params); 3518 if (err) 3519 return err; 3520 3521 return 0; 3522 } 3523 3524 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3525 { 3526 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3527 struct devlink_resource_size_params kvd_size_params; 3528 u32 kvd_size; 3529 3530 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3531 return -EIO; 3532 3533 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3534 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 3535 MLXSW_SP_KVD_GRANULARITY, 3536 DEVLINK_RESOURCE_UNIT_ENTRY); 3537 3538 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3539 kvd_size, MLXSW_SP_RESOURCE_KVD, 3540 DEVLINK_RESOURCE_ID_PARENT_TOP, 3541 &kvd_size_params); 3542 } 3543 3544 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 3545 { 3546 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3547 struct devlink_resource_size_params span_size_params; 3548 u32 max_span; 3549 3550 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 3551 return -EIO; 3552 3553 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 3554 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 3555 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3556 3557 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 3558 max_span, MLXSW_SP_RESOURCE_SPAN, 3559 DEVLINK_RESOURCE_ID_PARENT_TOP, 3560 &span_size_params); 3561 } 3562 3563 static int 3564 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core) 3565 { 3566 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3567 struct devlink_resource_size_params size_params; 3568 u8 max_rif_mac_profiles; 3569 3570 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) 3571 max_rif_mac_profiles = 1; 3572 else 3573 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, 3574 MAX_RIF_MAC_PROFILES); 3575 devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, 3576 max_rif_mac_profiles, 1, 3577 DEVLINK_RESOURCE_UNIT_ENTRY); 3578 3579 return devlink_resource_register(devlink, 3580 "rif_mac_profiles", 3581 max_rif_mac_profiles, 3582 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, 3583 DEVLINK_RESOURCE_ID_PARENT_TOP, 3584 &size_params); 3585 } 3586 3587 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core) 3588 { 3589 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3590 struct devlink_resource_size_params size_params; 3591 u64 max_rifs; 3592 3593 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS)) 3594 return -EIO; 3595 3596 max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS); 3597 devlink_resource_size_params_init(&size_params, max_rifs, max_rifs, 3598 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3599 3600 return devlink_resource_register(devlink, "rifs", max_rifs, 3601 MLXSW_SP_RESOURCE_RIFS, 3602 DEVLINK_RESOURCE_ID_PARENT_TOP, 3603 &size_params); 3604 } 3605 3606 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 3607 { 3608 int err; 3609 3610 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 3611 if (err) 3612 return err; 3613 3614 err = mlxsw_sp_resources_span_register(mlxsw_core); 3615 if (err) 3616 goto err_resources_span_register; 3617 3618 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3619 if (err) 3620 goto err_resources_counter_register; 3621 3622 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3623 if (err) 3624 goto err_policer_resources_register; 3625 3626 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3627 if (err) 3628 goto err_resources_rif_mac_profile_register; 3629 3630 err = mlxsw_sp_resources_rifs_register(mlxsw_core); 3631 if (err) 3632 goto err_resources_rifs_register; 3633 3634 return 0; 3635 3636 err_resources_rifs_register: 3637 err_resources_rif_mac_profile_register: 3638 err_policer_resources_register: 3639 err_resources_counter_register: 3640 err_resources_span_register: 3641 devlink_resources_unregister(priv_to_devlink(mlxsw_core)); 3642 return err; 3643 } 3644 3645 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 3646 { 3647 int err; 3648 3649 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 3650 if (err) 3651 return err; 3652 3653 err = mlxsw_sp_resources_span_register(mlxsw_core); 3654 if (err) 3655 goto err_resources_span_register; 3656 3657 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3658 if (err) 3659 goto err_resources_counter_register; 3660 3661 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3662 if (err) 3663 goto err_policer_resources_register; 3664 3665 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3666 if (err) 3667 goto err_resources_rif_mac_profile_register; 3668 3669 err = mlxsw_sp_resources_rifs_register(mlxsw_core); 3670 if (err) 3671 goto err_resources_rifs_register; 3672 3673 return 0; 3674 3675 err_resources_rifs_register: 3676 err_resources_rif_mac_profile_register: 3677 err_policer_resources_register: 3678 err_resources_counter_register: 3679 err_resources_span_register: 3680 devlink_resources_unregister(priv_to_devlink(mlxsw_core)); 3681 return err; 3682 } 3683 3684 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3685 const struct mlxsw_config_profile *profile, 3686 u64 *p_single_size, u64 *p_double_size, 3687 u64 *p_linear_size) 3688 { 3689 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3690 u32 double_size; 3691 int err; 3692 3693 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3694 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 3695 return -EIO; 3696 3697 /* The hash part is what left of the kvd without the 3698 * linear part. It is split to the single size and 3699 * double size by the parts ratio from the profile. 3700 * Both sizes must be a multiplications of the 3701 * granularity from the profile. In case the user 3702 * provided the sizes they are obtained via devlink. 3703 */ 3704 err = devlink_resource_size_get(devlink, 3705 MLXSW_SP_RESOURCE_KVD_LINEAR, 3706 p_linear_size); 3707 if (err) 3708 *p_linear_size = profile->kvd_linear_size; 3709 3710 err = devlink_resource_size_get(devlink, 3711 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3712 p_double_size); 3713 if (err) { 3714 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3715 *p_linear_size; 3716 double_size *= profile->kvd_hash_double_parts; 3717 double_size /= profile->kvd_hash_double_parts + 3718 profile->kvd_hash_single_parts; 3719 *p_double_size = rounddown(double_size, 3720 MLXSW_SP_KVD_GRANULARITY); 3721 } 3722 3723 err = devlink_resource_size_get(devlink, 3724 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3725 p_single_size); 3726 if (err) 3727 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3728 *p_double_size - *p_linear_size; 3729 3730 /* Check results are legal. */ 3731 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3732 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3733 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3734 return -EIO; 3735 3736 return 0; 3737 } 3738 3739 static int 3740 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 3741 struct devlink_param_gset_ctx *ctx) 3742 { 3743 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3744 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3745 3746 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 3747 return 0; 3748 } 3749 3750 static int 3751 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 3752 struct devlink_param_gset_ctx *ctx) 3753 { 3754 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3755 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3756 3757 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 3758 } 3759 3760 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 3761 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3762 "acl_region_rehash_interval", 3763 DEVLINK_PARAM_TYPE_U32, 3764 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3765 mlxsw_sp_params_acl_region_rehash_intrvl_get, 3766 mlxsw_sp_params_acl_region_rehash_intrvl_set, 3767 NULL), 3768 }; 3769 3770 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 3771 { 3772 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3773 union devlink_param_value value; 3774 int err; 3775 3776 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 3777 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3778 if (err) 3779 return err; 3780 3781 value.vu32 = 0; 3782 devlink_param_driverinit_value_set(devlink, 3783 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3784 value); 3785 return 0; 3786 } 3787 3788 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 3789 { 3790 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3791 mlxsw_sp2_devlink_params, 3792 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3793 } 3794 3795 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3796 struct sk_buff *skb, u16 local_port) 3797 { 3798 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3799 3800 skb_pull(skb, MLXSW_TXHDR_LEN); 3801 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3802 } 3803 3804 static struct mlxsw_driver mlxsw_sp1_driver = { 3805 .kind = mlxsw_sp1_driver_name, 3806 .priv_size = sizeof(struct mlxsw_sp), 3807 .fw_req_rev = &mlxsw_sp1_fw_rev, 3808 .fw_filename = MLXSW_SP1_FW_FILENAME, 3809 .init = mlxsw_sp1_init, 3810 .fini = mlxsw_sp_fini, 3811 .port_split = mlxsw_sp_port_split, 3812 .port_unsplit = mlxsw_sp_port_unsplit, 3813 .sb_pool_get = mlxsw_sp_sb_pool_get, 3814 .sb_pool_set = mlxsw_sp_sb_pool_set, 3815 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3816 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3817 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3818 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3819 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3820 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3821 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3822 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3823 .trap_init = mlxsw_sp_trap_init, 3824 .trap_fini = mlxsw_sp_trap_fini, 3825 .trap_action_set = mlxsw_sp_trap_action_set, 3826 .trap_group_init = mlxsw_sp_trap_group_init, 3827 .trap_group_set = mlxsw_sp_trap_group_set, 3828 .trap_policer_init = mlxsw_sp_trap_policer_init, 3829 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3830 .trap_policer_set = mlxsw_sp_trap_policer_set, 3831 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3832 .txhdr_construct = mlxsw_sp_txhdr_construct, 3833 .resources_register = mlxsw_sp1_resources_register, 3834 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3835 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3836 .txhdr_len = MLXSW_TXHDR_LEN, 3837 .profile = &mlxsw_sp1_config_profile, 3838 }; 3839 3840 static struct mlxsw_driver mlxsw_sp2_driver = { 3841 .kind = mlxsw_sp2_driver_name, 3842 .priv_size = sizeof(struct mlxsw_sp), 3843 .fw_req_rev = &mlxsw_sp2_fw_rev, 3844 .fw_filename = MLXSW_SP2_FW_FILENAME, 3845 .init = mlxsw_sp2_init, 3846 .fini = mlxsw_sp_fini, 3847 .port_split = mlxsw_sp_port_split, 3848 .port_unsplit = mlxsw_sp_port_unsplit, 3849 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 3850 .sb_pool_get = mlxsw_sp_sb_pool_get, 3851 .sb_pool_set = mlxsw_sp_sb_pool_set, 3852 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3853 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3854 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3855 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3856 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3857 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3858 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3859 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3860 .trap_init = mlxsw_sp_trap_init, 3861 .trap_fini = mlxsw_sp_trap_fini, 3862 .trap_action_set = mlxsw_sp_trap_action_set, 3863 .trap_group_init = mlxsw_sp_trap_group_init, 3864 .trap_group_set = mlxsw_sp_trap_group_set, 3865 .trap_policer_init = mlxsw_sp_trap_policer_init, 3866 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3867 .trap_policer_set = mlxsw_sp_trap_policer_set, 3868 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3869 .txhdr_construct = mlxsw_sp_txhdr_construct, 3870 .resources_register = mlxsw_sp2_resources_register, 3871 .params_register = mlxsw_sp2_params_register, 3872 .params_unregister = mlxsw_sp2_params_unregister, 3873 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3874 .txhdr_len = MLXSW_TXHDR_LEN, 3875 .profile = &mlxsw_sp2_config_profile, 3876 }; 3877 3878 static struct mlxsw_driver mlxsw_sp3_driver = { 3879 .kind = mlxsw_sp3_driver_name, 3880 .priv_size = sizeof(struct mlxsw_sp), 3881 .fw_req_rev = &mlxsw_sp3_fw_rev, 3882 .fw_filename = MLXSW_SP3_FW_FILENAME, 3883 .init = mlxsw_sp3_init, 3884 .fini = mlxsw_sp_fini, 3885 .port_split = mlxsw_sp_port_split, 3886 .port_unsplit = mlxsw_sp_port_unsplit, 3887 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 3888 .sb_pool_get = mlxsw_sp_sb_pool_get, 3889 .sb_pool_set = mlxsw_sp_sb_pool_set, 3890 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3891 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3892 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3893 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3894 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3895 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3896 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3897 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3898 .trap_init = mlxsw_sp_trap_init, 3899 .trap_fini = mlxsw_sp_trap_fini, 3900 .trap_action_set = mlxsw_sp_trap_action_set, 3901 .trap_group_init = mlxsw_sp_trap_group_init, 3902 .trap_group_set = mlxsw_sp_trap_group_set, 3903 .trap_policer_init = mlxsw_sp_trap_policer_init, 3904 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3905 .trap_policer_set = mlxsw_sp_trap_policer_set, 3906 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3907 .txhdr_construct = mlxsw_sp_txhdr_construct, 3908 .resources_register = mlxsw_sp2_resources_register, 3909 .params_register = mlxsw_sp2_params_register, 3910 .params_unregister = mlxsw_sp2_params_unregister, 3911 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3912 .txhdr_len = MLXSW_TXHDR_LEN, 3913 .profile = &mlxsw_sp2_config_profile, 3914 }; 3915 3916 static struct mlxsw_driver mlxsw_sp4_driver = { 3917 .kind = mlxsw_sp4_driver_name, 3918 .priv_size = sizeof(struct mlxsw_sp), 3919 .init = mlxsw_sp4_init, 3920 .fini = mlxsw_sp_fini, 3921 .port_split = mlxsw_sp_port_split, 3922 .port_unsplit = mlxsw_sp_port_unsplit, 3923 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 3924 .sb_pool_get = mlxsw_sp_sb_pool_get, 3925 .sb_pool_set = mlxsw_sp_sb_pool_set, 3926 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3927 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3928 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3929 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3930 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3931 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3932 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3933 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3934 .trap_init = mlxsw_sp_trap_init, 3935 .trap_fini = mlxsw_sp_trap_fini, 3936 .trap_action_set = mlxsw_sp_trap_action_set, 3937 .trap_group_init = mlxsw_sp_trap_group_init, 3938 .trap_group_set = mlxsw_sp_trap_group_set, 3939 .trap_policer_init = mlxsw_sp_trap_policer_init, 3940 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3941 .trap_policer_set = mlxsw_sp_trap_policer_set, 3942 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3943 .txhdr_construct = mlxsw_sp_txhdr_construct, 3944 .resources_register = mlxsw_sp2_resources_register, 3945 .params_register = mlxsw_sp2_params_register, 3946 .params_unregister = mlxsw_sp2_params_unregister, 3947 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3948 .txhdr_len = MLXSW_TXHDR_LEN, 3949 .profile = &mlxsw_sp2_config_profile, 3950 }; 3951 3952 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3953 { 3954 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3955 } 3956 3957 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, 3958 struct netdev_nested_priv *priv) 3959 { 3960 int ret = 0; 3961 3962 if (mlxsw_sp_port_dev_check(lower_dev)) { 3963 priv->data = (void *)netdev_priv(lower_dev); 3964 ret = 1; 3965 } 3966 3967 return ret; 3968 } 3969 3970 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 3971 { 3972 struct netdev_nested_priv priv = { 3973 .data = NULL, 3974 }; 3975 3976 if (mlxsw_sp_port_dev_check(dev)) 3977 return netdev_priv(dev); 3978 3979 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv); 3980 3981 return (struct mlxsw_sp_port *)priv.data; 3982 } 3983 3984 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3985 { 3986 struct mlxsw_sp_port *mlxsw_sp_port; 3987 3988 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3989 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 3990 } 3991 3992 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 3993 { 3994 struct netdev_nested_priv priv = { 3995 .data = NULL, 3996 }; 3997 3998 if (mlxsw_sp_port_dev_check(dev)) 3999 return netdev_priv(dev); 4000 4001 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 4002 &priv); 4003 4004 return (struct mlxsw_sp_port *)priv.data; 4005 } 4006 4007 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 4008 { 4009 struct mlxsw_sp_port *mlxsw_sp_port; 4010 4011 rcu_read_lock(); 4012 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 4013 if (mlxsw_sp_port) 4014 dev_hold(mlxsw_sp_port->dev); 4015 rcu_read_unlock(); 4016 return mlxsw_sp_port; 4017 } 4018 4019 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 4020 { 4021 dev_put(mlxsw_sp_port->dev); 4022 } 4023 4024 int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) 4025 { 4026 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4027 int err = 0; 4028 4029 mutex_lock(&mlxsw_sp->parsing.lock); 4030 4031 if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref)) 4032 goto out_unlock; 4033 4034 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH, 4035 mlxsw_sp->parsing.vxlan_udp_dport); 4036 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4037 if (err) 4038 goto out_unlock; 4039 4040 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH; 4041 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1); 4042 4043 out_unlock: 4044 mutex_unlock(&mlxsw_sp->parsing.lock); 4045 return err; 4046 } 4047 4048 void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp) 4049 { 4050 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4051 4052 mutex_lock(&mlxsw_sp->parsing.lock); 4053 4054 if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref)) 4055 goto out_unlock; 4056 4057 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH, 4058 mlxsw_sp->parsing.vxlan_udp_dport); 4059 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4060 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 4061 4062 out_unlock: 4063 mutex_unlock(&mlxsw_sp->parsing.lock); 4064 } 4065 4066 int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, 4067 __be16 udp_dport) 4068 { 4069 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4070 int err; 4071 4072 mutex_lock(&mlxsw_sp->parsing.lock); 4073 4074 mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth, 4075 be16_to_cpu(udp_dport)); 4076 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4077 if (err) 4078 goto out_unlock; 4079 4080 mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport); 4081 4082 out_unlock: 4083 mutex_unlock(&mlxsw_sp->parsing.lock); 4084 return err; 4085 } 4086 4087 static void 4088 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 4089 struct net_device *lag_dev) 4090 { 4091 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 4092 struct net_device *upper_dev; 4093 struct list_head *iter; 4094 4095 if (netif_is_bridge_port(lag_dev)) 4096 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 4097 4098 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4099 if (!netif_is_bridge_port(upper_dev)) 4100 continue; 4101 br_dev = netdev_master_upper_dev_get(upper_dev); 4102 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 4103 } 4104 } 4105 4106 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4107 { 4108 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4109 4110 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4111 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4112 } 4113 4114 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4115 { 4116 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4117 4118 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 4119 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4120 } 4121 4122 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4123 u16 lag_id, u8 port_index) 4124 { 4125 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4126 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4127 4128 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 4129 lag_id, port_index); 4130 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4131 } 4132 4133 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4134 u16 lag_id) 4135 { 4136 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4137 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4138 4139 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 4140 lag_id); 4141 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4142 } 4143 4144 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 4145 u16 lag_id) 4146 { 4147 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4148 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4149 4150 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 4151 lag_id); 4152 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4153 } 4154 4155 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 4156 u16 lag_id) 4157 { 4158 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4159 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4160 4161 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 4162 lag_id); 4163 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4164 } 4165 4166 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4167 struct net_device *lag_dev, 4168 u16 *p_lag_id) 4169 { 4170 struct mlxsw_sp_upper *lag; 4171 int free_lag_id = -1; 4172 u64 max_lag; 4173 int i; 4174 4175 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 4176 for (i = 0; i < max_lag; i++) { 4177 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 4178 if (lag->ref_count) { 4179 if (lag->dev == lag_dev) { 4180 *p_lag_id = i; 4181 return 0; 4182 } 4183 } else if (free_lag_id < 0) { 4184 free_lag_id = i; 4185 } 4186 } 4187 if (free_lag_id < 0) 4188 return -EBUSY; 4189 *p_lag_id = free_lag_id; 4190 return 0; 4191 } 4192 4193 static bool 4194 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4195 struct net_device *lag_dev, 4196 struct netdev_lag_upper_info *lag_upper_info, 4197 struct netlink_ext_ack *extack) 4198 { 4199 u16 lag_id; 4200 4201 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 4202 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 4203 return false; 4204 } 4205 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4206 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4207 return false; 4208 } 4209 return true; 4210 } 4211 4212 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4213 u16 lag_id, u8 *p_port_index) 4214 { 4215 u64 max_lag_members; 4216 int i; 4217 4218 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4219 MAX_LAG_MEMBERS); 4220 for (i = 0; i < max_lag_members; i++) { 4221 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4222 *p_port_index = i; 4223 return 0; 4224 } 4225 } 4226 return -EBUSY; 4227 } 4228 4229 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 4230 struct net_device *lag_dev, 4231 struct netlink_ext_ack *extack) 4232 { 4233 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4234 struct mlxsw_sp_upper *lag; 4235 u16 lag_id; 4236 u8 port_index; 4237 int err; 4238 4239 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 4240 if (err) 4241 return err; 4242 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4243 if (!lag->ref_count) { 4244 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 4245 if (err) 4246 return err; 4247 lag->dev = lag_dev; 4248 } 4249 4250 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4251 if (err) 4252 return err; 4253 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 4254 if (err) 4255 goto err_col_port_add; 4256 4257 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 4258 mlxsw_sp_port->local_port); 4259 mlxsw_sp_port->lag_id = lag_id; 4260 mlxsw_sp_port->lagged = 1; 4261 lag->ref_count++; 4262 4263 /* Port is no longer usable as a router interface */ 4264 if (mlxsw_sp_port->default_vlan->fid) 4265 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 4266 4267 /* Join a router interface configured on the LAG, if exists */ 4268 err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan, 4269 lag_dev, extack); 4270 if (err) 4271 goto err_router_join; 4272 4273 return 0; 4274 4275 err_router_join: 4276 lag->ref_count--; 4277 mlxsw_sp_port->lagged = 0; 4278 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4279 mlxsw_sp_port->local_port); 4280 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4281 err_col_port_add: 4282 if (!lag->ref_count) 4283 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4284 return err; 4285 } 4286 4287 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4288 struct net_device *lag_dev) 4289 { 4290 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4291 u16 lag_id = mlxsw_sp_port->lag_id; 4292 struct mlxsw_sp_upper *lag; 4293 4294 if (!mlxsw_sp_port->lagged) 4295 return; 4296 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4297 WARN_ON(lag->ref_count == 0); 4298 4299 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4300 4301 /* Any VLANs configured on the port are no longer valid */ 4302 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 4303 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 4304 /* Make the LAG and its directly linked uppers leave bridges they 4305 * are memeber in 4306 */ 4307 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 4308 4309 if (lag->ref_count == 1) 4310 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4311 4312 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4313 mlxsw_sp_port->local_port); 4314 mlxsw_sp_port->lagged = 0; 4315 lag->ref_count--; 4316 4317 /* Make sure untagged frames are allowed to ingress */ 4318 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, 4319 ETH_P_8021Q); 4320 } 4321 4322 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4323 u16 lag_id) 4324 { 4325 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4326 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4327 4328 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4329 mlxsw_sp_port->local_port); 4330 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4331 } 4332 4333 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4334 u16 lag_id) 4335 { 4336 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4337 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4338 4339 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4340 mlxsw_sp_port->local_port); 4341 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4342 } 4343 4344 static int 4345 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 4346 { 4347 int err; 4348 4349 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 4350 mlxsw_sp_port->lag_id); 4351 if (err) 4352 return err; 4353 4354 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4355 if (err) 4356 goto err_dist_port_add; 4357 4358 return 0; 4359 4360 err_dist_port_add: 4361 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4362 return err; 4363 } 4364 4365 static int 4366 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 4367 { 4368 int err; 4369 4370 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4371 mlxsw_sp_port->lag_id); 4372 if (err) 4373 return err; 4374 4375 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 4376 mlxsw_sp_port->lag_id); 4377 if (err) 4378 goto err_col_port_disable; 4379 4380 return 0; 4381 4382 err_col_port_disable: 4383 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4384 return err; 4385 } 4386 4387 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4388 struct netdev_lag_lower_state_info *info) 4389 { 4390 if (info->tx_enabled) 4391 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 4392 else 4393 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4394 } 4395 4396 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4397 bool enable) 4398 { 4399 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4400 enum mlxsw_reg_spms_state spms_state; 4401 char *spms_pl; 4402 u16 vid; 4403 int err; 4404 4405 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 4406 MLXSW_REG_SPMS_STATE_DISCARDING; 4407 4408 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4409 if (!spms_pl) 4410 return -ENOMEM; 4411 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4412 4413 for (vid = 0; vid < VLAN_N_VID; vid++) 4414 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4415 4416 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4417 kfree(spms_pl); 4418 return err; 4419 } 4420 4421 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4422 { 4423 u16 vid = 1; 4424 int err; 4425 4426 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4427 if (err) 4428 return err; 4429 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4430 if (err) 4431 goto err_port_stp_set; 4432 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4433 true, false); 4434 if (err) 4435 goto err_port_vlan_set; 4436 4437 for (; vid <= VLAN_N_VID - 1; vid++) { 4438 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4439 vid, false); 4440 if (err) 4441 goto err_vid_learning_set; 4442 } 4443 4444 return 0; 4445 4446 err_vid_learning_set: 4447 for (vid--; vid >= 1; vid--) 4448 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4449 err_port_vlan_set: 4450 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4451 err_port_stp_set: 4452 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4453 return err; 4454 } 4455 4456 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4457 { 4458 u16 vid; 4459 4460 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4461 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4462 vid, true); 4463 4464 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4465 false, false); 4466 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4467 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4468 } 4469 4470 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 4471 { 4472 unsigned int num_vxlans = 0; 4473 struct net_device *dev; 4474 struct list_head *iter; 4475 4476 netdev_for_each_lower_dev(br_dev, dev, iter) { 4477 if (netif_is_vxlan(dev)) 4478 num_vxlans++; 4479 } 4480 4481 return num_vxlans > 1; 4482 } 4483 4484 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4485 { 4486 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4487 struct net_device *dev; 4488 struct list_head *iter; 4489 4490 netdev_for_each_lower_dev(br_dev, dev, iter) { 4491 u16 pvid; 4492 int err; 4493 4494 if (!netif_is_vxlan(dev)) 4495 continue; 4496 4497 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4498 if (err || !pvid) 4499 continue; 4500 4501 if (test_and_set_bit(pvid, vlans)) 4502 return false; 4503 } 4504 4505 return true; 4506 } 4507 4508 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4509 struct netlink_ext_ack *extack) 4510 { 4511 if (br_multicast_enabled(br_dev)) { 4512 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4513 return false; 4514 } 4515 4516 if (!br_vlan_enabled(br_dev) && 4517 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4518 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4519 return false; 4520 } 4521 4522 if (br_vlan_enabled(br_dev) && 4523 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4524 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4525 return false; 4526 } 4527 4528 return true; 4529 } 4530 4531 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 4532 struct net_device *dev, 4533 unsigned long event, void *ptr) 4534 { 4535 struct netdev_notifier_changeupper_info *info; 4536 struct mlxsw_sp_port *mlxsw_sp_port; 4537 struct netlink_ext_ack *extack; 4538 struct net_device *upper_dev; 4539 struct mlxsw_sp *mlxsw_sp; 4540 int err = 0; 4541 u16 proto; 4542 4543 mlxsw_sp_port = netdev_priv(dev); 4544 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4545 info = ptr; 4546 extack = netdev_notifier_info_to_extack(&info->info); 4547 4548 switch (event) { 4549 case NETDEV_PRECHANGEUPPER: 4550 upper_dev = info->upper_dev; 4551 if (!is_vlan_dev(upper_dev) && 4552 !netif_is_lag_master(upper_dev) && 4553 !netif_is_bridge_master(upper_dev) && 4554 !netif_is_ovs_master(upper_dev) && 4555 !netif_is_macvlan(upper_dev) && 4556 !netif_is_l3_master(upper_dev)) { 4557 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4558 return -EINVAL; 4559 } 4560 if (!info->linking) 4561 break; 4562 if (netif_is_bridge_master(upper_dev) && 4563 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4564 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4565 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4566 return -EOPNOTSUPP; 4567 if (netdev_has_any_upper_dev(upper_dev) && 4568 (!netif_is_bridge_master(upper_dev) || 4569 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4570 upper_dev))) { 4571 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4572 return -EINVAL; 4573 } 4574 if (netif_is_lag_master(upper_dev) && 4575 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4576 info->upper_info, extack)) 4577 return -EINVAL; 4578 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4579 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4580 return -EINVAL; 4581 } 4582 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4583 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4584 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4585 return -EINVAL; 4586 } 4587 if (netif_is_macvlan(upper_dev) && 4588 !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) { 4589 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4590 return -EOPNOTSUPP; 4591 } 4592 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4593 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 4594 return -EINVAL; 4595 } 4596 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4597 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4598 return -EINVAL; 4599 } 4600 if (netif_is_bridge_master(upper_dev)) { 4601 br_vlan_get_proto(upper_dev, &proto); 4602 if (br_vlan_enabled(upper_dev) && 4603 proto != ETH_P_8021Q && proto != ETH_P_8021AD) { 4604 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported"); 4605 return -EOPNOTSUPP; 4606 } 4607 if (vlan_uses_dev(lower_dev) && 4608 br_vlan_enabled(upper_dev) && 4609 proto == ETH_P_8021AD) { 4610 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported"); 4611 return -EOPNOTSUPP; 4612 } 4613 } 4614 if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) { 4615 struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev); 4616 4617 if (br_vlan_enabled(br_dev)) { 4618 br_vlan_get_proto(br_dev, &proto); 4619 if (proto == ETH_P_8021AD) { 4620 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge"); 4621 return -EOPNOTSUPP; 4622 } 4623 } 4624 } 4625 if (is_vlan_dev(upper_dev) && 4626 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4627 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4628 return -EOPNOTSUPP; 4629 } 4630 break; 4631 case NETDEV_CHANGEUPPER: 4632 upper_dev = info->upper_dev; 4633 if (netif_is_bridge_master(upper_dev)) { 4634 if (info->linking) 4635 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4636 lower_dev, 4637 upper_dev, 4638 extack); 4639 else 4640 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4641 lower_dev, 4642 upper_dev); 4643 } else if (netif_is_lag_master(upper_dev)) { 4644 if (info->linking) { 4645 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4646 upper_dev, extack); 4647 } else { 4648 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4649 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4650 upper_dev); 4651 } 4652 } else if (netif_is_ovs_master(upper_dev)) { 4653 if (info->linking) 4654 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4655 else 4656 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4657 } else if (netif_is_macvlan(upper_dev)) { 4658 if (!info->linking) 4659 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4660 } else if (is_vlan_dev(upper_dev)) { 4661 struct net_device *br_dev; 4662 4663 if (!netif_is_bridge_port(upper_dev)) 4664 break; 4665 if (info->linking) 4666 break; 4667 br_dev = netdev_master_upper_dev_get(upper_dev); 4668 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 4669 br_dev); 4670 } 4671 break; 4672 } 4673 4674 return err; 4675 } 4676 4677 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 4678 unsigned long event, void *ptr) 4679 { 4680 struct netdev_notifier_changelowerstate_info *info; 4681 struct mlxsw_sp_port *mlxsw_sp_port; 4682 int err; 4683 4684 mlxsw_sp_port = netdev_priv(dev); 4685 info = ptr; 4686 4687 switch (event) { 4688 case NETDEV_CHANGELOWERSTATE: 4689 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 4690 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 4691 info->lower_state_info); 4692 if (err) 4693 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 4694 } 4695 break; 4696 } 4697 4698 return 0; 4699 } 4700 4701 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 4702 struct net_device *port_dev, 4703 unsigned long event, void *ptr) 4704 { 4705 switch (event) { 4706 case NETDEV_PRECHANGEUPPER: 4707 case NETDEV_CHANGEUPPER: 4708 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 4709 event, ptr); 4710 case NETDEV_CHANGELOWERSTATE: 4711 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 4712 ptr); 4713 } 4714 4715 return 0; 4716 } 4717 4718 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 4719 unsigned long event, void *ptr) 4720 { 4721 struct net_device *dev; 4722 struct list_head *iter; 4723 int ret; 4724 4725 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4726 if (mlxsw_sp_port_dev_check(dev)) { 4727 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 4728 ptr); 4729 if (ret) 4730 return ret; 4731 } 4732 } 4733 4734 return 0; 4735 } 4736 4737 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 4738 struct net_device *dev, 4739 unsigned long event, void *ptr, 4740 u16 vid) 4741 { 4742 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4743 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4744 struct netdev_notifier_changeupper_info *info = ptr; 4745 struct netlink_ext_ack *extack; 4746 struct net_device *upper_dev; 4747 int err = 0; 4748 4749 extack = netdev_notifier_info_to_extack(&info->info); 4750 4751 switch (event) { 4752 case NETDEV_PRECHANGEUPPER: 4753 upper_dev = info->upper_dev; 4754 if (!netif_is_bridge_master(upper_dev) && 4755 !netif_is_macvlan(upper_dev) && 4756 !netif_is_l3_master(upper_dev)) { 4757 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4758 return -EINVAL; 4759 } 4760 if (!info->linking) 4761 break; 4762 if (netif_is_bridge_master(upper_dev) && 4763 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4764 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4765 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4766 return -EOPNOTSUPP; 4767 if (netdev_has_any_upper_dev(upper_dev) && 4768 (!netif_is_bridge_master(upper_dev) || 4769 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4770 upper_dev))) { 4771 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4772 return -EINVAL; 4773 } 4774 if (netif_is_macvlan(upper_dev) && 4775 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 4776 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4777 return -EOPNOTSUPP; 4778 } 4779 break; 4780 case NETDEV_CHANGEUPPER: 4781 upper_dev = info->upper_dev; 4782 if (netif_is_bridge_master(upper_dev)) { 4783 if (info->linking) 4784 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4785 vlan_dev, 4786 upper_dev, 4787 extack); 4788 else 4789 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4790 vlan_dev, 4791 upper_dev); 4792 } else if (netif_is_macvlan(upper_dev)) { 4793 if (!info->linking) 4794 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4795 } 4796 break; 4797 } 4798 4799 return err; 4800 } 4801 4802 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 4803 struct net_device *lag_dev, 4804 unsigned long event, 4805 void *ptr, u16 vid) 4806 { 4807 struct net_device *dev; 4808 struct list_head *iter; 4809 int ret; 4810 4811 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4812 if (mlxsw_sp_port_dev_check(dev)) { 4813 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 4814 event, ptr, 4815 vid); 4816 if (ret) 4817 return ret; 4818 } 4819 } 4820 4821 return 0; 4822 } 4823 4824 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 4825 struct net_device *br_dev, 4826 unsigned long event, void *ptr, 4827 u16 vid) 4828 { 4829 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 4830 struct netdev_notifier_changeupper_info *info = ptr; 4831 struct netlink_ext_ack *extack; 4832 struct net_device *upper_dev; 4833 4834 if (!mlxsw_sp) 4835 return 0; 4836 4837 extack = netdev_notifier_info_to_extack(&info->info); 4838 4839 switch (event) { 4840 case NETDEV_PRECHANGEUPPER: 4841 upper_dev = info->upper_dev; 4842 if (!netif_is_macvlan(upper_dev) && 4843 !netif_is_l3_master(upper_dev)) { 4844 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4845 return -EOPNOTSUPP; 4846 } 4847 if (!info->linking) 4848 break; 4849 if (netif_is_macvlan(upper_dev) && 4850 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 4851 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4852 return -EOPNOTSUPP; 4853 } 4854 break; 4855 case NETDEV_CHANGEUPPER: 4856 upper_dev = info->upper_dev; 4857 if (info->linking) 4858 break; 4859 if (netif_is_macvlan(upper_dev)) 4860 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4861 break; 4862 } 4863 4864 return 0; 4865 } 4866 4867 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 4868 unsigned long event, void *ptr) 4869 { 4870 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 4871 u16 vid = vlan_dev_vlan_id(vlan_dev); 4872 4873 if (mlxsw_sp_port_dev_check(real_dev)) 4874 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 4875 event, ptr, vid); 4876 else if (netif_is_lag_master(real_dev)) 4877 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 4878 real_dev, event, 4879 ptr, vid); 4880 else if (netif_is_bridge_master(real_dev)) 4881 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 4882 event, ptr, vid); 4883 4884 return 0; 4885 } 4886 4887 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 4888 unsigned long event, void *ptr) 4889 { 4890 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 4891 struct netdev_notifier_changeupper_info *info = ptr; 4892 struct netlink_ext_ack *extack; 4893 struct net_device *upper_dev; 4894 u16 proto; 4895 4896 if (!mlxsw_sp) 4897 return 0; 4898 4899 extack = netdev_notifier_info_to_extack(&info->info); 4900 4901 switch (event) { 4902 case NETDEV_PRECHANGEUPPER: 4903 upper_dev = info->upper_dev; 4904 if (!is_vlan_dev(upper_dev) && 4905 !netif_is_macvlan(upper_dev) && 4906 !netif_is_l3_master(upper_dev)) { 4907 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4908 return -EOPNOTSUPP; 4909 } 4910 if (!info->linking) 4911 break; 4912 if (br_vlan_enabled(br_dev)) { 4913 br_vlan_get_proto(br_dev, &proto); 4914 if (proto == ETH_P_8021AD) { 4915 NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge"); 4916 return -EOPNOTSUPP; 4917 } 4918 } 4919 if (is_vlan_dev(upper_dev) && 4920 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4921 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4922 return -EOPNOTSUPP; 4923 } 4924 if (netif_is_macvlan(upper_dev) && 4925 !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) { 4926 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4927 return -EOPNOTSUPP; 4928 } 4929 break; 4930 case NETDEV_CHANGEUPPER: 4931 upper_dev = info->upper_dev; 4932 if (info->linking) 4933 break; 4934 if (is_vlan_dev(upper_dev)) 4935 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 4936 if (netif_is_macvlan(upper_dev)) 4937 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4938 break; 4939 } 4940 4941 return 0; 4942 } 4943 4944 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 4945 unsigned long event, void *ptr) 4946 { 4947 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 4948 struct netdev_notifier_changeupper_info *info = ptr; 4949 struct netlink_ext_ack *extack; 4950 struct net_device *upper_dev; 4951 4952 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 4953 return 0; 4954 4955 extack = netdev_notifier_info_to_extack(&info->info); 4956 upper_dev = info->upper_dev; 4957 4958 if (!netif_is_l3_master(upper_dev)) { 4959 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4960 return -EOPNOTSUPP; 4961 } 4962 4963 return 0; 4964 } 4965 4966 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 4967 struct net_device *dev, 4968 unsigned long event, void *ptr) 4969 { 4970 struct netdev_notifier_changeupper_info *cu_info; 4971 struct netdev_notifier_info *info = ptr; 4972 struct netlink_ext_ack *extack; 4973 struct net_device *upper_dev; 4974 4975 extack = netdev_notifier_info_to_extack(info); 4976 4977 switch (event) { 4978 case NETDEV_CHANGEUPPER: 4979 cu_info = container_of(info, 4980 struct netdev_notifier_changeupper_info, 4981 info); 4982 upper_dev = cu_info->upper_dev; 4983 if (!netif_is_bridge_master(upper_dev)) 4984 return 0; 4985 if (!mlxsw_sp_lower_get(upper_dev)) 4986 return 0; 4987 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4988 return -EOPNOTSUPP; 4989 if (cu_info->linking) { 4990 if (!netif_running(dev)) 4991 return 0; 4992 /* When the bridge is VLAN-aware, the VNI of the VxLAN 4993 * device needs to be mapped to a VLAN, but at this 4994 * point no VLANs are configured on the VxLAN device 4995 */ 4996 if (br_vlan_enabled(upper_dev)) 4997 return 0; 4998 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 4999 dev, 0, extack); 5000 } else { 5001 /* VLANs were already flushed, which triggered the 5002 * necessary cleanup 5003 */ 5004 if (br_vlan_enabled(upper_dev)) 5005 return 0; 5006 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5007 } 5008 break; 5009 case NETDEV_PRE_UP: 5010 upper_dev = netdev_master_upper_dev_get(dev); 5011 if (!upper_dev) 5012 return 0; 5013 if (!netif_is_bridge_master(upper_dev)) 5014 return 0; 5015 if (!mlxsw_sp_lower_get(upper_dev)) 5016 return 0; 5017 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 5018 extack); 5019 case NETDEV_DOWN: 5020 upper_dev = netdev_master_upper_dev_get(dev); 5021 if (!upper_dev) 5022 return 0; 5023 if (!netif_is_bridge_master(upper_dev)) 5024 return 0; 5025 if (!mlxsw_sp_lower_get(upper_dev)) 5026 return 0; 5027 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5028 break; 5029 } 5030 5031 return 0; 5032 } 5033 5034 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 5035 unsigned long event, void *ptr) 5036 { 5037 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5038 struct mlxsw_sp_span_entry *span_entry; 5039 struct mlxsw_sp *mlxsw_sp; 5040 int err = 0; 5041 5042 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 5043 if (event == NETDEV_UNREGISTER) { 5044 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 5045 if (span_entry) 5046 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 5047 } 5048 mlxsw_sp_span_respin(mlxsw_sp); 5049 5050 if (netif_is_vxlan(dev)) 5051 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 5052 else if (mlxsw_sp_port_dev_check(dev)) 5053 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 5054 else if (netif_is_lag_master(dev)) 5055 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 5056 else if (is_vlan_dev(dev)) 5057 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 5058 else if (netif_is_bridge_master(dev)) 5059 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 5060 else if (netif_is_macvlan(dev)) 5061 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 5062 5063 return notifier_from_errno(err); 5064 } 5065 5066 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 5067 .notifier_call = mlxsw_sp_inetaddr_valid_event, 5068 }; 5069 5070 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 5071 .notifier_call = mlxsw_sp_inet6addr_valid_event, 5072 }; 5073 5074 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 5075 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 5076 {0, }, 5077 }; 5078 5079 static struct pci_driver mlxsw_sp1_pci_driver = { 5080 .name = mlxsw_sp1_driver_name, 5081 .id_table = mlxsw_sp1_pci_id_table, 5082 }; 5083 5084 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 5085 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 5086 {0, }, 5087 }; 5088 5089 static struct pci_driver mlxsw_sp2_pci_driver = { 5090 .name = mlxsw_sp2_driver_name, 5091 .id_table = mlxsw_sp2_pci_id_table, 5092 }; 5093 5094 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 5095 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 5096 {0, }, 5097 }; 5098 5099 static struct pci_driver mlxsw_sp3_pci_driver = { 5100 .name = mlxsw_sp3_driver_name, 5101 .id_table = mlxsw_sp3_pci_id_table, 5102 }; 5103 5104 static const struct pci_device_id mlxsw_sp4_pci_id_table[] = { 5105 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0}, 5106 {0, }, 5107 }; 5108 5109 static struct pci_driver mlxsw_sp4_pci_driver = { 5110 .name = mlxsw_sp4_driver_name, 5111 .id_table = mlxsw_sp4_pci_id_table, 5112 }; 5113 5114 static int __init mlxsw_sp_module_init(void) 5115 { 5116 int err; 5117 5118 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5119 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5120 5121 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 5122 if (err) 5123 goto err_sp1_core_driver_register; 5124 5125 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 5126 if (err) 5127 goto err_sp2_core_driver_register; 5128 5129 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 5130 if (err) 5131 goto err_sp3_core_driver_register; 5132 5133 err = mlxsw_core_driver_register(&mlxsw_sp4_driver); 5134 if (err) 5135 goto err_sp4_core_driver_register; 5136 5137 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 5138 if (err) 5139 goto err_sp1_pci_driver_register; 5140 5141 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 5142 if (err) 5143 goto err_sp2_pci_driver_register; 5144 5145 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 5146 if (err) 5147 goto err_sp3_pci_driver_register; 5148 5149 err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver); 5150 if (err) 5151 goto err_sp4_pci_driver_register; 5152 5153 return 0; 5154 5155 err_sp4_pci_driver_register: 5156 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5157 err_sp3_pci_driver_register: 5158 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5159 err_sp2_pci_driver_register: 5160 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5161 err_sp1_pci_driver_register: 5162 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5163 err_sp4_core_driver_register: 5164 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5165 err_sp3_core_driver_register: 5166 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5167 err_sp2_core_driver_register: 5168 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5169 err_sp1_core_driver_register: 5170 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5171 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5172 return err; 5173 } 5174 5175 static void __exit mlxsw_sp_module_exit(void) 5176 { 5177 mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver); 5178 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5179 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5180 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5181 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5182 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5183 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5184 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5185 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5186 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5187 } 5188 5189 module_init(mlxsw_sp_module_init); 5190 module_exit(mlxsw_sp_module_exit); 5191 5192 MODULE_LICENSE("Dual BSD/GPL"); 5193 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5194 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 5195 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5196 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5197 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 5198 MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); 5199 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5200 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 5201 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 5202 MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME); 5203