// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <linux/ptp_classify.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

/* Minor/subminor firmware revision shared by all Spectrum generations. */
#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

/* Per-ASIC base MAC masks used when deriving per-port MAC addresses. */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* Read a flow counter's packet and byte values via the MGPC register,
 * optionally clearing the counter as part of the same query.
 * 'packets' / 'bytes' may be NULL when the caller does not need them.
 * Returns 0 on success or a negative errno from the register query.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, bool clear,
			      u64 *packets, u64 *bytes)
{
	enum mlxsw_reg_mgpc_opcode op = clear ? MLXSW_REG_MGPC_OPCODE_CLEAR :
						MLXSW_REG_MGPC_OPCODE_NOP;
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, op,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

/* Zero a flow counter in hardware via an MGPC clear write. */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a counter from the flow sub-pool and clear it so the caller
 * starts counting from zero. On clear failure the counter is returned
 * to the pool before propagating the error.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

/* Return a flow counter previously obtained from mlxsw_sp_flow_counter_alloc(). */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

/* True if the skb both requests a HW TX timestamp and parses as a PTP
 * packet (i.e. ptp_parse_header() finds a PTP header for its class).
 */
static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
	unsigned int type;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return false;

	type = ptp_classify_raw(skb);
	return !!ptp_parse_header(skb, type);
}

/* Mark the TX-header info for "data packet" transmission and record the
 * maximum FID, needed when PTP events are sent as data packets.
 */
static void mlxsw_sp_txhdr_info_data_init(struct mlxsw_core *mlxsw_core,
					  struct sk_buff *skb,
					  struct mlxsw_txhdr_info *txhdr_info)
{
	/* Resource validation was done as part of PTP init. */
	u16 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

	txhdr_info->data = true;
	txhdr_info->max_fid = max_fid;
}

static struct sk_buff *
mlxsw_sp_vlan_tag_push(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb)
{
	/* In some Spectrum ASICs, in order for PTP event packets to have their
	 * correction field correctly set on the egress port they must be
	 * transmitted as data packets. Such packets ingress the ASIC via the
	 * CPU port and must have a VLAN tag, as the CPU port is not configured
	 * with a PVID. Push the default VLAN (4095), which is configured as
	 * egress untagged on all the ports.
	 */
	if (skb_vlan_tagged(skb))
		return skb;

	return vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
					 MLXSW_SP_DEFAULT_VID);
}

/* Prepare an skb for transmission: for PTP packets that must be time
 * stamped on ASICs that transmit them as data packets, fill the TX
 * header info and push the default VLAN tag. Returns the (possibly
 * reallocated) skb, or NULL if the VLAN push failed and consumed it.
 */
static struct sk_buff *
mlxsw_sp_txhdr_preparations(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			    struct mlxsw_txhdr_info *txhdr_info)
{
	if (likely(!mlxsw_sp_skb_requires_ts(skb)))
		return skb;

	if (!mlxsw_sp->ptp_ops->tx_as_data)
		return skb;

	/* Special handling for PTP events that require a time stamp and cannot
	 * be transmitted as regular control packets.
	 */
	mlxsw_sp_txhdr_info_data_init(mlxsw_sp->core, skb, txhdr_info);
	return mlxsw_sp_vlan_tag_push(mlxsw_sp, skb);
}

/* Map a bridge STP state to the corresponding SPMS register state.
 * Listening/disabled/blocking all map to discarding; an unknown state
 * is a driver bug and triggers BUG().
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

/* Program the STP state of a single VID on a port via the SPMS register. */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	/* SPMS payload is too large for the stack; allocate it. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Query the device base MAC (SPAD register) into mlxsw_sp->base_mac. */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

/* Set the administrative (PAOS) status of a port to up or down. */
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

/* Program a port's hardware MAC address via the PPAD register. */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port MAC from the device base MAC and local port number,
 * then program it into hardware.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}

/* Program the port MTU (PMTU register). The Ethernet frame header
 * overhead is added on top of the L3 MTU the stack hands us.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_PORT_ETH_FRAME_HDR;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

/* Assign a port to a switch partition (SWID) via the PSPA register. */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

/* Enable or disable virtual-port (VLAN-aware) mode on a port (SVPE). */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable or disable FDB learning for a single VID on a port (SPVMLR). */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	/* SPVMLR payload is too large for the stack; allocate it. */
	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

/* Enable or disable port security (SPFSR). The cached 'security' flag is
 * only updated after the register write succeeds, and a redundant write
 * is skipped when the requested state is already programmed.
 */
int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spfsr_pl[MLXSW_REG_SPFSR_LEN];
	int err;

	if (mlxsw_sp_port->security == enable)
		return 0;

	mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
	if (err)
		return err;

	mlxsw_sp_port->security = enable;
	return 0;
}

/* Translate an Ethertype (802.1Q / 802.1AD) into the SVER type value
 * used by SPEVET/SPVID. Returns -EINVAL for unsupported Ethertypes.
 */
int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Set the Ethertype used for tags pushed on egress (SPEVET register). */
int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

/* Program the port PVID for the given tag Ethertype (SPVID register). */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

/* Allow or disallow untagged frames on a port (SPAFT register). */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

/* Set the port PVID. VID 0 means "no PVID": untagged frames are then
 * disallowed instead of being classified. On partial failure the old
 * PVID is restored before returning the error.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}

/* Create the local-port to system-port mapping (SSPR register). */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

/* Parse a PMLP payload into a port mapping, rejecting configurations the
 * driver does not support: non-power-of-2 width, lanes spread over
 * multiple modules or slots, split RX/TX lanes, or non-sequential lanes.
 */
static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

/* Query a port's PMLP register and parse it into 'port_mapping'. */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
					       pmlp_pl, port_mapping);
}

/* Map a local port onto its module lanes (PMLP write) and register the
 * port with the environment module tracking. The tracking registration
 * is rolled back if the PMLP write fails.
 */
static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
				  port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
					      port_mapping->slot_index);
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
				    port_mapping->module);
	return err;
}

/* Undo mlxsw_sp_port_module_map(): zero the port width and unregister
 * the port from environment module tracking. The PMLP write result is
 * intentionally ignored on this teardown path.
 */
static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 slot_index, u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}

/* ndo_open: bring the module port up, set admin status and start the
 * TX queue. Failures unwind the module port-up registration.
 */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.slot_index,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return err;
}

/* ndo_stop: reverse of mlxsw_sp_port_open(). */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}

/* ndo_start_xmit: transmit an skb through the switch CPU port. Handles
 * padding of short frames, PTP-specific TX header preparation and
 * per-CPU TX statistics accounting. Always returns NETDEV_TX_OK except
 * when the core transmit path is busy (NETDEV_TX_BUSY).
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	struct mlxsw_txhdr_info txhdr_info = {
		.tx_info.local_port = mlxsw_sp_port->local_port,
		.tx_info.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &txhdr_info.tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		/* eth_skb_pad() frees the skb on failure. */
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	skb = mlxsw_sp_txhdr_preparations(mlxsw_sp, skb, &txhdr_info);
	if (!skb) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &txhdr_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: nothing to do, but the callback must exist. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

/* ndo_set_mac_address: validate and program a new MAC, updating the
 * netdev copy only after the hardware write succeeded.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

/* ndo_change_mtu: resize the port headroom buffers for the new MTU
 * first, then program the MTU. On MTU-set failure the original
 * headroom configuration is restored.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	WRITE_ONCE(dev->mtu, mtu);
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}

/* Sum the per-CPU software counters (CPU-trapped traffic) into 'stats',
 * using the u64 stats seqcount to get consistent per-CPU snapshots.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* ndo_has_offload_stats: only CPU-hit offload stats are supported. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* ndo_get_offload_stats: report CPU-hit stats from the SW counters. */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query a raw PPCNT counter group/priority for a port into 'ppcnt_pl'. */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill 'stats' from the IEEE 802.3 PPCNT counter group. */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Fill extended per-TC / per-priority counters. Individual PPCNT query
 * failures are tolerated: the affected entries are simply left stale
 * and collection continues with the next group.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Periodic work: refresh the cached HW statistics while the carrier is
 * up, then re-arm itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program a contiguous VID range's membership/untagged state (SPVM). */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	/* SPVM payload is too large for the stack; allocate it. */
	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Program VID membership for [vid_begin, vid_end], chunked to the SPVM
 * register's maximum record count per write.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLANs on a port; the default VID is spared unless
 * 'flush_default' is set.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach a port VLAN from whatever is using it: a bridge port, or the
 * router (indicated by an associated FID).
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a {port, VID} entry: program membership in hardware, allocate
 * the tracking structure and link it into the port's VLAN list.
 * Returns ERR_PTR(-EEXIST) if the VID already exists on the port.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Tear down a {port, VID} entry: leave bridge/router, unlink, free and
 * remove the VID membership from hardware.
 */
void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* ndo_vlan_rx_add_vid implementation. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

/* ndo_vlan_rx_kill_vid implementation. */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Dispatch a flow block bind/unbind to the appropriate handler based on
 * where the block is attached (clsact ingress/egress, RED qevents).
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_setup_tc: dispatch TC offload requests by setup type. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Toggle NETIF_F_HW_TC. Disabling is refused while offloaded filters
 * are still installed on either flow block.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

/* Toggle NETIF_F_LOOPBACK via the PPLR register. The port is taken
 * administratively down around the change when it is running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply one feature toggle if the requested state differs from the
 * current one, and mirror the result into dev->features on success.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

/* ndo_set_features: apply each supported feature toggle; on any
 * failure restore the previously-effective feature set.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

/* SIOCSHWTSTAMP: copy the config from user space, apply it via the
 * per-ASIC PTP ops and copy the (possibly adjusted) config back.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* SIOCGHWTSTAMP: report the current hardware timestamping config. */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Reset hardware timestamping on a port to the all-zero (off) config. */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* ndo_eth_ioctl: dispatch hardware timestamping ioctls. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
case SIOCSHWTSTAMP: 1213 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1214 case SIOCGHWTSTAMP: 1215 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1216 default: 1217 return -EOPNOTSUPP; 1218 } 1219 } 1220 1221 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1222 .ndo_open = mlxsw_sp_port_open, 1223 .ndo_stop = mlxsw_sp_port_stop, 1224 .ndo_start_xmit = mlxsw_sp_port_xmit, 1225 .ndo_setup_tc = mlxsw_sp_setup_tc, 1226 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1227 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1228 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1229 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1230 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1231 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1232 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1233 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1234 .ndo_set_features = mlxsw_sp_set_features, 1235 .ndo_eth_ioctl = mlxsw_sp_port_ioctl, 1236 }; 1237 1238 static int 1239 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) 1240 { 1241 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1242 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 1243 const struct mlxsw_sp_port_type_speed_ops *ops; 1244 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1245 u32 eth_proto_cap_masked; 1246 int err; 1247 1248 ops = mlxsw_sp->port_type_speed_ops; 1249 1250 /* Set advertised speeds to speeds supported by both the driver 1251 * and the device. 
1252 */ 1253 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 1254 0, false); 1255 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1256 if (err) 1257 return err; 1258 1259 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 1260 ð_proto_admin, ð_proto_oper); 1261 eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap); 1262 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 1263 eth_proto_cap_masked, 1264 mlxsw_sp_port->link.autoneg); 1265 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1266 } 1267 1268 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed) 1269 { 1270 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; 1271 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1272 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1273 u32 eth_proto_oper; 1274 int err; 1275 1276 port_type_speed_ops = mlxsw_sp->port_type_speed_ops; 1277 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, 1278 mlxsw_sp_port->local_port, 0, 1279 false); 1280 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1281 if (err) 1282 return err; 1283 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL, 1284 ð_proto_oper); 1285 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper); 1286 return 0; 1287 } 1288 1289 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 1290 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 1291 bool dwrr, u8 dwrr_weight) 1292 { 1293 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1294 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1295 1296 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1297 next_index); 1298 mlxsw_reg_qeec_de_set(qeec_pl, true); 1299 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 1300 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 1301 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1302 } 1303 1304 int mlxsw_sp_port_ets_maxrate_set(struct 
mlxsw_sp_port *mlxsw_sp_port, 1305 enum mlxsw_reg_qeec_hr hr, u8 index, 1306 u8 next_index, u32 maxrate, u8 burst_size) 1307 { 1308 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1309 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1310 1311 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1312 next_index); 1313 mlxsw_reg_qeec_mase_set(qeec_pl, true); 1314 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 1315 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size); 1316 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1317 } 1318 1319 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 1320 enum mlxsw_reg_qeec_hr hr, u8 index, 1321 u8 next_index, u32 minrate) 1322 { 1323 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1324 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1325 1326 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1327 next_index); 1328 mlxsw_reg_qeec_mise_set(qeec_pl, true); 1329 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 1330 1331 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1332 } 1333 1334 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 1335 u8 switch_prio, u8 tclass) 1336 { 1337 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1338 char qtct_pl[MLXSW_REG_QTCT_LEN]; 1339 1340 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 1341 tclass); 1342 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 1343 } 1344 1345 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 1346 { 1347 int err, i; 1348 1349 /* Setup the elements hierarcy, so that each TC is linked to 1350 * one subgroup, which are all member in the same group. 
1351 */ 1352 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1353 MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0); 1354 if (err) 1355 return err; 1356 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1357 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1358 MLXSW_REG_QEEC_HR_SUBGROUP, i, 1359 0, false, 0); 1360 if (err) 1361 return err; 1362 } 1363 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1364 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1365 MLXSW_REG_QEEC_HR_TC, i, i, 1366 false, 0); 1367 if (err) 1368 return err; 1369 1370 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1371 MLXSW_REG_QEEC_HR_TC, 1372 i + 8, i, 1373 true, 100); 1374 if (err) 1375 return err; 1376 } 1377 1378 /* Make sure the max shaper is disabled in all hierarchies that support 1379 * it. Note that this disables ptps (PTP shaper), but that is intended 1380 * for the initial configuration. 1381 */ 1382 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1383 MLXSW_REG_QEEC_HR_PORT, 0, 0, 1384 MLXSW_REG_QEEC_MAS_DIS, 0); 1385 if (err) 1386 return err; 1387 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1388 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1389 MLXSW_REG_QEEC_HR_SUBGROUP, 1390 i, 0, 1391 MLXSW_REG_QEEC_MAS_DIS, 0); 1392 if (err) 1393 return err; 1394 } 1395 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1396 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1397 MLXSW_REG_QEEC_HR_TC, 1398 i, i, 1399 MLXSW_REG_QEEC_MAS_DIS, 0); 1400 if (err) 1401 return err; 1402 1403 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1404 MLXSW_REG_QEEC_HR_TC, 1405 i + 8, i, 1406 MLXSW_REG_QEEC_MAS_DIS, 0); 1407 if (err) 1408 return err; 1409 } 1410 1411 /* Configure the min shaper for multicast TCs. */ 1412 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1413 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 1414 MLXSW_REG_QEEC_HR_TC, 1415 i + 8, i, 1416 MLXSW_REG_QEEC_MIS_MIN); 1417 if (err) 1418 return err; 1419 } 1420 1421 /* Map all priorities to traffic class 0. 
*/ 1422 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1423 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 1424 if (err) 1425 return err; 1426 } 1427 1428 return 0; 1429 } 1430 1431 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 1432 bool enable) 1433 { 1434 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1435 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 1436 1437 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 1438 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 1439 } 1440 1441 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port) 1442 { 1443 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1444 u8 slot_index = mlxsw_sp_port->mapping.slot_index; 1445 u8 module = mlxsw_sp_port->mapping.module; 1446 u64 overheat_counter; 1447 int err; 1448 1449 err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index, 1450 module, &overheat_counter); 1451 if (err) 1452 return err; 1453 1454 mlxsw_sp_port->module_overheat_initial_val = overheat_counter; 1455 return 0; 1456 } 1457 1458 int 1459 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, 1460 bool is_8021ad_tagged, 1461 bool is_8021q_tagged) 1462 { 1463 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1464 char spvc_pl[MLXSW_REG_SPVC_LEN]; 1465 1466 mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port, 1467 is_8021ad_tagged, is_8021q_tagged); 1468 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl); 1469 } 1470 1471 static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp, 1472 u16 local_port, u8 *port_number, 1473 u8 *split_port_subnumber, 1474 u8 *slot_index) 1475 { 1476 char pllp_pl[MLXSW_REG_PLLP_LEN]; 1477 int err; 1478 1479 mlxsw_reg_pllp_pack(pllp_pl, local_port); 1480 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl); 1481 if (err) 1482 return err; 1483 mlxsw_reg_pllp_unpack(pllp_pl, port_number, 1484 split_port_subnumber, slot_index); 
	return 0;
}

/* Create and register one front-panel port: map its module, program SWID,
 * init the core port, allocate the netdev and bring up all per-port
 * subsystems (buffers, ETS, DCB, FIDs, qdiscs, NVE, VLAN). Unwinds in
 * exact reverse order on failure via the goto ladder.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	/* A multi-lane port that is not already split can be split further. */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
				    mlxsw_sp_port, dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_TC | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->lltx = true;
	dev->netns_immutable = true;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB.
	 */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
	 * only packets with 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}

/* Tear down one front-panel port: mirror image of mlxsw_sp_port_create(). */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}

/* Create the CPU port representation; it has no netdev of its own. */
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port =
		kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

/* The CPU port is handled separately from the front-panel ports. */
static bool mlxsw_sp_local_port_valid(u16 local_port)
{
	return local_port != MLXSW_PORT_CPU_PORT;
}

/* True when a front-panel port object exists for this local port. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (!mlxsw_sp_local_port_valid(local_port))
		return false;
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Enable/disable PMLP module-mapping change events for a port (PMECR). */
static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
					   u16 local_port, bool enable)
{
	char pmecr_pl[MLXSW_REG_PMECR_LEN];

	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
			     enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
				      MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
}

/* One queued port-mapping change event, carrying the raw PMLP payload. */
struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};

/* Worker that drains the port-mapping event queue and creates newly
 * mapped ports under the devlink instance lock.
 */
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	/* Take the whole queue at once; listener adds under the same lock. */
	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		kfree(event);
	}
}

/* Event listener (atomic context): copy the PMLP payload, queue it and
 * kick the worker. Allocation failure just drops the event.
 */
static void
mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
				    char *pmlp_pl, void *priv)
{
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping_event *event;
	struct mlxsw_sp *mlxsw_sp = priv;
	u16 local_port;

	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;

	events = &mlxsw_sp->port_mapping_events;
	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
	spin_lock(&events->queue_lock);
	list_add_tail(&event->list, &events->queue);
	spin_unlock(&events->queue_lock);
	mlxsw_core_schedule_work(&events->work);
}

static void
__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;

	events = &mlxsw_sp->port_mapping_events;

	/* Caller needs to make sure that no new event is going to appear. */
	cancel_work_sync(&events->work);
	list_for_each_entry_safe(event, next_event, &events->queue, list) {
		list_del(&event->list);
		kfree(event);
	}
}

/* Remove all ports: first silence and drain mapping events, then remove
 * each created port and finally the CPU port and the ports array.
 */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = 1; i < max_ports; i++)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

/* Remove only the ports chosen by the caller-supplied selector. */
static void
mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
			       bool (*selector)(void *priv, u16 local_port),
			       void *priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
	int i;

	for (i = 1; i < max_ports; i++)
		if
		    (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
}

/* Create the CPU port and every front-panel port that currently has a
 * module mapping; enable mapping-change events first so late mappings
 * are picked up by the event worker.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		/* Width of zero means no module is mapped to this port. */
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	/* Reset i so the event-disable loop below covers all ports. */
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}

/* Snapshot the module mapping of every port into mlxsw_sp->port_mapping. */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
		if (err)
			goto err_port_module_info_get;
	}
	return 0;

err_port_module_info_get:
	kfree(mlxsw_sp->port_mapping);
	return err;
}

static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}

/* Create the split sub-ports listed by PMTDB, each using width/count
 * lanes, advancing the starting lane for every sub-port.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 unsigned int count,
					 const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them.
	 */
	for (i = 0; i < count; i++) {
		u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		port_mapping = &mlxsw_sp->port_mapping[local_port];
		if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
			continue;
		mlxsw_sp_port_create(mlxsw_sp, local_port,
				     false, port_mapping);
	}
}

static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
		return mlxsw_sp->ports[local_port];
	return NULL;
}

/* devlink port split: validate via PMTDB, remove the affected ports and
 * create the split sub-ports; on failure recreate the original ports.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	/* Copy the mapping before the port object is destroyed below. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}

/* devlink port unsplit: remove the split sub-ports listed by PMTDB. */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}

/* Zero the cached per-TC backlog stats when a port loses carrier. */
static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < TC_MAX_QUEUE; i++)
		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
}

/* PUDE (port up/down event) handler: propagate operational status from
 * the device to the netdev carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u16 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}

/* Drain PTP timestamp records from an MTPPTR event payload and hand each
 * record to the Spectrum-1 PTP code.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

/* Base RX handler for trapped packets: attribute the skb to its ingress
 * port, bump per-CPU counters and deliver it up the stack via GRO.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}

/* As above, but mark the packet as already L2-forwarded by hardware. */
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* As above, but mark the packet as both L2- and L3-forwarded by hardware. */
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u16 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Traps and events common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};

/* Spectrum-1 only: PTP timestamp FIFO events. */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Spectrum-2 and later: port mapping change events. */
static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};

/* Configure the CPU policers (QPCR) used to rate-limit trapped traffic
 * towards the CPU, and record which policer indexes are in use.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Bind each trap group handled here to a priority, traffic class and
 * policer via the HTGT register.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* Policer index equals the trap group index by convention
		 * in this driver, except for groups with no policer.
		 */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Allocate the trap state, program policers and trap groups, and register
 * the common plus per-ASIC listener arrays.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}

/* Reserve the PGT range backing the LAG table when LAG is in software
 * mode, and point the device at it via SGCR.
 */
static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
	char sgcr_pl[MLXSW_REG_SGCR_LEN];
	int err;

	if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
	    MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
		return 0;

	/* In DDD mode, which we by default use, each LAG entry is 8 PGT
	 * entries. The LAG table address needs to be 8-aligned, but that ought
	 * to be the case, since the LAG table is allocated first.
	 */
	err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
					   mlxsw_sp->max_lag * 8);
	if (err)
		return err;
	if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
		err = -EINVAL;
		goto err_mid_alloc_range;
	}

	mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl);
	if (err)
		goto err_mid_alloc_range;

	return 0;

err_mid_alloc_range:
	mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
				    mlxsw_sp->max_lag * 8);
	return err;
}

static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
{
	if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
	    MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
		return;

	mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
				    mlxsw_sp->max_lag * 8);
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

struct mlxsw_sp_lag {
	struct net_device *dev;
	refcount_t ref_count;
	u16 lag_id;
};

static int
mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	/* Seed the LAG hash from the base MAC so it differs per system. */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	err = mlxsw_sp_lag_pgt_init(mlxsw_sp);
	if (err)
		return err;

	mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags) {
		err = -ENOMEM;
		goto err_kcalloc;
	}

	return 0;

err_kcalloc:
	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
	return err;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
	kfree(mlxsw_sp->lags);
}

/* Per-generation PTP callbacks: Spectrum-1. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init = mlxsw_sp1_ptp_clock_init,
	.clock_fini = mlxsw_sp1_ptp_clock_fini,
	.init = mlxsw_sp1_ptp_init,
	.fini = mlxsw_sp1_ptp_fini,
	.receive = mlxsw_sp1_ptp_receive,
	.transmitted = mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp1_ptp_shaper_work,
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
#endif
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats = mlxsw_sp1_get_stats,
};

/* Per-generation PTP callbacks: Spectrum-2/3. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini = mlxsw_sp2_ptp_fini,
	.receive = mlxsw_sp2_ptp_receive,
	.transmitted = mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp2_ptp_shaper_work,
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
#endif
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats = mlxsw_sp2_get_stats,
	.tx_as_data = true,
};

/* Per-generation PTP callbacks: Spectrum-4 (reuses Spectrum-2 handlers,
 * but without tx_as_data).
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini = mlxsw_sp2_ptp_fini,
	.receive = mlxsw_sp2_ptp_receive,
	.transmitted = mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp2_ptp_shaper_work,
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
#endif
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats = mlxsw_sp2_get_stats,
};

struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};

/* Build a canonical (zero-padded) lookup key from a trigger. */
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}

/* RCU read lock must be held */
struct mlxsw_sp_sample_params *
mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	mlxsw_sp_sample_trigger_key_init(&key, trigger);
	trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
					 mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return NULL;

	return &trigger_node->params;
}

static int
mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_sample_trigger *trigger,
				  const struct mlxsw_sp_sample_params *params)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	int err;

	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
	if (!trigger_node)
		return -ENOMEM;

	trigger_node->trigger = *trigger;
	trigger_node->params = *params;
	refcount_set(&trigger_node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
				     &trigger_node->ht_node,
				     mlxsw_sp_sample_trigger_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(trigger_node);
	return err;
}

static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	/* Freed via RCU since readers look the node up under RCU. */
	kfree_rcu(trigger_node, rcu);
}

/* Register sampling parameters for a trigger, or take another reference
 * when identical parameters are already installed.
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}

void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return;

	if (!refcount_dec_and_test(&trigger_node->refcount))
		return;

mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789

/* Set default packet parsing depth / VxLAN port state. */
static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
	mutex_init(&mlxsw_sp->parsing.lock);
}

static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
	/* All parsing-depth references should have been released by now. */
	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
}

/* Reference-counted cache entry mapping an IPv6 address to its KVDL
 * entry, keyed by the address itself.
 */
struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;
	struct rhash_head ht_node;
	u32 kvdl_index;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};

/* Allocate a KVDL entry for the address, program it via RIPS and insert
 * a cache node. Called with ipv6_addr_ht_lock held by the caller.
 */
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}

static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}

/* Get (or create) the KVDL index for an IPv6 address, taking a reference
 * on the cache entry.
 */
int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}

/* Drop a reference taken by mlxsw_sp_ipv6_addr_kvdl_index_get() and free
 * the entry on last put.
 */
void
mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
{
	struct mlxsw_sp_ipv6_addr_node *node;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (WARN_ON(!node))
		goto out_unlock;

	if (!refcount_dec_and_test(&node->refcount))
		goto out_unlock;

mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node); 3005 3006 out_unlock: 3007 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 3008 } 3009 3010 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp) 3011 { 3012 int err; 3013 3014 err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht, 3015 &mlxsw_sp_ipv6_addr_ht_params); 3016 if (err) 3017 return err; 3018 3019 mutex_init(&mlxsw_sp->ipv6_addr_ht_lock); 3020 return 0; 3021 } 3022 3023 static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp) 3024 { 3025 mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock); 3026 rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht); 3027 } 3028 3029 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3030 const struct mlxsw_bus_info *mlxsw_bus_info, 3031 struct netlink_ext_ack *extack) 3032 { 3033 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3034 int err; 3035 3036 mlxsw_sp->core = mlxsw_core; 3037 mlxsw_sp->bus_info = mlxsw_bus_info; 3038 3039 mlxsw_sp_parsing_init(mlxsw_sp); 3040 3041 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3042 if (err) { 3043 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 3044 return err; 3045 } 3046 3047 err = mlxsw_sp_kvdl_init(mlxsw_sp); 3048 if (err) { 3049 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 3050 return err; 3051 } 3052 3053 err = mlxsw_sp_pgt_init(mlxsw_sp); 3054 if (err) { 3055 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n"); 3056 goto err_pgt_init; 3057 } 3058 3059 /* Initialize before FIDs so that the LAG table is at the start of PGT 3060 * and 8-aligned without overallocation. 
	 */
	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp->fid_core_ops->init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fid_core_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_port_range_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
		goto err_port_range_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* The PTP clock is only instantiated when the bus can read the
	 * hardware clock.
	 */
	if (mlxsw_sp->bus_info->read_clock_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	/* PTP state is only set up when a clock exists. */
	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	/* Ports are created last, once all sub-modules they depend on are up. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Error unwind: tear down in exact reverse order of initialization. */
err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_port_range_fini(mlxsw_sp);
err_port_range_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
err_fid_core_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1: bind the SP1-specific operation tables into the driver
 * private area, then run the common initialization.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-2: same flow as SP1 with SP2 operation tables. */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners =
		mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-3: reuses the SP2 operation tables, except for the SP3-specific
 * shared-buffer and SPAN ops and the SP3 shaper bucket size.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-4: mostly SP2/SP3 tables, with SP4-specific flex-key, ACL Bloom
 * filter and PTP ops.
 */
static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Tear down everything mlxsw_sp_init() set up, in reverse order. */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP state and clock only exist when a clock was created at init. */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_port_range_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}

/* Firmware configuration profile for Spectrum-1; the only profile that
 * carries explicit KVD partition sizes (used_kvd_sizes).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Firmware configuration profile shared by Spectrum-2 and Spectrum-3. */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type = 1,
	.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw = true,
	.flood_mode_prefer_cff = true,
};

/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
 * table.
 */
#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128

/* Firmware configuration profile for Spectrum-4; like SP2/SP3 but with a
 * capped LAG count (see above).
 */
static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag = 1,
	.max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type = 1,
	.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw = true,
	.flood_mode_prefer_cff = true,
};

/* Fill devlink size-parameter structs for the KVD and its linear/hash
 * partitions, bounded by the device-reported minimum sizes.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the KVD devlink resource tree for Spectrum-1: the KVD itself plus
 * its linear, hash-double and hash-single partitions.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				     kvd_size, MLXSW_SP_RESOURCE_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
				     linear_size,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     MLXSW_SP_RESOURCE_KVD,
				     &linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the non-linear remainder between double and single hash
	 * entries according to the profile's parts ratio, rounded down to
	 * the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
				     double_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
				     single_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* Spectrum-2 and later expose the KVD as a single unpartitioned resource. */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				      kvd_size, MLXSW_SP_RESOURCE_KVD,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &kvd_size_params);
}

/* Register the SPAN agents devlink resource (fixed size: MAX_SPAN). */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
				      max_span, MLXSW_SP_RESOURCE_SPAN,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &span_size_params);
}

/* Register the RIF MAC profiles devlink resource. */
static int
mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core
					    *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u8 max_rif_mac_profiles;

	/* Fall back to a single profile when the device does not report
	 * the resource.
	 */
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
		max_rif_mac_profiles = 1;
	else
		max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
							  MAX_RIF_MAC_PROFILES);
	devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
					  max_rif_mac_profiles, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink,
				      "rif_mac_profiles",
				      max_rif_mac_profiles,
				      MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

/* Register the router interfaces (RIFs) devlink resource. */
static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
	devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, "rifs", max_rifs,
				      MLXSW_SP_RESOURCE_RIFS,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

/* Register the ACL L4 port-range registers devlink resource. */
static int
mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u64 max;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
		return -EIO;

	max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
	devlink_resource_size_params_init(&size_params, max, max, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, "port_range_registers", max,
				      MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

/* Register all Spectrum-1 devlink resources; a single
 * devl_resources_unregister() call unwinds every prior registration.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_resources_port_range_register;

	return 0;

err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}

/* Same as mlxsw_sp1_resources_register(), but with the unpartitioned SP2+
 * KVD resource.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err =
	    mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_resources_port_range_register;

	return 0;

	/* devl_resources_unregister() drops everything registered so far. */
err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}

/* Resolve the effective KVD partition sizes: prefer user-set devlink
 * resource sizes, fall back to the profile defaults, and sanity-check the
 * result against the device-reported minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the linear part.
	 * It is split into the single size and double size by the parts
	 * ratio from the profile. Both sizes must be multiples of the
	 * granularity from the profile. In case the user provided the
	 * sizes, they are obtained via devlink.
	 */
	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* Hand a transmitted skb back to the PTP code after stripping the TX header. */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u16 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* mlxsw core driver description for Spectrum-1. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp1_fw_rev,
	.fw_filename = MLXSW_SP1_FW_FILENAME,
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.profile = &mlxsw_sp1_config_profile,
	.sdq_supports_cqe_v2 = false,
};

/* mlxsw core driver description for Spectrum-2. Unlike SP1 it supports
 * selective port removal and CQE v2, and has no kvd_sizes_get callback.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp2_fw_rev,
	.fw_filename = MLXSW_SP2_FW_FILENAME,
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.resources_register = mlxsw_sp2_resources_register,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.profile = &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2 = true,
};

/* mlxsw core driver description for Spectrum-3; identical to SP2 except for
 * its own firmware revision/filename and init callback.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp3_fw_rev,
	.fw_filename = MLXSW_SP3_FW_FILENAME,
	.init = mlxsw_sp3_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get =
	    mlxsw_sp_trap_policer_counter_get,
	.resources_register = mlxsw_sp2_resources_register,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.profile = &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2 = true,
};

/* mlxsw core driver description for Spectrum-4.
 * NOTE(review): unlike SP1-SP3, no fw_req_rev/fw_filename is set here, so no
 * firmware revision is enforced by this descriptor — confirm intentional.
 */
static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind = mlxsw_sp4_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp4_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.resources_register = mlxsw_sp2_resources_register,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.profile = &mlxsw_sp4_config_profile,
	.sdq_supports_cqe_v2 = true,
};

/* True iff @dev is a mlxsw_sp front-panel port netdevice. */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* Lower-device walk callback: record the first mlxsw_sp port found in
 * priv->data; a non-zero return stops the walk.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
				   struct netdev_nested_priv *priv)
{
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw_sp port under @dev (or @dev itself), or NULL. */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

/* Resolve the mlxsw_sp instance backing @dev, or NULL if none. */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(). */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

/* Refcounted request for increased parser depth; only the first caller
 * writes the MPRS register.
 */
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

/* Drop a parser-depth reference; the last caller restores the default depth
 * (register write result is ignored on this best-effort path).
 */
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}

/* Program the VxLAN UDP destination port into the parser; the cached value
 * is only updated after the register write succeeds.
 */
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

/* Leave every bridge that the LAG device, or any of its uppers, is a port
 * of, on behalf of @mlxsw_sp_port.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

static struct mlxsw_sp_lag *
mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
		    struct netlink_ext_ack *extack)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];
	struct mlxsw_sp_lag *lag;
	u16 lag_id;
	int i, err;

	for (i =
0; i < mlxsw_sp->max_lag; i++) { 4149 if (!mlxsw_sp->lags[i].dev) 4150 break; 4151 } 4152 4153 if (i == mlxsw_sp->max_lag) { 4154 NL_SET_ERR_MSG_MOD(extack, 4155 "Exceeded number of supported LAG devices"); 4156 return ERR_PTR(-EBUSY); 4157 } 4158 4159 lag_id = i; 4160 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4161 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4162 if (err) 4163 return ERR_PTR(err); 4164 4165 lag = &mlxsw_sp->lags[lag_id]; 4166 lag->lag_id = lag_id; 4167 lag->dev = lag_dev; 4168 refcount_set(&lag->ref_count, 1); 4169 4170 return lag; 4171 } 4172 4173 static int 4174 mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag) 4175 { 4176 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4177 4178 lag->dev = NULL; 4179 4180 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id); 4181 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4182 } 4183 4184 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4185 u16 lag_id, u8 port_index) 4186 { 4187 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4188 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4189 4190 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 4191 lag_id, port_index); 4192 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4193 } 4194 4195 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4196 u16 lag_id) 4197 { 4198 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4199 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4200 4201 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 4202 lag_id); 4203 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4204 } 4205 4206 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 4207 u16 lag_id) 4208 { 4209 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4210 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4211 4212 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 4213 lag_id); 
4214 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4215 } 4216 4217 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 4218 u16 lag_id) 4219 { 4220 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4221 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4222 4223 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 4224 lag_id); 4225 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4226 } 4227 4228 static struct mlxsw_sp_lag * 4229 mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev) 4230 { 4231 int i; 4232 4233 for (i = 0; i < mlxsw_sp->max_lag; i++) { 4234 if (!mlxsw_sp->lags[i].dev) 4235 continue; 4236 4237 if (mlxsw_sp->lags[i].dev == lag_dev) 4238 return &mlxsw_sp->lags[i]; 4239 } 4240 4241 return NULL; 4242 } 4243 4244 static struct mlxsw_sp_lag * 4245 mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, 4246 struct netlink_ext_ack *extack) 4247 { 4248 struct mlxsw_sp_lag *lag; 4249 4250 lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev); 4251 if (lag) { 4252 refcount_inc(&lag->ref_count); 4253 return lag; 4254 } 4255 4256 return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack); 4257 } 4258 4259 static void 4260 mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag) 4261 { 4262 if (!refcount_dec_and_test(&lag->ref_count)) 4263 return; 4264 4265 mlxsw_sp_lag_destroy(mlxsw_sp, lag); 4266 } 4267 4268 static bool 4269 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4270 struct net_device *lag_dev, 4271 struct netdev_lag_upper_info *lag_upper_info, 4272 struct netlink_ext_ack *extack) 4273 { 4274 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4275 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4276 return false; 4277 } 4278 return true; 4279 } 4280 4281 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4282 u16 lag_id, u8 *p_port_index) 4283 { 4284 u64 max_lag_members; 4285 int i; 4286 4287 
max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4288 MAX_LAG_MEMBERS); 4289 for (i = 0; i < max_lag_members; i++) { 4290 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4291 *p_port_index = i; 4292 return 0; 4293 } 4294 } 4295 return -EBUSY; 4296 } 4297 4298 static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, 4299 struct net_device *lag_dev, 4300 struct netlink_ext_ack *extack) 4301 { 4302 struct net_device *upper_dev; 4303 struct net_device *master; 4304 struct list_head *iter; 4305 int done = 0; 4306 int err; 4307 4308 master = netdev_master_upper_dev_get(lag_dev); 4309 if (master && netif_is_bridge_master(master)) { 4310 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master, 4311 extack); 4312 if (err) 4313 return err; 4314 } 4315 4316 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4317 if (!is_vlan_dev(upper_dev)) 4318 continue; 4319 4320 master = netdev_master_upper_dev_get(upper_dev); 4321 if (master && netif_is_bridge_master(master)) { 4322 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4323 upper_dev, master, 4324 extack); 4325 if (err) 4326 goto err_port_bridge_join; 4327 } 4328 4329 ++done; 4330 } 4331 4332 return 0; 4333 4334 err_port_bridge_join: 4335 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4336 if (!is_vlan_dev(upper_dev)) 4337 continue; 4338 4339 master = netdev_master_upper_dev_get(upper_dev); 4340 if (!master || !netif_is_bridge_master(master)) 4341 continue; 4342 4343 if (!done--) 4344 break; 4345 4346 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master); 4347 } 4348 4349 master = netdev_master_upper_dev_get(lag_dev); 4350 if (master && netif_is_bridge_master(master)) 4351 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master); 4352 4353 return err; 4354 } 4355 4356 static void 4357 mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4358 struct net_device *lag_dev) 4359 { 4360 struct net_device *upper_dev; 4361 struct net_device *master; 4362 
struct list_head *iter; 4363 4364 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4365 if (!is_vlan_dev(upper_dev)) 4366 continue; 4367 4368 master = netdev_master_upper_dev_get(upper_dev); 4369 if (!master) 4370 continue; 4371 4372 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master); 4373 } 4374 4375 master = netdev_master_upper_dev_get(lag_dev); 4376 if (master) 4377 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master); 4378 } 4379 4380 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 4381 struct net_device *lag_dev, 4382 struct netlink_ext_ack *extack) 4383 { 4384 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4385 struct mlxsw_sp_lag *lag; 4386 u16 lag_id; 4387 u8 port_index; 4388 int err; 4389 4390 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack); 4391 if (IS_ERR(lag)) 4392 return PTR_ERR(lag); 4393 4394 lag_id = lag->lag_id; 4395 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4396 if (err) 4397 return err; 4398 4399 err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev, 4400 extack); 4401 if (err) 4402 goto err_lag_uppers_bridge_join; 4403 4404 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 4405 if (err) 4406 goto err_col_port_add; 4407 4408 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 4409 mlxsw_sp_port->local_port); 4410 mlxsw_sp_port->lag_id = lag_id; 4411 mlxsw_sp_port->lagged = 1; 4412 4413 err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port); 4414 if (err) 4415 goto err_fid_port_join_lag; 4416 4417 /* Port is no longer usable as a router interface */ 4418 if (mlxsw_sp_port->default_vlan->fid) 4419 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 4420 4421 /* Join a router interface configured on the LAG, if exists */ 4422 err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, 4423 extack); 4424 if (err) 4425 goto err_router_join; 4426 4427 err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack); 4428 if (err) 
4429 goto err_replay; 4430 4431 return 0; 4432 4433 err_replay: 4434 mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev); 4435 err_router_join: 4436 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port); 4437 err_fid_port_join_lag: 4438 mlxsw_sp_port->lagged = 0; 4439 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4440 mlxsw_sp_port->local_port); 4441 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4442 err_col_port_add: 4443 mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev); 4444 err_lag_uppers_bridge_join: 4445 mlxsw_sp_lag_put(mlxsw_sp, lag); 4446 return err; 4447 } 4448 4449 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4450 struct net_device *lag_dev) 4451 { 4452 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4453 u16 lag_id = mlxsw_sp_port->lag_id; 4454 struct mlxsw_sp_lag *lag; 4455 4456 if (!mlxsw_sp_port->lagged) 4457 return; 4458 lag = &mlxsw_sp->lags[lag_id]; 4459 4460 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4461 4462 /* Any VLANs configured on the port are no longer valid */ 4463 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 4464 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 4465 /* Make the LAG and its directly linked uppers leave bridges they 4466 * are memeber in 4467 */ 4468 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 4469 4470 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port); 4471 4472 mlxsw_sp_lag_put(mlxsw_sp, lag); 4473 4474 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4475 mlxsw_sp_port->local_port); 4476 mlxsw_sp_port->lagged = 0; 4477 4478 /* Make sure untagged frames are allowed to ingress */ 4479 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, 4480 ETH_P_8021Q); 4481 } 4482 4483 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4484 u16 lag_id) 4485 { 4486 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4487 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4488 4489 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4490 
mlxsw_sp_port->local_port); 4491 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4492 } 4493 4494 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4495 u16 lag_id) 4496 { 4497 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4498 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4499 4500 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4501 mlxsw_sp_port->local_port); 4502 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4503 } 4504 4505 static int 4506 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 4507 { 4508 int err; 4509 4510 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 4511 mlxsw_sp_port->lag_id); 4512 if (err) 4513 return err; 4514 4515 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4516 if (err) 4517 goto err_dist_port_add; 4518 4519 return 0; 4520 4521 err_dist_port_add: 4522 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4523 return err; 4524 } 4525 4526 static int 4527 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 4528 { 4529 int err; 4530 4531 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4532 mlxsw_sp_port->lag_id); 4533 if (err) 4534 return err; 4535 4536 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 4537 mlxsw_sp_port->lag_id); 4538 if (err) 4539 goto err_col_port_disable; 4540 4541 return 0; 4542 4543 err_col_port_disable: 4544 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4545 return err; 4546 } 4547 4548 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4549 struct netdev_lag_lower_state_info *info) 4550 { 4551 if (info->tx_enabled) 4552 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 4553 else 4554 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4555 } 4556 4557 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4558 bool enable) 4559 { 4560 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4561 
/* Prepare a port for use as an OVS port: switch it to Virtual Port mode,
 * force all VLAN STP states to forwarding, add the whole VLAN range as
 * egress-tagged, and disable learning on every VID (OVS manages forwarding
 * itself). Each step is rolled back in reverse order on failure.
 *
 * Returns 0 on success or an errno on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	/* Disable learning VID by VID; on failure, 'vid' marks the first VID
	 * that was NOT disabled, which the unwind loop below relies on.
	 */
	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only for the VIDs already disabled (vid-1 down
	 * to 1); the failing VID itself was left unchanged.
	 */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 4632 { 4633 unsigned int num_vxlans = 0; 4634 struct net_device *dev; 4635 struct list_head *iter; 4636 4637 netdev_for_each_lower_dev(br_dev, dev, iter) { 4638 if (netif_is_vxlan(dev)) 4639 num_vxlans++; 4640 } 4641 4642 return num_vxlans > 1; 4643 } 4644 4645 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4646 { 4647 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4648 struct net_device *dev; 4649 struct list_head *iter; 4650 4651 netdev_for_each_lower_dev(br_dev, dev, iter) { 4652 u16 pvid; 4653 int err; 4654 4655 if (!netif_is_vxlan(dev)) 4656 continue; 4657 4658 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4659 if (err || !pvid) 4660 continue; 4661 4662 if (test_and_set_bit(pvid, vlans)) 4663 return false; 4664 } 4665 4666 return true; 4667 } 4668 4669 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4670 struct netlink_ext_ack *extack) 4671 { 4672 if (br_multicast_enabled(br_dev)) { 4673 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4674 return false; 4675 } 4676 4677 if (!br_vlan_enabled(br_dev) && 4678 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4679 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4680 return false; 4681 } 4682 4683 if (br_vlan_enabled(br_dev) && 4684 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4685 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4686 return false; 4687 } 4688 4689 return true; 4690 } 4691 4692 static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev, 4693 struct net_device *dev) 4694 { 4695 return upper_dev == netdev_master_upper_dev_get(dev); 4696 } 4697 4698 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp, 4699 unsigned long event, void *ptr, 4700 bool process_foreign); 4701 4702 static int mlxsw_sp_netdevice_validate_uppers(struct 
/* Handle NETDEV_PRECHANGEUPPER / NETDEV_CHANGEUPPER for a front-panel port
 * (@dev) or on behalf of one of its uppers (@lower_dev is the device whose
 * upper is changing; it equals @dev for direct port events).
 *
 * PRECHANGEUPPER vetoes unsupported topologies before the core commits them;
 * CHANGEUPPER performs the actual join/leave of bridge, LAG, OVS, macvlan and
 * VLAN uppers. @replay_deslavement controls whether deslavement events are
 * replayed after a bridge leave (skipped when the caller replays per-LAG).
 *
 * Returns 0 or a negative errno (vetoing the change on PRECHANGEUPPER).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr,
					       bool replay_deslavement)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only these upper types can be offloaded above a port. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		/* Validate VxLAN constraints of a not-yet-offloaded bridge. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Recursively vet the uppers we are about to be linked under. */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		/* VLAN-aware bridges are supported with 802.1q/802.1ad only,
		 * and 802.1ad bridges may not coexist with VLAN uppers.
		 */
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      lower_dev);
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only an unlinking of a bridged VLAN upper needs
			 * handling here; its bridge membership must go.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
4945 */ 4946 static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev, 4947 unsigned long event, 4948 void *ptr) 4949 { 4950 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev); 4951 struct netdev_notifier_changeupper_info *info = ptr; 4952 4953 if (!mlxsw_sp) 4954 return 0; 4955 4956 switch (event) { 4957 case NETDEV_CHANGEUPPER: 4958 if (info->linking) 4959 break; 4960 if (netif_is_bridge_master(info->upper_dev)) 4961 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev); 4962 break; 4963 } 4964 return 0; 4965 } 4966 4967 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 4968 unsigned long event, void *ptr) 4969 { 4970 struct net_device *dev; 4971 struct list_head *iter; 4972 int ret; 4973 4974 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4975 if (mlxsw_sp_port_dev_check(dev)) { 4976 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 4977 ptr, false); 4978 if (ret) 4979 return ret; 4980 } 4981 } 4982 4983 return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr); 4984 } 4985 4986 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 4987 struct net_device *dev, 4988 unsigned long event, void *ptr, 4989 u16 vid, bool replay_deslavement) 4990 { 4991 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4992 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4993 struct netdev_notifier_changeupper_info *info = ptr; 4994 struct netlink_ext_ack *extack; 4995 struct net_device *upper_dev; 4996 int err = 0; 4997 4998 extack = netdev_notifier_info_to_extack(&info->info); 4999 5000 switch (event) { 5001 case NETDEV_PRECHANGEUPPER: 5002 upper_dev = info->upper_dev; 5003 if (!netif_is_bridge_master(upper_dev) && 5004 !netif_is_macvlan(upper_dev) && 5005 !netif_is_l3_master(upper_dev)) { 5006 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5007 return -EINVAL; 5008 } 5009 if (!info->linking) 5010 break; 5011 if (netif_is_bridge_master(upper_dev) && 5012 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 
upper_dev) && 5013 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5014 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5015 return -EOPNOTSUPP; 5016 if (netdev_has_any_upper_dev(upper_dev) && 5017 (!netif_is_bridge_master(upper_dev) || 5018 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5019 upper_dev))) { 5020 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, 5021 upper_dev, 5022 extack); 5023 if (err) 5024 return err; 5025 } 5026 break; 5027 case NETDEV_CHANGEUPPER: 5028 upper_dev = info->upper_dev; 5029 if (netif_is_bridge_master(upper_dev)) { 5030 if (info->linking) { 5031 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5032 vlan_dev, 5033 upper_dev, 5034 extack); 5035 } else { 5036 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5037 vlan_dev, 5038 upper_dev); 5039 if (!replay_deslavement) 5040 break; 5041 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, 5042 vlan_dev); 5043 } 5044 } else if (netif_is_macvlan(upper_dev)) { 5045 if (!info->linking) 5046 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5047 } 5048 break; 5049 } 5050 5051 return err; 5052 } 5053 5054 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 5055 struct net_device *lag_dev, 5056 unsigned long event, 5057 void *ptr, u16 vid) 5058 { 5059 struct net_device *dev; 5060 struct list_head *iter; 5061 int ret; 5062 5063 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5064 if (mlxsw_sp_port_dev_check(dev)) { 5065 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 5066 event, ptr, 5067 vid, false); 5068 if (ret) 5069 return ret; 5070 } 5071 } 5072 5073 return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr); 5074 } 5075 5076 static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp, 5077 struct net_device *vlan_dev, 5078 struct net_device *br_dev, 5079 unsigned long event, void *ptr, 5080 u16 vid, bool process_foreign) 5081 { 5082 struct netdev_notifier_changeupper_info *info = ptr; 5083 struct netlink_ext_ack *extack; 5084 struct net_device *upper_dev; 
5085 5086 if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev)) 5087 return 0; 5088 5089 extack = netdev_notifier_info_to_extack(&info->info); 5090 5091 switch (event) { 5092 case NETDEV_PRECHANGEUPPER: 5093 upper_dev = info->upper_dev; 5094 if (!netif_is_macvlan(upper_dev) && 5095 !netif_is_l3_master(upper_dev)) { 5096 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5097 return -EOPNOTSUPP; 5098 } 5099 break; 5100 case NETDEV_CHANGEUPPER: 5101 upper_dev = info->upper_dev; 5102 if (info->linking) 5103 break; 5104 if (netif_is_macvlan(upper_dev)) 5105 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5106 break; 5107 } 5108 5109 return 0; 5110 } 5111 5112 static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp, 5113 struct net_device *vlan_dev, 5114 unsigned long event, void *ptr, 5115 bool process_foreign) 5116 { 5117 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 5118 u16 vid = vlan_dev_vlan_id(vlan_dev); 5119 5120 if (mlxsw_sp_port_dev_check(real_dev)) 5121 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 5122 event, ptr, vid, 5123 true); 5124 else if (netif_is_lag_master(real_dev)) 5125 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 5126 real_dev, event, 5127 ptr, vid); 5128 else if (netif_is_bridge_master(real_dev)) 5129 return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev, 5130 real_dev, event, 5131 ptr, vid, 5132 process_foreign); 5133 5134 return 0; 5135 } 5136 5137 static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp, 5138 struct net_device *br_dev, 5139 unsigned long event, void *ptr, 5140 bool process_foreign) 5141 { 5142 struct netdev_notifier_changeupper_info *info = ptr; 5143 struct netlink_ext_ack *extack; 5144 struct net_device *upper_dev; 5145 u16 proto; 5146 5147 if (!process_foreign && !mlxsw_sp_lower_get(br_dev)) 5148 return 0; 5149 5150 extack = netdev_notifier_info_to_extack(&info->info); 5151 5152 switch (event) { 5153 case NETDEV_PRECHANGEUPPER: 5154 upper_dev = 
/* Handle netdev events on a VxLAN device whose bridge master is offloaded by
 * us: join/leave the bridge's VxLAN offload on enslavement (CHANGEUPPER),
 * on the device coming up (PRE_UP) and going down (DOWN). Events for VxLAN
 * devices above bridges we do not offload are ignored.
 *
 * Returns 0, or a negative errno to veto the operation.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Only bridges with one of our ports below are relevant. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* Offload is only established for a running device;
			 * NETDEV_PRE_UP below handles the later bring-up.
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
*span_entry; 5286 int err = 0; 5287 5288 if (event == NETDEV_UNREGISTER) { 5289 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 5290 if (span_entry) 5291 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 5292 } 5293 5294 if (netif_is_vxlan(dev)) 5295 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 5296 else if (mlxsw_sp_port_dev_check(dev)) 5297 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true); 5298 else if (netif_is_lag_master(dev)) 5299 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 5300 else if (is_vlan_dev(dev)) 5301 err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr, 5302 process_foreign); 5303 else if (netif_is_bridge_master(dev)) 5304 err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr, 5305 process_foreign); 5306 else if (netif_is_macvlan(dev)) 5307 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 5308 5309 return err; 5310 } 5311 5312 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 5313 unsigned long event, void *ptr) 5314 { 5315 struct mlxsw_sp *mlxsw_sp; 5316 int err; 5317 5318 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 5319 mlxsw_sp_span_respin(mlxsw_sp); 5320 err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false); 5321 5322 return notifier_from_errno(err); 5323 } 5324 5325 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 5326 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 5327 {0, }, 5328 }; 5329 5330 static struct pci_driver mlxsw_sp1_pci_driver = { 5331 .name = mlxsw_sp1_driver_name, 5332 .id_table = mlxsw_sp1_pci_id_table, 5333 }; 5334 5335 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 5336 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 5337 {0, }, 5338 }; 5339 5340 static struct pci_driver mlxsw_sp2_pci_driver = { 5341 .name = mlxsw_sp2_driver_name, 5342 .id_table = mlxsw_sp2_pci_id_table, 5343 }; 5344 5345 static const struct pci_device_id mlxsw_sp3_pci_id_table[] 
= { 5346 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 5347 {0, }, 5348 }; 5349 5350 static struct pci_driver mlxsw_sp3_pci_driver = { 5351 .name = mlxsw_sp3_driver_name, 5352 .id_table = mlxsw_sp3_pci_id_table, 5353 }; 5354 5355 static const struct pci_device_id mlxsw_sp4_pci_id_table[] = { 5356 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0}, 5357 {0, }, 5358 }; 5359 5360 static struct pci_driver mlxsw_sp4_pci_driver = { 5361 .name = mlxsw_sp4_driver_name, 5362 .id_table = mlxsw_sp4_pci_id_table, 5363 }; 5364 5365 static int __init mlxsw_sp_module_init(void) 5366 { 5367 int err; 5368 5369 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 5370 if (err) 5371 return err; 5372 5373 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 5374 if (err) 5375 goto err_sp2_core_driver_register; 5376 5377 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 5378 if (err) 5379 goto err_sp3_core_driver_register; 5380 5381 err = mlxsw_core_driver_register(&mlxsw_sp4_driver); 5382 if (err) 5383 goto err_sp4_core_driver_register; 5384 5385 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 5386 if (err) 5387 goto err_sp1_pci_driver_register; 5388 5389 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 5390 if (err) 5391 goto err_sp2_pci_driver_register; 5392 5393 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 5394 if (err) 5395 goto err_sp3_pci_driver_register; 5396 5397 err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver); 5398 if (err) 5399 goto err_sp4_pci_driver_register; 5400 5401 return 0; 5402 5403 err_sp4_pci_driver_register: 5404 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5405 err_sp3_pci_driver_register: 5406 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5407 err_sp2_pci_driver_register: 5408 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5409 err_sp1_pci_driver_register: 5410 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5411 err_sp4_core_driver_register: 5412 
mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5413 err_sp3_core_driver_register: 5414 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5415 err_sp2_core_driver_register: 5416 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5417 return err; 5418 } 5419 5420 static void __exit mlxsw_sp_module_exit(void) 5421 { 5422 mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver); 5423 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5424 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5425 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5426 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5427 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5428 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5429 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5430 } 5431 5432 module_init(mlxsw_sp_module_init); 5433 module_exit(mlxsw_sp_module_exit); 5434 5435 MODULE_LICENSE("Dual BSD/GPL"); 5436 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5437 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 5438 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5439 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5440 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 5441 MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); 5442 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5443 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 5444 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 5445 MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME); 5446