1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <linux/refcount.h> 27 #include <linux/rhashtable.h> 28 #include <net/switchdev.h> 29 #include <net/pkt_cls.h> 30 #include <net/netevent.h> 31 #include <net/addrconf.h> 32 #include <linux/ptp_classify.h> 33 34 #include "spectrum.h" 35 #include "pci.h" 36 #include "core.h" 37 #include "core_env.h" 38 #include "reg.h" 39 #include "port.h" 40 #include "trap.h" 41 #include "txheader.h" 42 #include "spectrum_cnt.h" 43 #include "spectrum_dpipe.h" 44 #include "spectrum_acl_flex_actions.h" 45 #include "spectrum_span.h" 46 #include "spectrum_ptp.h" 47 #include "spectrum_trap.h" 48 49 #define MLXSW_SP_FWREV_MINOR 2010 50 #define MLXSW_SP_FWREV_SUBMINOR 1006 51 52 #define MLXSW_SP1_FWREV_MAJOR 13 53 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 54 55 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 56 .major = MLXSW_SP1_FWREV_MAJOR, 57 .minor = MLXSW_SP_FWREV_MINOR, 58 .subminor = MLXSW_SP_FWREV_SUBMINOR, 59 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 60 }; 61 62 #define MLXSW_SP1_FW_FILENAME \ 63 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 64 "." __stringify(MLXSW_SP_FWREV_MINOR) \ 65 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" 66 67 #define MLXSW_SP2_FWREV_MAJOR 29 68 69 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 70 .major = MLXSW_SP2_FWREV_MAJOR, 71 .minor = MLXSW_SP_FWREV_MINOR, 72 .subminor = MLXSW_SP_FWREV_SUBMINOR, 73 }; 74 75 #define MLXSW_SP2_FW_FILENAME \ 76 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 77 "." __stringify(MLXSW_SP_FWREV_MINOR) \ 78 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" 79 80 #define MLXSW_SP3_FWREV_MAJOR 30 81 82 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { 83 .major = MLXSW_SP3_FWREV_MAJOR, 84 .minor = MLXSW_SP_FWREV_MINOR, 85 .subminor = MLXSW_SP_FWREV_SUBMINOR, 86 }; 87 88 #define MLXSW_SP3_FW_FILENAME \ 89 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \ 90 "." __stringify(MLXSW_SP_FWREV_MINOR) \ 91 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" 92 93 #define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \ 94 "mellanox/lc_ini_bundle_" \ 95 __stringify(MLXSW_SP_FWREV_MINOR) "_" \ 96 __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin" 97 98 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 99 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 100 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 101 static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4"; 102 103 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 104 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 105 }; 106 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 107 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 108 }; 109 110 /* tx_hdr_version 111 * Tx header version. 112 * Must be set to 1. 113 */ 114 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 115 116 /* tx_hdr_ctl 117 * Packet control type. 118 * 0 - Ethernet control (e.g. EMADs, LACP) 119 * 1 - Ethernet data 120 */ 121 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 122 123 /* tx_hdr_proto 124 * Packet protocol type. Must be set to 1 (Ethernet). 125 */ 126 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 127 128 /* tx_hdr_rx_is_router 129 * Packet is sent from the router. Valid for data packets only. 130 */ 131 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 132 133 /* tx_hdr_fid_valid 134 * Indicates if the 'fid' field is valid and should be used for 135 * forwarding lookup. Valid for data packets only. 136 */ 137 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 138 139 /* tx_hdr_swid 140 * Switch partition ID. Must be set to 0. 141 */ 142 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 143 144 /* tx_hdr_control_tclass 145 * Indicates if the packet should use the control TClass and not one 146 * of the data TClasses. 147 */ 148 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 149 150 /* tx_hdr_etclass 151 * Egress TClass to be used on the egress device on the egress port. 152 */ 153 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 154 155 /* tx_hdr_port_mid 156 * Destination local port for unicast packets. 157 * Destination multicast ID for multicast packets. 158 * 159 * Control packets are directed to a specific egress port, while data 160 * packets are transmitted through the CPU port (0) into the switch partition, 161 * where forwarding rules are applied. 162 */ 163 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 164 165 /* tx_hdr_fid 166 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 167 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 168 * Valid for data packets only. 169 */ 170 MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16); 171 172 /* tx_hdr_type 173 * 0 - Data packets 174 * 6 - Control packets 175 */ 176 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 177 178 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 179 unsigned int counter_index, u64 *packets, 180 u64 *bytes) 181 { 182 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 183 int err; 184 185 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 186 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 187 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 188 if (err) 189 return err; 190 if (packets) 191 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 192 if (bytes) 193 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 194 return 0; 195 } 196 197 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 198 unsigned int counter_index) 199 { 200 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 201 202 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 203 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 204 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 205 } 206 207 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 208 unsigned int *p_counter_index) 209 { 210 int err; 211 212 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 213 p_counter_index); 214 if (err) 215 return err; 216 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 217 if (err) 218 goto err_counter_clear; 219 return 0; 220 221 err_counter_clear: 222 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 223 *p_counter_index); 224 return err; 225 } 226 227 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 228 unsigned int counter_index) 229 { 230 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 231 counter_index); 232 } 233 234 void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 235 const struct mlxsw_tx_info *tx_info) 236 { 237 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 238 239 memset(txhdr, 0, MLXSW_TXHDR_LEN); 240 241 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 242 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 243 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 244 mlxsw_tx_hdr_swid_set(txhdr, 0); 245 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 246 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 247 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 248 } 249 250 int 251 mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core, 252 struct mlxsw_sp_port *mlxsw_sp_port, 253 struct sk_buff *skb, 254 const struct mlxsw_tx_info *tx_info) 255 { 256 char *txhdr; 257 u16 max_fid; 258 int err; 259 260 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 261 err = -ENOMEM; 262 goto err_skb_cow_head; 263 } 264 265 if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) { 266 err = -EIO; 267 goto err_res_valid; 268 } 269 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID); 270 271 txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 272 memset(txhdr, 0, MLXSW_TXHDR_LEN); 273 274 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 275 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 276 mlxsw_tx_hdr_rx_is_router_set(txhdr, true); 277 mlxsw_tx_hdr_fid_valid_set(txhdr, true); 278 mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1); 279 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA); 280 return 0; 281 282 err_res_valid: 283 err_skb_cow_head: 284 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 285 dev_kfree_skb_any(skb); 286 return err; 287 } 288 289 static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb) 290 { 291 unsigned int type; 292 293 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) 294 return false; 295 296 type = ptp_classify_raw(skb); 297 return !!ptp_parse_header(skb, type); 298 } 299 300 static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core, 301 struct mlxsw_sp_port *mlxsw_sp_port, 302 struct sk_buff *skb, 303 const struct mlxsw_tx_info *tx_info) 304 { 305 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 306 307 /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp 308 * need special handling and cannot be transmitted as regular control 309 * packets. 310 */ 311 if (unlikely(mlxsw_sp_skb_requires_ts(skb))) 312 return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core, 313 mlxsw_sp_port, skb, 314 tx_info); 315 316 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 317 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 318 dev_kfree_skb_any(skb); 319 return -ENOMEM; 320 } 321 322 mlxsw_sp_txhdr_construct(skb, tx_info); 323 return 0; 324 } 325 326 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 327 { 328 switch (state) { 329 case BR_STATE_FORWARDING: 330 return MLXSW_REG_SPMS_STATE_FORWARDING; 331 case BR_STATE_LEARNING: 332 return MLXSW_REG_SPMS_STATE_LEARNING; 333 case BR_STATE_LISTENING: 334 case BR_STATE_DISABLED: 335 case BR_STATE_BLOCKING: 336 return MLXSW_REG_SPMS_STATE_DISCARDING; 337 default: 338 BUG(); 339 } 340 } 341 342 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 343 u8 state) 344 { 345 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 346 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 347 char *spms_pl; 348 int err; 349 350 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 351 if (!spms_pl) 352 return -ENOMEM; 353 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 354 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 355 356 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 357 kfree(spms_pl); 358 return err; 359 } 360 361 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 362 { 363 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 364 int err; 365 366 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 367 if (err) 368 return err; 369 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 370 return 0; 371 } 372 373 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 374 bool is_up) 375 { 376 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 377 char paos_pl[MLXSW_REG_PAOS_LEN]; 378 379 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 380 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 381 MLXSW_PORT_ADMIN_STATUS_DOWN); 382 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 383 } 384 385 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 386 const unsigned char *addr) 387 { 388 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 389 char ppad_pl[MLXSW_REG_PPAD_LEN]; 390 391 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 392 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 393 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 394 } 395 396 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 397 { 398 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 399 400 eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac, 401 mlxsw_sp_port->local_port); 402 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, 403 mlxsw_sp_port->dev->dev_addr); 404 } 405 406 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu) 407 { 408 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 409 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 410 int err; 411 412 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 413 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 414 if (err) 415 return err; 416 417 *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 418 return 0; 419 } 420 421 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 422 { 423 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 424 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 425 426 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 427 if (mtu > mlxsw_sp_port->max_mtu) 428 return -EINVAL; 429 430 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 431 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 432 } 433 434 static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, 435 u16 local_port, u8 swid) 436 { 437 char pspa_pl[MLXSW_REG_PSPA_LEN]; 438 439 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); 440 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 441 } 442 443 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 444 { 445 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 446 char svpe_pl[MLXSW_REG_SVPE_LEN]; 447 448 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 449 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 450 } 451 452 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 453 bool learn_enable) 454 { 455 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 456 char *spvmlr_pl; 457 int err; 458 459 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 460 if (!spvmlr_pl) 461 return -ENOMEM; 462 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 463 learn_enable); 464 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 465 kfree(spvmlr_pl); 466 return err; 467 } 468 469 int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 470 { 471 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 472 char spfsr_pl[MLXSW_REG_SPFSR_LEN]; 473 int err; 474 475 if (mlxsw_sp_port->security == enable) 476 return 0; 477 478 mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable); 479 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl); 480 if (err) 481 return err; 482 483 mlxsw_sp_port->security = enable; 484 return 0; 485 } 486 487 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type) 488 { 489 switch (ethtype) { 490 case ETH_P_8021Q: 491 *p_sver_type = 0; 492 break; 493 case ETH_P_8021AD: 494 *p_sver_type = 1; 495 break; 496 default: 497 return -EINVAL; 498 } 499 500 return 0; 501 } 502 503 int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port, 504 u16 ethtype) 505 { 506 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 507 char spevet_pl[MLXSW_REG_SPEVET_LEN]; 508 u8 sver_type; 509 int err; 510 511 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type); 512 if (err) 513 return err; 514 515 mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type); 516 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl); 517 } 518 519 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 520 u16 vid, u16 ethtype) 521 { 522 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 523 char spvid_pl[MLXSW_REG_SPVID_LEN]; 524 u8 sver_type; 525 int err; 526 527 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type); 528 if (err) 529 return err; 530 531 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid, 532 sver_type); 533 534 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 535 } 536 537 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 538 bool allow) 539 { 540 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 541 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 542 543 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 544 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 545 } 546 547 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 548 u16 ethtype) 549 { 550 int err; 551 552 if (!vid) { 553 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 554 if (err) 555 return err; 556 } else { 557 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype); 558 if (err) 559 return err; 560 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 561 if (err) 562 goto err_port_allow_untagged_set; 563 } 564 565 mlxsw_sp_port->pvid = vid; 566 return 0; 567 568 err_port_allow_untagged_set: 569 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype); 570 return err; 571 } 572 573 static int 574 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 575 { 576 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 577 char sspr_pl[MLXSW_REG_SSPR_LEN]; 578 579 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 580 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 581 } 582 583 static int 584 mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp, 585 u16 local_port, char *pmlp_pl, 586 struct mlxsw_sp_port_mapping *port_mapping) 587 { 588 bool separate_rxtx; 589 u8 first_lane; 590 u8 slot_index; 591 u8 module; 592 u8 width; 593 int i; 594 595 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 596 slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0); 597 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 598 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 599 first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 600 601 if (width && !is_power_of_2(width)) { 602 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n", 603 local_port); 604 return -EINVAL; 605 } 606 607 for (i = 0; i < width; i++) { 608 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 609 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 610 local_port); 611 return -EINVAL; 612 } 613 if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) { 614 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n", 615 local_port); 616 return -EINVAL; 617 } 618 if (separate_rxtx && 619 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 620 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 621 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 622 local_port); 623 return -EINVAL; 624 } 625 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) { 626 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 627 local_port); 628 return -EINVAL; 629 } 630 } 631 632 port_mapping->module = module; 633 port_mapping->slot_index = slot_index; 634 port_mapping->width = width; 635 port_mapping->module_width = width; 636 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 637 return 0; 638 } 639 640 static int 641 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, 642 struct mlxsw_sp_port_mapping *port_mapping) 643 { 644 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 645 int err; 646 647 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 648 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 649 if (err) 650 return err; 651 return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, 652 pmlp_pl, port_mapping); 653 } 654 655 static int 656 mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, 657 const struct mlxsw_sp_port_mapping *port_mapping) 658 { 659 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 660 int i, err; 661 662 mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index, 663 port_mapping->module); 664 665 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 666 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 667 for (i = 0; i < port_mapping->width; i++) { 668 mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i, 669 port_mapping->slot_index); 670 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 671 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 672 } 673 674 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 675 if (err) 676 goto err_pmlp_write; 677 return 0; 678 679 err_pmlp_write: 680 mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index, 681 port_mapping->module); 682 return err; 683 } 684 685 static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port, 686 u8 slot_index, u8 module) 687 { 688 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 689 690 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 691 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 692 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 693 mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module); 694 } 695 696 static int mlxsw_sp_port_open(struct net_device *dev) 697 { 698 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 699 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 700 int err; 701 702 err = mlxsw_env_module_port_up(mlxsw_sp->core, 703 mlxsw_sp_port->mapping.slot_index, 704 mlxsw_sp_port->mapping.module); 705 if (err) 706 return err; 707 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 708 if (err) 709 goto err_port_admin_status_set; 710 netif_start_queue(dev); 711 return 0; 712 713 err_port_admin_status_set: 714 mlxsw_env_module_port_down(mlxsw_sp->core, 715 mlxsw_sp_port->mapping.slot_index, 716 mlxsw_sp_port->mapping.module); 717 return err; 718 } 719 720 static int mlxsw_sp_port_stop(struct net_device *dev) 721 { 722 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 723 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 724 725 netif_stop_queue(dev); 726 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 727 mlxsw_env_module_port_down(mlxsw_sp->core, 728 mlxsw_sp_port->mapping.slot_index, 729 mlxsw_sp_port->mapping.module); 730 return 0; 731 } 732 733 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 734 struct net_device *dev) 735 { 736 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 737 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 738 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 739 const struct mlxsw_tx_info tx_info = { 740 .local_port = mlxsw_sp_port->local_port, 741 .is_emad = false, 742 }; 743 u64 len; 744 int err; 745 746 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 747 748 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 749 return NETDEV_TX_BUSY; 750 751 if (eth_skb_pad(skb)) { 752 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 753 return NETDEV_TX_OK; 754 } 755 756 err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb, 757 &tx_info); 758 if (err) 759 return NETDEV_TX_OK; 760 761 /* TX header is consumed by HW on the way so we shouldn't count its 762 * bytes as being sent. 763 */ 764 len = skb->len - MLXSW_TXHDR_LEN; 765 766 /* Due to a race we might fail here because of a full queue. In that 767 * unlikely case we simply drop the packet. 768 */ 769 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 770 771 if (!err) { 772 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 773 u64_stats_update_begin(&pcpu_stats->syncp); 774 pcpu_stats->tx_packets++; 775 pcpu_stats->tx_bytes += len; 776 u64_stats_update_end(&pcpu_stats->syncp); 777 } else { 778 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 779 dev_kfree_skb_any(skb); 780 } 781 return NETDEV_TX_OK; 782 } 783 784 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 785 { 786 } 787 788 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 789 { 790 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 791 struct sockaddr *addr = p; 792 int err; 793 794 if (!is_valid_ether_addr(addr->sa_data)) 795 return -EADDRNOTAVAIL; 796 797 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 798 if (err) 799 return err; 800 eth_hw_addr_set(dev, addr->sa_data); 801 return 0; 802 } 803 804 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 805 { 806 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 807 struct mlxsw_sp_hdroom orig_hdroom; 808 struct mlxsw_sp_hdroom hdroom; 809 int err; 810 811 orig_hdroom = *mlxsw_sp_port->hdroom; 812 813 hdroom = orig_hdroom; 814 hdroom.mtu = mtu; 815 mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom); 816 817 err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom); 818 if (err) { 819 netdev_err(dev, "Failed to configure port's headroom\n"); 820 return err; 821 } 822 823 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 824 if (err) 825 goto err_port_mtu_set; 826 dev->mtu = mtu; 827 return 0; 828 829 err_port_mtu_set: 830 mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom); 831 return err; 832 } 833 834 static int 835 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 836 struct rtnl_link_stats64 *stats) 837 { 838 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 839 struct mlxsw_sp_port_pcpu_stats *p; 840 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 841 u32 tx_dropped = 0; 842 unsigned int start; 843 int i; 844 845 for_each_possible_cpu(i) { 846 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 847 do { 848 start = u64_stats_fetch_begin(&p->syncp); 849 rx_packets = p->rx_packets; 850 rx_bytes = p->rx_bytes; 851 tx_packets = p->tx_packets; 852 tx_bytes = p->tx_bytes; 853 } while (u64_stats_fetch_retry(&p->syncp, start)); 854 855 stats->rx_packets += rx_packets; 856 stats->rx_bytes += rx_bytes; 857 stats->tx_packets += tx_packets; 858 stats->tx_bytes += tx_bytes; 859 /* tx_dropped is u32, updated without syncp protection. */ 860 tx_dropped += p->tx_dropped; 861 } 862 stats->tx_dropped = tx_dropped; 863 return 0; 864 } 865 866 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 867 { 868 switch (attr_id) { 869 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 870 return true; 871 } 872 873 return false; 874 } 875 876 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 877 void *sp) 878 { 879 switch (attr_id) { 880 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 881 return mlxsw_sp_port_get_sw_stats64(dev, sp); 882 } 883 884 return -EINVAL; 885 } 886 887 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 888 int prio, char *ppcnt_pl) 889 { 890 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 891 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 892 893 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 894 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 895 } 896 897 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 898 struct rtnl_link_stats64 *stats) 899 { 900 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 901 int err; 902 903 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 904 0, ppcnt_pl); 905 if (err) 906 goto out; 907 908 stats->tx_packets = 909 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 910 stats->rx_packets = 911 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 912 stats->tx_bytes = 913 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 914 stats->rx_bytes = 915 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 916 stats->multicast = 917 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 918 919 stats->rx_crc_errors = 920 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 921 stats->rx_frame_errors = 922 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 923 924 stats->rx_length_errors = ( 925 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 926 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 927 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 928 929 stats->rx_errors = (stats->rx_crc_errors + 930 stats->rx_frame_errors + stats->rx_length_errors); 931 932 out: 933 return err; 934 } 935 936 static void 937 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 938 struct mlxsw_sp_port_xstats *xstats) 939 { 940 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 941 int err, i; 942 943 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 944 ppcnt_pl); 945 if (!err) 946 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 947 948 for (i = 0; i < TC_MAX_QUEUE; i++) { 949 err = mlxsw_sp_port_get_stats_raw(dev, 950 MLXSW_REG_PPCNT_TC_CONG_CNT, 951 i, ppcnt_pl); 952 if (err) 953 goto tc_cnt; 954 955 xstats->wred_drop[i] = 956 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 957 xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl); 958 959 tc_cnt: 960 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 961 i, ppcnt_pl); 962 if (err) 963 continue; 964 965 xstats->backlog[i] = 966 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 967 xstats->tail_drop[i] = 968 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 969 } 970 971 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 972 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 973 i, ppcnt_pl); 974 if (err) 975 continue; 976 977 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 978 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 979 } 980 } 981 982 static void update_stats_cache(struct work_struct *work) 983 { 984 struct mlxsw_sp_port *mlxsw_sp_port = 985 container_of(work, struct mlxsw_sp_port, 986 periodic_hw_stats.update_dw.work); 987 988 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 989 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as 990 * necessary when port goes down. 991 */ 992 goto out; 993 994 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 995 &mlxsw_sp_port->periodic_hw_stats.stats); 996 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 997 &mlxsw_sp_port->periodic_hw_stats.xstats); 998 999 out: 1000 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1001 MLXSW_HW_STATS_UPDATE_TIME); 1002 } 1003 1004 /* Return the stats from a cache that is updated periodically, 1005 * as this function might get called in an atomic context. 1006 */ 1007 static void 1008 mlxsw_sp_port_get_stats64(struct net_device *dev, 1009 struct rtnl_link_stats64 *stats) 1010 { 1011 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1012 1013 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 1014 } 1015 1016 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1017 u16 vid_begin, u16 vid_end, 1018 bool is_member, bool untagged) 1019 { 1020 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1021 char *spvm_pl; 1022 int err; 1023 1024 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1025 if (!spvm_pl) 1026 return -ENOMEM; 1027 1028 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1029 vid_end, is_member, untagged); 1030 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1031 kfree(spvm_pl); 1032 return err; 1033 } 1034 1035 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1036 u16 vid_end, bool is_member, bool untagged) 1037 { 1038 u16 vid, vid_e; 1039 int err; 1040 1041 for (vid = vid_begin; vid <= vid_end; 1042 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1043 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1044 vid_end); 1045 1046 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1047 is_member, untagged); 1048 if (err) 1049 return err; 1050 } 1051 1052 return 0; 1053 } 1054 1055 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1056 bool flush_default) 1057 { 1058 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1059 1060 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1061 &mlxsw_sp_port->vlans_list, list) { 1062 if (!flush_default && 1063 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 1064 continue; 1065 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1066 } 1067 } 1068 1069 static void 1070 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1071 { 1072 if (mlxsw_sp_port_vlan->bridge_port) 1073 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1074 else if (mlxsw_sp_port_vlan->fid) 1075 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 1076 } 1077 1078 struct mlxsw_sp_port_vlan * 1079 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 1080 { 1081 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1082 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 1083 int err; 1084 1085 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1086 if (mlxsw_sp_port_vlan) 1087 return ERR_PTR(-EEXIST); 1088 1089 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 1090 if (err) 1091 return ERR_PTR(err); 1092 1093 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1094 if (!mlxsw_sp_port_vlan) { 1095 err = -ENOMEM; 1096 goto err_port_vlan_alloc; 1097 } 1098 1099 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1100 mlxsw_sp_port_vlan->vid = vid; 1101 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1102 1103 return mlxsw_sp_port_vlan; 1104 1105 err_port_vlan_alloc: 1106 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1107 return ERR_PTR(err); 1108 } 1109 1110 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1111 { 1112 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1113 u16 vid = mlxsw_sp_port_vlan->vid; 1114 1115 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1116 list_del(&mlxsw_sp_port_vlan->list); 1117 kfree(mlxsw_sp_port_vlan); 1118 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1119 } 1120 1121 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1122 __be16 __always_unused proto, u16 vid) 1123 { 1124 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1125 1126 /* VLAN 0 is added to HW filter when device goes up, but it is 1127 * reserved in our case, so simply return. 1128 */ 1129 if (!vid) 1130 return 0; 1131 1132 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1133 } 1134 1135 int mlxsw_sp_port_kill_vid(struct net_device *dev, 1136 __be16 __always_unused proto, u16 vid) 1137 { 1138 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1139 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1140 1141 /* VLAN 0 is removed from HW filter when device goes down, but 1142 * it is reserved in our case, so simply return. 1143 */ 1144 if (!vid) 1145 return 0; 1146 1147 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1148 if (!mlxsw_sp_port_vlan) 1149 return 0; 1150 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1151 1152 return 0; 1153 } 1154 1155 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1156 struct flow_block_offload *f) 1157 { 1158 switch (f->binder_type) { 1159 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: 1160 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true); 1161 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: 1162 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false); 1163 case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP: 1164 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f); 1165 case FLOW_BLOCK_BINDER_TYPE_RED_MARK: 1166 return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f); 1167 default: 1168 return -EOPNOTSUPP; 1169 } 1170 } 1171 1172 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1173 void *type_data) 1174 { 1175 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1176 1177 switch (type) { 1178 case TC_SETUP_BLOCK: 1179 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1180 case TC_SETUP_QDISC_RED: 1181 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1182 case TC_SETUP_QDISC_PRIO: 1183 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1184 case TC_SETUP_QDISC_ETS: 1185 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data); 1186 case TC_SETUP_QDISC_TBF: 1187 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data); 1188 case TC_SETUP_QDISC_FIFO: 1189 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data); 1190 default: 1191 return -EOPNOTSUPP; 1192 } 1193 } 1194 1195 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1196 { 1197 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1198 1199 if (!enable) { 1200 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) || 1201 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) { 1202 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1203 return -EINVAL; 1204 } 1205 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block); 1206 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block); 1207 } else { 1208 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block); 1209 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block); 1210 } 1211 return 0; 1212 } 1213 1214 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable) 1215 { 1216 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1217 char pplr_pl[MLXSW_REG_PPLR_LEN]; 1218 int err; 1219 1220 if (netif_running(dev)) 1221 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1222 1223 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable); 1224 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr), 1225 pplr_pl); 1226 1227 if (netif_running(dev)) 1228 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1229 1230 return err; 1231 } 1232 1233 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1234 1235 static int mlxsw_sp_handle_feature(struct net_device *dev, 1236 netdev_features_t wanted_features, 1237 netdev_features_t feature, 1238 mlxsw_sp_feature_handler feature_handler) 1239 { 1240 netdev_features_t changes = wanted_features ^ dev->features; 1241 bool enable = !!(wanted_features & feature); 1242 int err; 1243 1244 if (!(changes & feature)) 1245 return 0; 1246 1247 err = feature_handler(dev, enable); 1248 if (err) { 1249 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1250 enable ? "Enable" : "Disable", &feature, err); 1251 return err; 1252 } 1253 1254 if (enable) 1255 dev->features |= feature; 1256 else 1257 dev->features &= ~feature; 1258 1259 return 0; 1260 } 1261 static int mlxsw_sp_set_features(struct net_device *dev, 1262 netdev_features_t features) 1263 { 1264 netdev_features_t oper_features = dev->features; 1265 int err = 0; 1266 1267 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1268 mlxsw_sp_feature_hw_tc); 1269 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK, 1270 mlxsw_sp_feature_loopback); 1271 1272 if (err) { 1273 dev->features = oper_features; 1274 return -EINVAL; 1275 } 1276 1277 return 0; 1278 } 1279 1280 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, 1281 struct ifreq *ifr) 1282 { 1283 struct hwtstamp_config config; 1284 int err; 1285 1286 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1287 return -EFAULT; 1288 1289 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, 1290 &config); 1291 if (err) 1292 return err; 1293 1294 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1295 return -EFAULT; 1296 1297 return 0; 1298 } 1299 1300 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, 1301 struct ifreq *ifr) 1302 { 1303 struct hwtstamp_config config; 1304 int err; 1305 1306 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, 1307 &config); 1308 if (err) 1309 return err; 1310 1311 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1312 return -EFAULT; 1313 1314 return 0; 1315 } 1316 1317 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) 1318 { 1319 struct hwtstamp_config config = {0}; 1320 1321 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1322 } 1323 1324 static int 1325 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1326 { 1327 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1328 1329 switch (cmd) { 1330 case SIOCSHWTSTAMP: 1331 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1332 case SIOCGHWTSTAMP: 1333 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1334 default: 1335 return -EOPNOTSUPP; 1336 } 1337 } 1338 1339 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1340 .ndo_open = mlxsw_sp_port_open, 1341 .ndo_stop = mlxsw_sp_port_stop, 1342 .ndo_start_xmit = mlxsw_sp_port_xmit, 1343 .ndo_setup_tc = mlxsw_sp_setup_tc, 1344 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1345 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1346 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1347 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1348 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1349 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1350 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1351 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1352 .ndo_set_features = mlxsw_sp_set_features, 1353 .ndo_eth_ioctl = mlxsw_sp_port_ioctl, 1354 }; 1355 1356 static int 1357 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) 1358 { 1359 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1360 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 1361 const struct mlxsw_sp_port_type_speed_ops *ops; 1362 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1363 u32 eth_proto_cap_masked; 1364 int err; 1365 1366 ops = mlxsw_sp->port_type_speed_ops; 1367 1368 /* Set advertised speeds to speeds supported by both the driver 1369 * and the device. 1370 */ 1371 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 1372 0, false); 1373 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1374 if (err) 1375 return err; 1376 1377 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 1378 ð_proto_admin, ð_proto_oper); 1379 eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap); 1380 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 1381 eth_proto_cap_masked, 1382 mlxsw_sp_port->link.autoneg); 1383 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1384 } 1385 1386 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed) 1387 { 1388 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; 1389 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1390 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1391 u32 eth_proto_oper; 1392 int err; 1393 1394 port_type_speed_ops = mlxsw_sp->port_type_speed_ops; 1395 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, 1396 mlxsw_sp_port->local_port, 0, 1397 false); 1398 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1399 if (err) 1400 return err; 1401 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL, 1402 ð_proto_oper); 1403 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper); 1404 return 0; 1405 } 1406 1407 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 1408 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 1409 bool dwrr, u8 dwrr_weight) 1410 { 1411 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1412 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1413 1414 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1415 next_index); 1416 mlxsw_reg_qeec_de_set(qeec_pl, true); 1417 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 1418 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 1419 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1420 } 1421 1422 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 1423 enum mlxsw_reg_qeec_hr hr, u8 index, 1424 u8 next_index, u32 maxrate, u8 burst_size) 1425 { 1426 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1427 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1428 1429 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1430 next_index); 1431 mlxsw_reg_qeec_mase_set(qeec_pl, true); 1432 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 1433 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size); 1434 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1435 } 1436 1437 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 1438 enum mlxsw_reg_qeec_hr hr, u8 index, 1439 u8 next_index, u32 minrate) 1440 { 1441 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1442 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1443 1444 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1445 next_index); 1446 mlxsw_reg_qeec_mise_set(qeec_pl, true); 1447 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 1448 1449 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1450 } 1451 1452 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 1453 u8 switch_prio, u8 tclass) 1454 { 1455 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1456 char qtct_pl[MLXSW_REG_QTCT_LEN]; 1457 1458 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 1459 tclass); 1460 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 1461 } 1462 1463 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 1464 { 1465 int err, i; 1466 1467 /* Setup the elements hierarcy, so that each TC is linked to 1468 * one subgroup, which are all member in the same group. 1469 */ 1470 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1471 MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0); 1472 if (err) 1473 return err; 1474 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1475 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1476 MLXSW_REG_QEEC_HR_SUBGROUP, i, 1477 0, false, 0); 1478 if (err) 1479 return err; 1480 } 1481 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1482 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1483 MLXSW_REG_QEEC_HR_TC, i, i, 1484 false, 0); 1485 if (err) 1486 return err; 1487 1488 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1489 MLXSW_REG_QEEC_HR_TC, 1490 i + 8, i, 1491 true, 100); 1492 if (err) 1493 return err; 1494 } 1495 1496 /* Make sure the max shaper is disabled in all hierarchies that support 1497 * it. Note that this disables ptps (PTP shaper), but that is intended 1498 * for the initial configuration. 1499 */ 1500 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1501 MLXSW_REG_QEEC_HR_PORT, 0, 0, 1502 MLXSW_REG_QEEC_MAS_DIS, 0); 1503 if (err) 1504 return err; 1505 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1506 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1507 MLXSW_REG_QEEC_HR_SUBGROUP, 1508 i, 0, 1509 MLXSW_REG_QEEC_MAS_DIS, 0); 1510 if (err) 1511 return err; 1512 } 1513 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1514 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1515 MLXSW_REG_QEEC_HR_TC, 1516 i, i, 1517 MLXSW_REG_QEEC_MAS_DIS, 0); 1518 if (err) 1519 return err; 1520 1521 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1522 MLXSW_REG_QEEC_HR_TC, 1523 i + 8, i, 1524 MLXSW_REG_QEEC_MAS_DIS, 0); 1525 if (err) 1526 return err; 1527 } 1528 1529 /* Configure the min shaper for multicast TCs. */ 1530 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1531 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 1532 MLXSW_REG_QEEC_HR_TC, 1533 i + 8, i, 1534 MLXSW_REG_QEEC_MIS_MIN); 1535 if (err) 1536 return err; 1537 } 1538 1539 /* Map all priorities to traffic class 0. */ 1540 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1541 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 1542 if (err) 1543 return err; 1544 } 1545 1546 return 0; 1547 } 1548 1549 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 1550 bool enable) 1551 { 1552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1553 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 1554 1555 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 1556 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 1557 } 1558 1559 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port) 1560 { 1561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1562 u8 slot_index = mlxsw_sp_port->mapping.slot_index; 1563 u8 module = mlxsw_sp_port->mapping.module; 1564 u64 overheat_counter; 1565 int err; 1566 1567 err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index, 1568 module, &overheat_counter); 1569 if (err) 1570 return err; 1571 1572 mlxsw_sp_port->module_overheat_initial_val = overheat_counter; 1573 return 0; 1574 } 1575 1576 int 1577 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, 1578 bool is_8021ad_tagged, 1579 bool is_8021q_tagged) 1580 { 1581 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1582 char spvc_pl[MLXSW_REG_SPVC_LEN]; 1583 1584 mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port, 1585 is_8021ad_tagged, is_8021q_tagged); 1586 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl); 1587 } 1588 1589 static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp, 1590 u16 local_port, u8 *port_number, 1591 u8 *split_port_subnumber, 1592 u8 *slot_index) 1593 { 1594 char pllp_pl[MLXSW_REG_PLLP_LEN]; 1595 int err; 1596 1597 mlxsw_reg_pllp_pack(pllp_pl, local_port); 1598 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl); 1599 if (err) 1600 return err; 1601 mlxsw_reg_pllp_unpack(pllp_pl, port_number, 1602 split_port_subnumber, slot_index); 1603 return 0; 1604 } 1605 1606 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, 1607 bool split, 1608 struct mlxsw_sp_port_mapping *port_mapping) 1609 { 1610 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1611 struct mlxsw_sp_port *mlxsw_sp_port; 1612 u32 lanes = port_mapping->width; 1613 u8 split_port_subnumber; 1614 struct net_device *dev; 1615 u8 port_number; 1616 u8 slot_index; 1617 bool splittable; 1618 int err; 1619 1620 err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping); 1621 if (err) { 1622 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 1623 local_port); 1624 return err; 1625 } 1626 1627 err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0); 1628 if (err) { 1629 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 1630 local_port); 1631 goto err_port_swid_set; 1632 } 1633 1634 err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number, 1635 &split_port_subnumber, &slot_index); 1636 if (err) { 1637 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n", 1638 local_port); 1639 goto err_port_label_info_get; 1640 } 1641 1642 splittable = lanes > 1 && !split; 1643 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index, 1644 port_number, split, split_port_subnumber, 1645 splittable, lanes, mlxsw_sp->base_mac, 1646 sizeof(mlxsw_sp->base_mac)); 1647 if (err) { 1648 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 1649 local_port); 1650 goto err_core_port_init; 1651 } 1652 1653 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 1654 if (!dev) { 1655 err = -ENOMEM; 1656 goto err_alloc_etherdev; 1657 } 1658 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 1659 dev_net_set(dev, mlxsw_sp_net(mlxsw_sp)); 1660 mlxsw_sp_port = netdev_priv(dev); 1661 mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port, 1662 mlxsw_sp_port, dev); 1663 mlxsw_sp_port->dev = dev; 1664 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1665 mlxsw_sp_port->local_port = local_port; 1666 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 1667 mlxsw_sp_port->split = split; 1668 mlxsw_sp_port->mapping = *port_mapping; 1669 mlxsw_sp_port->link.autoneg = 1; 1670 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 1671 1672 mlxsw_sp_port->pcpu_stats = 1673 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 1674 if (!mlxsw_sp_port->pcpu_stats) { 1675 err = -ENOMEM; 1676 goto err_alloc_stats; 1677 } 1678 1679 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1680 &update_stats_cache); 1681 1682 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 1683 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 1684 1685 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 1686 if (err) { 1687 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 1688 mlxsw_sp_port->local_port); 1689 goto err_dev_addr_init; 1690 } 1691 1692 netif_carrier_off(dev); 1693 1694 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 1695 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 1696 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 1697 1698 dev->min_mtu = 0; 1699 dev->max_mtu = ETH_MAX_MTU; 1700 1701 /* Each packet needs to have a Tx header (metadata) on top all other 1702 * headers. 1703 */ 1704 dev->needed_headroom = MLXSW_TXHDR_LEN; 1705 1706 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 1707 if (err) { 1708 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 1709 mlxsw_sp_port->local_port); 1710 goto err_port_system_port_mapping_set; 1711 } 1712 1713 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port); 1714 if (err) { 1715 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 1716 mlxsw_sp_port->local_port); 1717 goto err_port_speed_by_width_set; 1718 } 1719 1720 err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port, 1721 &mlxsw_sp_port->max_speed); 1722 if (err) { 1723 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n", 1724 mlxsw_sp_port->local_port); 1725 goto err_max_speed_get; 1726 } 1727 1728 err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu); 1729 if (err) { 1730 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n", 1731 mlxsw_sp_port->local_port); 1732 goto err_port_max_mtu_get; 1733 } 1734 1735 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 1736 if (err) { 1737 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 1738 mlxsw_sp_port->local_port); 1739 goto err_port_mtu_set; 1740 } 1741 1742 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1743 if (err) 1744 goto err_port_admin_status_set; 1745 1746 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 1747 if (err) { 1748 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 1749 mlxsw_sp_port->local_port); 1750 goto err_port_buffers_init; 1751 } 1752 1753 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 1754 if (err) { 1755 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 1756 mlxsw_sp_port->local_port); 1757 goto err_port_ets_init; 1758 } 1759 1760 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 1761 if (err) { 1762 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 1763 mlxsw_sp_port->local_port); 1764 goto err_port_tc_mc_mode; 1765 } 1766 1767 /* ETS and buffers must be initialized before DCB. */ 1768 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 1769 if (err) { 1770 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 1771 mlxsw_sp_port->local_port); 1772 goto err_port_dcb_init; 1773 } 1774 1775 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 1776 if (err) { 1777 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 1778 mlxsw_sp_port->local_port); 1779 goto err_port_fids_init; 1780 } 1781 1782 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 1783 if (err) { 1784 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 1785 mlxsw_sp_port->local_port); 1786 goto err_port_qdiscs_init; 1787 } 1788 1789 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false, 1790 false); 1791 if (err) { 1792 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n", 1793 mlxsw_sp_port->local_port); 1794 goto err_port_vlan_clear; 1795 } 1796 1797 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 1798 if (err) { 1799 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 1800 mlxsw_sp_port->local_port); 1801 goto err_port_nve_init; 1802 } 1803 1804 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, 1805 ETH_P_8021Q); 1806 if (err) { 1807 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 1808 mlxsw_sp_port->local_port); 1809 goto err_port_pvid_set; 1810 } 1811 1812 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 1813 MLXSW_SP_DEFAULT_VID); 1814 if (IS_ERR(mlxsw_sp_port_vlan)) { 1815 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 1816 mlxsw_sp_port->local_port); 1817 err = PTR_ERR(mlxsw_sp_port_vlan); 1818 goto err_port_vlan_create; 1819 } 1820 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 1821 1822 /* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat 1823 * only packets with 802.1q header as tagged packets. 1824 */ 1825 err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true); 1826 if (err) { 1827 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n", 1828 local_port); 1829 goto err_port_vlan_classification_set; 1830 } 1831 1832 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 1833 mlxsw_sp->ptp_ops->shaper_work); 1834 1835 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 1836 1837 err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port); 1838 if (err) { 1839 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n", 1840 mlxsw_sp_port->local_port); 1841 goto err_port_overheat_init_val_set; 1842 } 1843 1844 err = register_netdev(dev); 1845 if (err) { 1846 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 1847 mlxsw_sp_port->local_port); 1848 goto err_register_netdev; 1849 } 1850 1851 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 1852 return 0; 1853 1854 err_register_netdev: 1855 err_port_overheat_init_val_set: 1856 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); 1857 err_port_vlan_classification_set: 1858 mlxsw_sp->ports[local_port] = NULL; 1859 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1860 err_port_vlan_create: 1861 err_port_pvid_set: 1862 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1863 err_port_nve_init: 1864 err_port_vlan_clear: 1865 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1866 err_port_qdiscs_init: 1867 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1868 err_port_fids_init: 1869 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1870 err_port_dcb_init: 1871 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1872 err_port_tc_mc_mode: 1873 err_port_ets_init: 1874 mlxsw_sp_port_buffers_fini(mlxsw_sp_port); 1875 err_port_buffers_init: 1876 err_port_admin_status_set: 1877 err_port_mtu_set: 1878 err_port_max_mtu_get: 1879 err_max_speed_get: 1880 err_port_speed_by_width_set: 1881 err_port_system_port_mapping_set: 1882 err_dev_addr_init: 1883 free_percpu(mlxsw_sp_port->pcpu_stats); 1884 err_alloc_stats: 1885 free_netdev(dev); 1886 err_alloc_etherdev: 1887 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1888 err_core_port_init: 1889 err_port_label_info_get: 1890 mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 1891 MLXSW_PORT_SWID_DISABLED_PORT); 1892 err_port_swid_set: 1893 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, 1894 port_mapping->slot_index, 1895 port_mapping->module); 1896 return err; 1897 } 1898 1899 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) 1900 { 1901 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1902 u8 slot_index = mlxsw_sp_port->mapping.slot_index; 1903 u8 module = mlxsw_sp_port->mapping.module; 1904 1905 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 1906 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 1907 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1908 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 1909 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); 1910 mlxsw_sp->ports[local_port] = NULL; 1911 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 1912 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1913 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1914 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1915 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1916 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1917 mlxsw_sp_port_buffers_fini(mlxsw_sp_port); 1918 free_percpu(mlxsw_sp_port->pcpu_stats); 1919 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 1920 free_netdev(mlxsw_sp_port->dev); 1921 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1922 mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 1923 MLXSW_PORT_SWID_DISABLED_PORT); 1924 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module); 1925 } 1926 1927 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 1928 { 1929 struct mlxsw_sp_port *mlxsw_sp_port; 1930 int err; 1931 1932 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 1933 if (!mlxsw_sp_port) 1934 return -ENOMEM; 1935 1936 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1937 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 1938 1939 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 1940 mlxsw_sp_port, 1941 mlxsw_sp->base_mac, 1942 sizeof(mlxsw_sp->base_mac)); 1943 if (err) { 1944 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 1945 goto err_core_cpu_port_init; 1946 } 1947 1948 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 1949 return 0; 1950 1951 err_core_cpu_port_init: 1952 kfree(mlxsw_sp_port); 1953 return err; 1954 } 1955 1956 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 1957 { 1958 struct mlxsw_sp_port *mlxsw_sp_port = 1959 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 1960 1961 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 1962 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 1963 kfree(mlxsw_sp_port); 1964 } 1965 1966 static bool mlxsw_sp_local_port_valid(u16 local_port) 1967 { 1968 return local_port != MLXSW_PORT_CPU_PORT; 1969 } 1970 1971 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port) 1972 { 1973 if (!mlxsw_sp_local_port_valid(local_port)) 1974 return false; 1975 return mlxsw_sp->ports[local_port] != NULL; 1976 } 1977 1978 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp, 1979 u16 local_port, bool enable) 1980 { 1981 char pmecr_pl[MLXSW_REG_PMECR_LEN]; 1982 1983 mlxsw_reg_pmecr_pack(pmecr_pl, local_port, 1984 enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT : 1985 MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT); 1986 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl); 1987 } 1988 1989 struct mlxsw_sp_port_mapping_event { 1990 struct list_head list; 1991 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 1992 }; 1993 1994 static void mlxsw_sp_port_mapping_events_work(struct work_struct *work) 1995 { 1996 struct mlxsw_sp_port_mapping_event *event, *next_event; 1997 struct mlxsw_sp_port_mapping_events *events; 1998 struct mlxsw_sp_port_mapping port_mapping; 1999 struct mlxsw_sp *mlxsw_sp; 2000 struct devlink *devlink; 2001 LIST_HEAD(event_queue); 2002 u16 local_port; 2003 int err; 2004 2005 events = container_of(work, struct mlxsw_sp_port_mapping_events, work); 2006 mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events); 2007 devlink = priv_to_devlink(mlxsw_sp->core); 2008 2009 spin_lock_bh(&events->queue_lock); 2010 list_splice_init(&events->queue, &event_queue); 2011 spin_unlock_bh(&events->queue_lock); 2012 2013 list_for_each_entry_safe(event, next_event, &event_queue, list) { 2014 local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl); 2015 err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, 2016 event->pmlp_pl, &port_mapping); 2017 if (err) 2018 goto out; 2019 2020 if (WARN_ON_ONCE(!port_mapping.width)) 2021 goto out; 2022 2023 devl_lock(devlink); 2024 2025 if (!mlxsw_sp_port_created(mlxsw_sp, local_port)) 2026 mlxsw_sp_port_create(mlxsw_sp, local_port, 2027 false, &port_mapping); 2028 else 2029 WARN_ON_ONCE(1); 2030 2031 devl_unlock(devlink); 2032 2033 mlxsw_sp->port_mapping[local_port] = port_mapping; 2034 2035 out: 2036 kfree(event); 2037 } 2038 } 2039 2040 static void 2041 mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg, 2042 char *pmlp_pl, void *priv) 2043 { 2044 struct mlxsw_sp_port_mapping_events *events; 2045 struct mlxsw_sp_port_mapping_event *event; 2046 struct mlxsw_sp *mlxsw_sp = priv; 2047 u16 local_port; 2048 2049 local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl); 2050 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) 2051 return; 2052 2053 events = &mlxsw_sp->port_mapping_events; 2054 event = kmalloc(sizeof(*event), GFP_ATOMIC); 2055 if (!event) 2056 return; 2057 memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl)); 2058 spin_lock(&events->queue_lock); 2059 list_add_tail(&event->list, &events->queue); 2060 spin_unlock(&events->queue_lock); 2061 mlxsw_core_schedule_work(&events->work); 2062 } 2063 2064 static void 2065 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp) 2066 { 2067 struct mlxsw_sp_port_mapping_event *event, *next_event; 2068 struct mlxsw_sp_port_mapping_events *events; 2069 2070 events = &mlxsw_sp->port_mapping_events; 2071 2072 /* Caller needs to make sure that no new event is going to appear. */ 2073 cancel_work_sync(&events->work); 2074 list_for_each_entry_safe(event, next_event, &events->queue, list) { 2075 list_del(&event->list); 2076 kfree(event); 2077 } 2078 } 2079 2080 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 2081 { 2082 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2083 int i; 2084 2085 for (i = 1; i < max_ports; i++) 2086 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); 2087 /* Make sure all scheduled events are processed */ 2088 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); 2089 2090 for (i = 1; i < max_ports; i++) 2091 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2092 mlxsw_sp_port_remove(mlxsw_sp, i); 2093 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2094 kfree(mlxsw_sp->ports); 2095 mlxsw_sp->ports = NULL; 2096 } 2097 2098 static void 2099 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core, 2100 bool (*selector)(void *priv, u16 local_port), 2101 void *priv) 2102 { 2103 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2104 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core); 2105 int i; 2106 2107 for (i = 1; i < max_ports; i++) 2108 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i)) 2109 mlxsw_sp_port_remove(mlxsw_sp, i); 2110 } 2111 2112 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 2113 { 2114 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2115 struct mlxsw_sp_port_mapping_events *events; 2116 struct mlxsw_sp_port_mapping *port_mapping; 2117 size_t alloc_size; 2118 int i; 2119 int err; 2120 2121 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 2122 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 2123 if (!mlxsw_sp->ports) 2124 return -ENOMEM; 2125 2126 events = &mlxsw_sp->port_mapping_events; 2127 INIT_LIST_HEAD(&events->queue); 2128 spin_lock_init(&events->queue_lock); 2129 INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work); 2130 2131 for (i = 1; i < max_ports; i++) { 2132 err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true); 2133 if (err) 2134 goto err_event_enable; 2135 } 2136 2137 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 2138 if (err) 2139 goto err_cpu_port_create; 2140 2141 for (i = 1; i < max_ports; i++) { 2142 port_mapping = &mlxsw_sp->port_mapping[i]; 2143 if (!port_mapping->width) 2144 continue; 2145 err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping); 2146 if (err) 2147 goto err_port_create; 2148 } 2149 return 0; 2150 2151 err_port_create: 2152 for (i--; i >= 1; i--) 2153 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2154 mlxsw_sp_port_remove(mlxsw_sp, i); 2155 i = max_ports; 2156 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2157 err_cpu_port_create: 2158 err_event_enable: 2159 for (i--; i >= 1; i--) 2160 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); 2161 /* Make sure all scheduled events are processed */ 2162 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); 2163 kfree(mlxsw_sp->ports); 2164 mlxsw_sp->ports = NULL; 2165 return err; 2166 } 2167 2168 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 2169 { 2170 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2171 struct mlxsw_sp_port_mapping *port_mapping; 2172 int i; 2173 int err; 2174 2175 mlxsw_sp->port_mapping = kcalloc(max_ports, 2176 sizeof(struct mlxsw_sp_port_mapping), 2177 GFP_KERNEL); 2178 if (!mlxsw_sp->port_mapping) 2179 return -ENOMEM; 2180 2181 for (i = 1; i < max_ports; i++) { 2182 port_mapping = &mlxsw_sp->port_mapping[i]; 2183 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping); 2184 if (err) 2185 goto err_port_module_info_get; 2186 } 2187 return 0; 2188 2189 err_port_module_info_get: 2190 kfree(mlxsw_sp->port_mapping); 2191 return err; 2192 } 2193 2194 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 2195 { 2196 kfree(mlxsw_sp->port_mapping); 2197 } 2198 2199 static int 2200 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, 2201 struct mlxsw_sp_port_mapping *port_mapping, 2202 unsigned int count, const char *pmtdb_pl) 2203 { 2204 struct mlxsw_sp_port_mapping split_port_mapping; 2205 int err, i; 2206 2207 split_port_mapping = *port_mapping; 2208 split_port_mapping.width /= count; 2209 for (i = 0; i < count; i++) { 2210 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2211 2212 if (!mlxsw_sp_local_port_valid(s_local_port)) 2213 continue; 2214 2215 err = mlxsw_sp_port_create(mlxsw_sp, s_local_port, 2216 true, &split_port_mapping); 2217 if (err) 2218 goto err_port_create; 2219 split_port_mapping.lane += split_port_mapping.width; 2220 } 2221 2222 return 0; 2223 2224 err_port_create: 2225 for (i--; i >= 0; i--) { 2226 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2227 2228 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2229 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2230 } 2231 return err; 2232 } 2233 2234 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 2235 unsigned int count, 2236 const char *pmtdb_pl) 2237 { 2238 struct mlxsw_sp_port_mapping *port_mapping; 2239 int i; 2240 2241 /* Go over original unsplit ports in the gap and recreate them. */ 2242 for (i = 0; i < count; i++) { 2243 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2244 2245 port_mapping = &mlxsw_sp->port_mapping[local_port]; 2246 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port)) 2247 continue; 2248 mlxsw_sp_port_create(mlxsw_sp, local_port, 2249 false, port_mapping); 2250 } 2251 } 2252 2253 static struct mlxsw_sp_port * 2254 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port) 2255 { 2256 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 2257 return mlxsw_sp->ports[local_port]; 2258 return NULL; 2259 } 2260 2261 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, 2262 unsigned int count, 2263 struct netlink_ext_ack *extack) 2264 { 2265 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2266 struct mlxsw_sp_port_mapping port_mapping; 2267 struct mlxsw_sp_port *mlxsw_sp_port; 2268 enum mlxsw_reg_pmtdb_status status; 2269 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2270 int i; 2271 int err; 2272 2273 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2274 if (!mlxsw_sp_port) { 2275 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2276 local_port); 2277 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2278 return -EINVAL; 2279 } 2280 2281 if (mlxsw_sp_port->split) { 2282 NL_SET_ERR_MSG_MOD(extack, "Port is already split"); 2283 return -EINVAL; 2284 } 2285 2286 mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, 2287 mlxsw_sp_port->mapping.module, 2288 mlxsw_sp_port->mapping.module_width / count, 2289 count); 2290 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2291 if (err) { 2292 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2293 return err; 2294 } 2295 2296 status = mlxsw_reg_pmtdb_status_get(pmtdb_pl); 2297 if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) { 2298 NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration"); 2299 return -EINVAL; 2300 } 2301 2302 port_mapping = mlxsw_sp_port->mapping; 2303 2304 for (i = 0; i < count; i++) { 2305 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2306 2307 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2308 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2309 } 2310 2311 err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping, 2312 count, pmtdb_pl); 2313 if (err) { 2314 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2315 goto err_port_split_create; 2316 } 2317 2318 return 0; 2319 2320 err_port_split_create: 2321 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2322 2323 return err; 2324 } 2325 2326 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port, 2327 struct netlink_ext_ack *extack) 2328 { 2329 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2330 struct mlxsw_sp_port *mlxsw_sp_port; 2331 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2332 unsigned int count; 2333 int i; 2334 int err; 2335 2336 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2337 if (!mlxsw_sp_port) { 2338 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2339 local_port); 2340 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2341 return -EINVAL; 2342 } 2343 2344 if (!mlxsw_sp_port->split) { 2345 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2346 return -EINVAL; 2347 } 2348 2349 count = mlxsw_sp_port->mapping.module_width / 2350 mlxsw_sp_port->mapping.width; 2351 2352 mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, 2353 mlxsw_sp_port->mapping.module, 2354 mlxsw_sp_port->mapping.module_width / count, 2355 count); 2356 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2357 if (err) { 2358 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2359 return err; 2360 } 2361 2362 for (i = 0; i < count; i++) { 2363 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2364 2365 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2366 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2367 } 2368 2369 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2370 2371 return 0; 2372 } 2373 2374 static void 2375 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 2376 { 2377 int i; 2378 2379 for (i = 0; i < TC_MAX_QUEUE; i++) 2380 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 2381 } 2382 2383 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2384 char *pude_pl, void *priv) 2385 { 2386 struct mlxsw_sp *mlxsw_sp = priv; 2387 struct mlxsw_sp_port *mlxsw_sp_port; 2388 enum mlxsw_reg_pude_oper_status status; 2389 u16 local_port; 2390 2391 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2392 2393 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) 2394 return; 2395 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2396 if (!mlxsw_sp_port) 2397 return; 2398 2399 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2400 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2401 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2402 netif_carrier_on(mlxsw_sp_port->dev); 2403 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 2404 } else { 2405 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2406 netif_carrier_off(mlxsw_sp_port->dev); 2407 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 2408 } 2409 } 2410 2411 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 2412 char *mtpptr_pl, bool ingress) 2413 { 2414 u16 local_port; 2415 u8 num_rec; 2416 int i; 2417 2418 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 2419 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 2420 for (i = 0; i < num_rec; i++) { 2421 u8 domain_number; 2422 u8 message_type; 2423 u16 sequence_id; 2424 u64 timestamp; 2425 2426 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 2427 &domain_number, &sequence_id, 2428 ×tamp); 2429 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 2430 message_type, domain_number, 2431 sequence_id, timestamp); 2432 } 2433 } 2434 2435 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 2436 char *mtpptr_pl, void *priv) 2437 { 2438 struct mlxsw_sp *mlxsw_sp = priv; 2439 2440 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 2441 } 2442 2443 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 2444 char *mtpptr_pl, void *priv) 2445 { 2446 struct mlxsw_sp *mlxsw_sp = priv; 2447 2448 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 2449 } 2450 2451 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2452 u16 local_port, void *priv) 2453 { 2454 struct mlxsw_sp *mlxsw_sp = priv; 2455 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2456 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2457 2458 if (unlikely(!mlxsw_sp_port)) { 2459 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2460 local_port); 2461 return; 2462 } 2463 2464 skb->dev = mlxsw_sp_port->dev; 2465 2466 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2467 u64_stats_update_begin(&pcpu_stats->syncp); 2468 pcpu_stats->rx_packets++; 2469 pcpu_stats->rx_bytes += skb->len; 2470 u64_stats_update_end(&pcpu_stats->syncp); 2471 2472 skb->protocol = eth_type_trans(skb, skb->dev); 2473 netif_receive_skb(skb); 2474 } 2475 2476 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port, 2477 void *priv) 2478 { 2479 skb->offload_fwd_mark = 1; 2480 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2481 } 2482 2483 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2484 u16 local_port, void *priv) 2485 { 2486 skb->offload_l3_fwd_mark = 1; 2487 skb->offload_fwd_mark = 1; 2488 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2489 } 2490 2491 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2492 u16 local_port) 2493 { 2494 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2495 } 2496 2497 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2498 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2499 _is_ctrl, SP_##_trap_group, DISCARD) 2500 2501 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2502 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2503 _is_ctrl, SP_##_trap_group, DISCARD) 2504 2505 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2506 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2507 _is_ctrl, SP_##_trap_group, DISCARD) 2508 2509 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2510 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2511 2512 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2513 /* Events */ 2514 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2515 /* L2 traps */ 2516 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2517 /* L3 traps */ 2518 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2519 false), 2520 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2521 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2522 false), 2523 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2524 ROUTER_EXP, false), 2525 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2526 ROUTER_EXP, false), 2527 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2528 ROUTER_EXP, false), 2529 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2530 ROUTER_EXP, false), 2531 /* Multicast Router Traps */ 2532 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2533 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2534 /* NVE traps */ 2535 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), 2536 }; 2537 2538 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2539 /* Events */ 2540 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2541 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2542 }; 2543 2544 static const struct mlxsw_listener mlxsw_sp2_listener[] = { 2545 /* Events */ 2546 MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE), 2547 }; 2548 2549 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2550 { 2551 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2552 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2553 enum mlxsw_reg_qpcr_ir_units ir_units; 2554 int max_cpu_policers; 2555 bool is_bytes; 2556 u8 burst_size; 2557 u32 rate; 2558 int i, err; 2559 2560 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2561 return -EIO; 2562 2563 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2564 2565 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2566 for (i = 0; i < max_cpu_policers; i++) { 2567 is_bytes = false; 2568 switch (i) { 2569 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2570 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2571 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2572 rate = 1024; 2573 burst_size = 7; 2574 break; 2575 default: 2576 continue; 2577 } 2578 2579 __set_bit(i, mlxsw_sp->trap->policers_usage); 2580 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2581 burst_size); 2582 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2583 if (err) 2584 return err; 2585 } 2586 2587 return 0; 2588 } 2589 2590 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2591 { 2592 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2593 enum mlxsw_reg_htgt_trap_group i; 2594 int max_cpu_policers; 2595 int max_trap_groups; 2596 u8 priority, tc; 2597 u16 policer_id; 2598 int err; 2599 2600 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2601 return -EIO; 2602 2603 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2604 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2605 2606 for (i = 0; i < max_trap_groups; i++) { 2607 policer_id = i; 2608 switch (i) { 2609 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2610 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2611 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2612 priority = 1; 2613 tc = 1; 2614 break; 2615 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2616 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2617 tc = MLXSW_REG_HTGT_DEFAULT_TC; 2618 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2619 break; 2620 default: 2621 continue; 2622 } 2623 2624 if (max_cpu_policers <= policer_id && 2625 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2626 return -EIO; 2627 2628 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2629 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2630 if (err) 2631 return err; 2632 } 2633 2634 return 0; 2635 } 2636 2637 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2638 { 2639 struct mlxsw_sp_trap *trap; 2640 u64 max_policers; 2641 int err; 2642 2643 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2644 return -EIO; 2645 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2646 trap = kzalloc(struct_size(trap, policers_usage, 2647 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2648 if (!trap) 2649 return -ENOMEM; 2650 trap->max_policers = max_policers; 2651 mlxsw_sp->trap = trap; 2652 2653 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2654 if (err) 2655 goto err_cpu_policers_set; 2656 2657 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2658 if (err) 2659 goto err_trap_groups_set; 2660 2661 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener, 2662 ARRAY_SIZE(mlxsw_sp_listener), 2663 mlxsw_sp); 2664 if (err) 2665 goto err_traps_register; 2666 2667 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners, 2668 mlxsw_sp->listeners_count, mlxsw_sp); 2669 if (err) 2670 goto err_extra_traps_init; 2671 2672 return 0; 2673 2674 err_extra_traps_init: 2675 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2676 ARRAY_SIZE(mlxsw_sp_listener), 2677 mlxsw_sp); 2678 err_traps_register: 2679 err_trap_groups_set: 2680 err_cpu_policers_set: 2681 kfree(trap); 2682 return err; 2683 } 2684 2685 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2686 { 2687 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners, 2688 mlxsw_sp->listeners_count, 2689 mlxsw_sp); 2690 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2691 ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp); 2692 kfree(mlxsw_sp->trap); 2693 } 2694 2695 static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp) 2696 { 2697 char sgcr_pl[MLXSW_REG_SGCR_LEN]; 2698 u16 max_lag; 2699 int err; 2700 2701 if (mlxsw_core_lag_mode(mlxsw_sp->core) != 2702 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) 2703 return 0; 2704 2705 err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); 2706 if (err) 2707 return err; 2708 2709 /* In DDD mode, which we by default use, each LAG entry is 8 PGT 2710 * entries. The LAG table address needs to be 8-aligned, but that ought 2711 * to be the case, since the LAG table is allocated first. 2712 */ 2713 err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base, 2714 max_lag * 8); 2715 if (err) 2716 return err; 2717 if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) { 2718 err = -EINVAL; 2719 goto err_mid_alloc_range; 2720 } 2721 2722 mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base); 2723 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl); 2724 if (err) 2725 goto err_mid_alloc_range; 2726 2727 return 0; 2728 2729 err_mid_alloc_range: 2730 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, 2731 max_lag * 8); 2732 return err; 2733 } 2734 2735 static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp) 2736 { 2737 u16 max_lag; 2738 int err; 2739 2740 if (mlxsw_core_lag_mode(mlxsw_sp->core) != 2741 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) 2742 return; 2743 2744 err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); 2745 if (err) 2746 return; 2747 2748 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, 2749 max_lag * 8); 2750 } 2751 2752 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2753 2754 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2755 { 2756 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2757 u16 max_lag; 2758 u32 seed; 2759 int err; 2760 2761 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2762 MLXSW_SP_LAG_SEED_INIT); 2763 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2764 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2765 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2766 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2767 MLXSW_REG_SLCR_LAG_HASH_SIP | 2768 MLXSW_REG_SLCR_LAG_HASH_DIP | 2769 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2770 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2771 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2772 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2773 if (err) 2774 return err; 2775 2776 err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); 2777 if (err) 2778 return err; 2779 2780 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2781 return -EIO; 2782 2783 err = mlxsw_sp_lag_pgt_init(mlxsw_sp); 2784 if (err) 2785 return err; 2786 2787 mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper), 2788 GFP_KERNEL); 2789 if (!mlxsw_sp->lags) { 2790 err = -ENOMEM; 2791 goto err_kcalloc; 2792 } 2793 2794 return 0; 2795 2796 err_kcalloc: 2797 mlxsw_sp_lag_pgt_fini(mlxsw_sp); 2798 return err; 2799 } 2800 2801 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2802 { 2803 mlxsw_sp_lag_pgt_fini(mlxsw_sp); 2804 kfree(mlxsw_sp->lags); 2805 } 2806 2807 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2808 .clock_init = mlxsw_sp1_ptp_clock_init, 2809 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2810 .init = mlxsw_sp1_ptp_init, 2811 .fini = mlxsw_sp1_ptp_fini, 2812 .receive = mlxsw_sp1_ptp_receive, 2813 .transmitted = mlxsw_sp1_ptp_transmitted, 2814 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2815 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 2816 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2817 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2818 .get_stats_count = mlxsw_sp1_get_stats_count, 2819 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2820 .get_stats = mlxsw_sp1_get_stats, 2821 .txhdr_construct = mlxsw_sp_ptp_txhdr_construct, 2822 }; 2823 2824 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2825 .clock_init = mlxsw_sp2_ptp_clock_init, 2826 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2827 .init = mlxsw_sp2_ptp_init, 2828 .fini = mlxsw_sp2_ptp_fini, 2829 .receive = mlxsw_sp2_ptp_receive, 2830 .transmitted = mlxsw_sp2_ptp_transmitted, 2831 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2832 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2833 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2834 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2835 .get_stats_count = mlxsw_sp2_get_stats_count, 2836 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2837 .get_stats = mlxsw_sp2_get_stats, 2838 .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct, 2839 }; 2840 2841 static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = { 2842 .clock_init = mlxsw_sp2_ptp_clock_init, 2843 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2844 .init = mlxsw_sp2_ptp_init, 2845 .fini = mlxsw_sp2_ptp_fini, 2846 .receive = mlxsw_sp2_ptp_receive, 2847 .transmitted = mlxsw_sp2_ptp_transmitted, 2848 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2849 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2850 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2851 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2852 .get_stats_count = mlxsw_sp2_get_stats_count, 2853 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2854 .get_stats = mlxsw_sp2_get_stats, 2855 .txhdr_construct = mlxsw_sp_ptp_txhdr_construct, 2856 }; 2857 2858 struct mlxsw_sp_sample_trigger_node { 2859 struct mlxsw_sp_sample_trigger trigger; 2860 struct mlxsw_sp_sample_params params; 2861 struct rhash_head ht_node; 2862 struct rcu_head rcu; 2863 refcount_t refcount; 2864 }; 2865 2866 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { 2867 .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), 2868 .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), 2869 .key_len = sizeof(struct mlxsw_sp_sample_trigger), 2870 .automatic_shrinking = true, 2871 }; 2872 2873 static void 2874 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, 2875 const struct mlxsw_sp_sample_trigger *trigger) 2876 { 2877 memset(key, 0, sizeof(*key)); 2878 key->type = trigger->type; 2879 key->local_port = trigger->local_port; 2880 } 2881 2882 /* RCU read lock must be held */ 2883 struct mlxsw_sp_sample_params * 2884 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, 2885 const struct mlxsw_sp_sample_trigger *trigger) 2886 { 2887 struct mlxsw_sp_sample_trigger_node *trigger_node; 2888 struct mlxsw_sp_sample_trigger key; 2889 2890 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2891 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, 2892 mlxsw_sp_sample_trigger_ht_params); 2893 if (!trigger_node) 2894 return NULL; 2895 2896 return &trigger_node->params; 2897 } 2898 2899 static int 2900 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, 2901 const struct mlxsw_sp_sample_trigger *trigger, 2902 const struct mlxsw_sp_sample_params *params) 2903 { 2904 struct mlxsw_sp_sample_trigger_node *trigger_node; 2905 int err; 2906 2907 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); 2908 if (!trigger_node) 2909 return -ENOMEM; 2910 2911 trigger_node->trigger = *trigger; 2912 trigger_node->params = *params; 2913 refcount_set(&trigger_node->refcount, 1); 2914 2915 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, 2916 &trigger_node->ht_node, 2917 mlxsw_sp_sample_trigger_ht_params); 2918 if (err) 2919 goto err_rhashtable_insert; 2920 2921 return 0; 2922 2923 err_rhashtable_insert: 2924 kfree(trigger_node); 2925 return err; 2926 } 2927 2928 static void 2929 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, 2930 struct mlxsw_sp_sample_trigger_node *trigger_node) 2931 { 2932 rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, 2933 &trigger_node->ht_node, 2934 mlxsw_sp_sample_trigger_ht_params); 2935 kfree_rcu(trigger_node, rcu); 2936 } 2937 2938 int 2939 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, 2940 const struct mlxsw_sp_sample_trigger *trigger, 2941 const struct mlxsw_sp_sample_params *params, 2942 struct netlink_ext_ack *extack) 2943 { 2944 struct mlxsw_sp_sample_trigger_node *trigger_node; 2945 struct mlxsw_sp_sample_trigger key; 2946 2947 ASSERT_RTNL(); 2948 2949 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2950 2951 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2952 &key, 2953 mlxsw_sp_sample_trigger_ht_params); 2954 if (!trigger_node) 2955 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, 2956 params); 2957 2958 if (trigger_node->trigger.local_port) { 2959 NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port"); 2960 return -EINVAL; 2961 } 2962 2963 if (trigger_node->params.psample_group != params->psample_group || 2964 trigger_node->params.truncate != params->truncate || 2965 trigger_node->params.rate != params->rate || 2966 trigger_node->params.trunc_size != params->trunc_size) { 2967 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); 2968 return -EINVAL; 2969 } 2970 2971 refcount_inc(&trigger_node->refcount); 2972 2973 return 0; 2974 } 2975 2976 void 2977 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, 2978 const struct mlxsw_sp_sample_trigger *trigger) 2979 { 2980 struct mlxsw_sp_sample_trigger_node *trigger_node; 2981 struct mlxsw_sp_sample_trigger key; 2982 2983 ASSERT_RTNL(); 2984 2985 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2986 2987 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2988 &key, 2989 mlxsw_sp_sample_trigger_ht_params); 2990 if (!trigger_node) 2991 return; 2992 2993 if (!refcount_dec_and_test(&trigger_node->refcount)) 2994 return; 2995 2996 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); 2997 } 2998 2999 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 3000 unsigned long event, void *ptr); 3001 3002 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96 3003 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128 3004 #define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789 3005 3006 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) 3007 { 3008 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0); 3009 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 3010 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; 3011 mutex_init(&mlxsw_sp->parsing.lock); 3012 } 3013 3014 static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) 3015 { 3016 mutex_destroy(&mlxsw_sp->parsing.lock); 3017 WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref)); 3018 } 3019 3020 struct mlxsw_sp_ipv6_addr_node { 3021 struct in6_addr key; 3022 struct rhash_head ht_node; 3023 u32 kvdl_index; 3024 refcount_t refcount; 3025 }; 3026 3027 static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = { 3028 .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key), 3029 .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node), 3030 .key_len = sizeof(struct in6_addr), 3031 .automatic_shrinking = true, 3032 }; 3033 3034 static int 3035 mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6, 3036 u32 *p_kvdl_index) 3037 { 3038 struct mlxsw_sp_ipv6_addr_node *node; 3039 char rips_pl[MLXSW_REG_RIPS_LEN]; 3040 int err; 3041 3042 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 3043 MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 3044 p_kvdl_index); 3045 if (err) 3046 return err; 3047 3048 mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6); 3049 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); 3050 if (err) 3051 goto err_rips_write; 3052 3053 node = kzalloc(sizeof(*node), GFP_KERNEL); 3054 if (!node) { 3055 err = -ENOMEM; 3056 goto err_node_alloc; 3057 } 3058 3059 node->key = *addr6; 3060 node->kvdl_index = *p_kvdl_index; 3061 refcount_set(&node->refcount, 1); 3062 3063 err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht, 3064 &node->ht_node, 3065 mlxsw_sp_ipv6_addr_ht_params); 3066 if (err) 3067 goto err_rhashtable_insert; 3068 3069 return 0; 3070 3071 err_rhashtable_insert: 3072 kfree(node); 3073 err_node_alloc: 3074 err_rips_write: 3075 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 3076 *p_kvdl_index); 3077 return err; 3078 } 3079 3080 static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp, 3081 struct mlxsw_sp_ipv6_addr_node *node) 3082 { 3083 u32 kvdl_index = node->kvdl_index; 3084 3085 rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node, 3086 mlxsw_sp_ipv6_addr_ht_params); 3087 kfree(node); 3088 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 3089 kvdl_index); 3090 } 3091 3092 int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp, 3093 const struct in6_addr *addr6, 3094 u32 *p_kvdl_index) 3095 { 3096 struct mlxsw_sp_ipv6_addr_node *node; 3097 int err = 0; 3098 3099 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 3100 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 3101 mlxsw_sp_ipv6_addr_ht_params); 3102 if (node) { 3103 refcount_inc(&node->refcount); 3104 *p_kvdl_index = node->kvdl_index; 3105 goto out_unlock; 3106 } 3107 3108 err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index); 3109 3110 out_unlock: 3111 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 3112 return err; 3113 } 3114 3115 void 3116 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6) 3117 { 3118 struct mlxsw_sp_ipv6_addr_node *node; 3119 3120 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 3121 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 3122 mlxsw_sp_ipv6_addr_ht_params); 3123 if (WARN_ON(!node)) 3124 goto out_unlock; 3125 3126 if (!refcount_dec_and_test(&node->refcount)) 3127 goto out_unlock; 3128 3129 mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node); 3130 3131 out_unlock: 3132 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 3133 } 3134 3135 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp) 3136 { 3137 int err; 3138 3139 err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht, 3140 &mlxsw_sp_ipv6_addr_ht_params); 3141 if (err) 3142 return err; 3143 3144 mutex_init(&mlxsw_sp->ipv6_addr_ht_lock); 3145 return 0; 3146 } 3147 3148 static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp) 3149 { 3150 mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock); 3151 rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht); 3152 } 3153 3154 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3155 const struct mlxsw_bus_info *mlxsw_bus_info, 3156 struct netlink_ext_ack *extack) 3157 { 3158 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3159 int err; 3160 3161 mlxsw_sp->core = mlxsw_core; 3162 mlxsw_sp->bus_info = mlxsw_bus_info; 3163 3164 mlxsw_sp_parsing_init(mlxsw_sp); 3165 3166 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3167 if (err) { 3168 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 3169 return err; 3170 } 3171 3172 err = mlxsw_sp_kvdl_init(mlxsw_sp); 3173 if (err) { 3174 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 3175 return err; 3176 } 3177 3178 err = mlxsw_sp_pgt_init(mlxsw_sp); 3179 if (err) { 3180 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n"); 3181 goto err_pgt_init; 3182 } 3183 3184 /* Initialize before FIDs so that the LAG table is at the start of PGT 3185 * and 8-aligned without overallocation. 3186 */ 3187 err = mlxsw_sp_lag_init(mlxsw_sp); 3188 if (err) { 3189 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 3190 goto err_lag_init; 3191 } 3192 3193 err = mlxsw_sp->fid_core_ops->init(mlxsw_sp); 3194 if (err) { 3195 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 3196 goto err_fid_core_init; 3197 } 3198 3199 err = mlxsw_sp_policers_init(mlxsw_sp); 3200 if (err) { 3201 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); 3202 goto err_policers_init; 3203 } 3204 3205 err = mlxsw_sp_traps_init(mlxsw_sp); 3206 if (err) { 3207 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 3208 goto err_traps_init; 3209 } 3210 3211 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 3212 if (err) { 3213 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 3214 goto err_devlink_traps_init; 3215 } 3216 3217 err = mlxsw_sp_buffers_init(mlxsw_sp); 3218 if (err) { 3219 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 3220 goto err_buffers_init; 3221 } 3222 3223 /* Initialize SPAN before router and switchdev, so that those components 3224 * can call mlxsw_sp_span_respin(). 3225 */ 3226 err = mlxsw_sp_span_init(mlxsw_sp); 3227 if (err) { 3228 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3229 goto err_span_init; 3230 } 3231 3232 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3233 if (err) { 3234 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3235 goto err_switchdev_init; 3236 } 3237 3238 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 3239 if (err) { 3240 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 3241 goto err_counter_pool_init; 3242 } 3243 3244 err = mlxsw_sp_afa_init(mlxsw_sp); 3245 if (err) { 3246 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 3247 goto err_afa_init; 3248 } 3249 3250 err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp); 3251 if (err) { 3252 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n"); 3253 goto err_ipv6_addr_ht_init; 3254 } 3255 3256 err = mlxsw_sp_nve_init(mlxsw_sp); 3257 if (err) { 3258 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 3259 goto err_nve_init; 3260 } 3261 3262 err = mlxsw_sp_port_range_init(mlxsw_sp); 3263 if (err) { 3264 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n"); 3265 goto err_port_range_init; 3266 } 3267 3268 err = mlxsw_sp_acl_init(mlxsw_sp); 3269 if (err) { 3270 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3271 goto err_acl_init; 3272 } 3273 3274 err = mlxsw_sp_router_init(mlxsw_sp, extack); 3275 if (err) { 3276 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 3277 goto err_router_init; 3278 } 3279 3280 if (mlxsw_sp->bus_info->read_clock_capable) { 3281 /* NULL is a valid return value from clock_init */ 3282 mlxsw_sp->clock = 3283 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 3284 mlxsw_sp->bus_info->dev); 3285 if (IS_ERR(mlxsw_sp->clock)) { 3286 err = PTR_ERR(mlxsw_sp->clock); 3287 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 3288 goto err_ptp_clock_init; 3289 } 3290 } 3291 3292 if (mlxsw_sp->clock) { 3293 /* NULL is a valid return value from ptp_ops->init */ 3294 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 3295 if (IS_ERR(mlxsw_sp->ptp_state)) { 3296 err = PTR_ERR(mlxsw_sp->ptp_state); 3297 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 3298 goto err_ptp_init; 3299 } 3300 } 3301 3302 /* Initialize netdevice notifier after SPAN is initialized, so that the 3303 * event handler can call SPAN respin. 3304 */ 3305 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 3306 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3307 &mlxsw_sp->netdevice_nb); 3308 if (err) { 3309 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 3310 goto err_netdev_notifier; 3311 } 3312 3313 err = mlxsw_sp_dpipe_init(mlxsw_sp); 3314 if (err) { 3315 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 3316 goto err_dpipe_init; 3317 } 3318 3319 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 3320 if (err) { 3321 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 3322 goto err_port_module_info_init; 3323 } 3324 3325 err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, 3326 &mlxsw_sp_sample_trigger_ht_params); 3327 if (err) { 3328 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); 3329 goto err_sample_trigger_init; 3330 } 3331 3332 err = mlxsw_sp_ports_create(mlxsw_sp); 3333 if (err) { 3334 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3335 goto err_ports_create; 3336 } 3337 3338 return 0; 3339 3340 err_ports_create: 3341 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3342 err_sample_trigger_init: 3343 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3344 err_port_module_info_init: 3345 mlxsw_sp_dpipe_fini(mlxsw_sp); 3346 err_dpipe_init: 3347 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3348 &mlxsw_sp->netdevice_nb); 3349 err_netdev_notifier: 3350 if (mlxsw_sp->clock) 3351 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3352 err_ptp_init: 3353 if (mlxsw_sp->clock) 3354 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3355 err_ptp_clock_init: 3356 mlxsw_sp_router_fini(mlxsw_sp); 3357 err_router_init: 3358 mlxsw_sp_acl_fini(mlxsw_sp); 3359 err_acl_init: 3360 mlxsw_sp_port_range_fini(mlxsw_sp); 3361 err_port_range_init: 3362 mlxsw_sp_nve_fini(mlxsw_sp); 3363 err_nve_init: 3364 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3365 err_ipv6_addr_ht_init: 3366 mlxsw_sp_afa_fini(mlxsw_sp); 3367 err_afa_init: 3368 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3369 err_counter_pool_init: 3370 mlxsw_sp_switchdev_fini(mlxsw_sp); 3371 err_switchdev_init: 3372 mlxsw_sp_span_fini(mlxsw_sp); 3373 err_span_init: 3374 mlxsw_sp_buffers_fini(mlxsw_sp); 3375 err_buffers_init: 3376 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3377 err_devlink_traps_init: 3378 mlxsw_sp_traps_fini(mlxsw_sp); 3379 err_traps_init: 3380 mlxsw_sp_policers_fini(mlxsw_sp); 3381 err_policers_init: 3382 mlxsw_sp->fid_core_ops->fini(mlxsw_sp); 3383 err_fid_core_init: 3384 mlxsw_sp_lag_fini(mlxsw_sp); 3385 err_lag_init: 3386 mlxsw_sp_pgt_fini(mlxsw_sp); 3387 err_pgt_init: 3388 mlxsw_sp_kvdl_fini(mlxsw_sp); 3389 mlxsw_sp_parsing_fini(mlxsw_sp); 3390 return err; 3391 } 3392 3393 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 3394 const struct mlxsw_bus_info *mlxsw_bus_info, 3395 struct netlink_ext_ack *extack) 3396 { 3397 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3398 3399 mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; 3400 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 3401 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 3402 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 3403 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 3404 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 3405 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 3406 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 3407 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 3408 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 3409 mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; 3410 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 3411 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 3412 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 3413 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; 3414 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; 3415 mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; 3416 mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; 3417 mlxsw_sp->listeners = mlxsw_sp1_listener; 3418 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 3419 mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops; 3420 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 3421 mlxsw_sp->pgt_smpe_index_valid = true; 3422 3423 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3424 } 3425 3426 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 3427 const struct mlxsw_bus_info *mlxsw_bus_info, 3428 struct netlink_ext_ack *extack) 3429 { 3430 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3431 3432 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3433 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3434 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3435 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3436 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3437 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3438 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3439 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3440 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3441 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3442 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3443 mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; 3444 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3445 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3446 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 3447 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3448 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3449 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3450 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3451 mlxsw_sp->listeners = mlxsw_sp2_listener; 3452 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3453 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops; 3454 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 3455 mlxsw_sp->pgt_smpe_index_valid = false; 3456 3457 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3458 } 3459 3460 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 3461 const struct mlxsw_bus_info *mlxsw_bus_info, 3462 struct netlink_ext_ack *extack) 3463 { 3464 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3465 3466 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3467 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3468 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3469 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3470 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3471 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3472 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3473 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3474 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3475 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3476 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3477 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3478 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3479 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3480 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3481 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3482 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3483 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3484 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3485 mlxsw_sp->listeners = mlxsw_sp2_listener; 3486 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3487 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops; 3488 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3489 mlxsw_sp->pgt_smpe_index_valid = false; 3490 3491 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3492 } 3493 3494 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, 3495 const struct mlxsw_bus_info *mlxsw_bus_info, 3496 struct netlink_ext_ack *extack) 3497 { 3498 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3499 3500 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3501 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3502 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3503 mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops; 3504 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3505 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3506 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3507 mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops; 3508 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3509 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3510 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3511 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3512 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3513 mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops; 3514 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3515 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3516 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3517 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3518 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3519 mlxsw_sp->listeners = mlxsw_sp2_listener; 3520 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3521 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops; 3522 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; 3523 mlxsw_sp->pgt_smpe_index_valid = false; 3524 3525 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3526 } 3527 3528 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3529 { 3530 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3531 3532 mlxsw_sp_ports_remove(mlxsw_sp); 3533 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3534 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3535 mlxsw_sp_dpipe_fini(mlxsw_sp); 3536 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3537 &mlxsw_sp->netdevice_nb); 3538 if (mlxsw_sp->clock) { 3539 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3540 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3541 } 3542 mlxsw_sp_router_fini(mlxsw_sp); 3543 mlxsw_sp_acl_fini(mlxsw_sp); 3544 mlxsw_sp_port_range_fini(mlxsw_sp); 3545 mlxsw_sp_nve_fini(mlxsw_sp); 3546 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3547 mlxsw_sp_afa_fini(mlxsw_sp); 3548 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3549 mlxsw_sp_switchdev_fini(mlxsw_sp); 3550 mlxsw_sp_span_fini(mlxsw_sp); 3551 mlxsw_sp_buffers_fini(mlxsw_sp); 3552 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3553 mlxsw_sp_traps_fini(mlxsw_sp); 3554 mlxsw_sp_policers_fini(mlxsw_sp); 3555 mlxsw_sp->fid_core_ops->fini(mlxsw_sp); 3556 mlxsw_sp_lag_fini(mlxsw_sp); 3557 mlxsw_sp_pgt_fini(mlxsw_sp); 3558 mlxsw_sp_kvdl_fini(mlxsw_sp); 3559 mlxsw_sp_parsing_fini(mlxsw_sp); 3560 } 3561 3562 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3563 .used_flood_mode = 1, 3564 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED, 3565 .used_max_ib_mc = 1, 3566 .max_ib_mc = 0, 3567 .used_max_pkey = 1, 3568 .max_pkey = 0, 3569 .used_ubridge = 1, 3570 .ubridge = 1, 3571 .used_kvd_sizes = 1, 3572 .kvd_hash_single_parts = 59, 3573 .kvd_hash_double_parts = 41, 3574 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3575 .swid_config = { 3576 { 3577 .used_type = 1, 3578 .type = MLXSW_PORT_SWID_TYPE_ETH, 3579 } 3580 }, 3581 }; 3582 3583 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3584 .used_flood_mode = 1, 3585 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED, 3586 .used_max_ib_mc = 1, 3587 .max_ib_mc = 0, 3588 .used_max_pkey = 1, 3589 .max_pkey = 0, 3590 .used_ubridge = 1, 3591 .ubridge = 1, 3592 .swid_config = { 3593 { 3594 .used_type = 1, 3595 .type = MLXSW_PORT_SWID_TYPE_ETH, 3596 } 3597 }, 3598 .used_cqe_time_stamp_type = 1, 3599 .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC, 3600 .lag_mode_prefer_sw = true, 3601 .flood_mode_prefer_cff = true, 3602 }; 3603 3604 /* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs 3605 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT 3606 * table. 3607 */ 3608 #define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128 3609 3610 static const struct mlxsw_config_profile mlxsw_sp4_config_profile = { 3611 .used_max_lag = 1, 3612 .max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG, 3613 .used_flood_mode = 1, 3614 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED, 3615 .used_max_ib_mc = 1, 3616 .max_ib_mc = 0, 3617 .used_max_pkey = 1, 3618 .max_pkey = 0, 3619 .used_ubridge = 1, 3620 .ubridge = 1, 3621 .swid_config = { 3622 { 3623 .used_type = 1, 3624 .type = MLXSW_PORT_SWID_TYPE_ETH, 3625 } 3626 }, 3627 .used_cqe_time_stamp_type = 1, 3628 .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC, 3629 .lag_mode_prefer_sw = true, 3630 .flood_mode_prefer_cff = true, 3631 }; 3632 3633 static void 3634 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3635 struct devlink_resource_size_params *kvd_size_params, 3636 struct devlink_resource_size_params *linear_size_params, 3637 struct devlink_resource_size_params *hash_double_size_params, 3638 struct devlink_resource_size_params *hash_single_size_params) 3639 { 3640 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3641 KVD_SINGLE_MIN_SIZE); 3642 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3643 KVD_DOUBLE_MIN_SIZE); 3644 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3645 u32 linear_size_min = 0; 3646 3647 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3648 MLXSW_SP_KVD_GRANULARITY, 3649 DEVLINK_RESOURCE_UNIT_ENTRY); 3650 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3651 kvd_size - single_size_min - 3652 double_size_min, 3653 MLXSW_SP_KVD_GRANULARITY, 3654 DEVLINK_RESOURCE_UNIT_ENTRY); 3655 devlink_resource_size_params_init(hash_double_size_params, 3656 double_size_min, 3657 kvd_size - single_size_min - 3658 linear_size_min, 3659 MLXSW_SP_KVD_GRANULARITY, 3660 DEVLINK_RESOURCE_UNIT_ENTRY); 3661 devlink_resource_size_params_init(hash_single_size_params, 3662 single_size_min, 3663 kvd_size - double_size_min - 3664 linear_size_min, 3665 MLXSW_SP_KVD_GRANULARITY, 3666 DEVLINK_RESOURCE_UNIT_ENTRY); 3667 } 3668 3669 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3670 { 3671 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3672 struct devlink_resource_size_params hash_single_size_params; 3673 struct devlink_resource_size_params hash_double_size_params; 3674 struct devlink_resource_size_params linear_size_params; 3675 struct devlink_resource_size_params kvd_size_params; 3676 u32 kvd_size, single_size, double_size, linear_size; 3677 const struct mlxsw_config_profile *profile; 3678 int err; 3679 3680 profile = &mlxsw_sp1_config_profile; 3681 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3682 return -EIO; 3683 3684 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3685 &linear_size_params, 3686 &hash_double_size_params, 3687 &hash_single_size_params); 3688 3689 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3690 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3691 kvd_size, MLXSW_SP_RESOURCE_KVD, 3692 DEVLINK_RESOURCE_ID_PARENT_TOP, 3693 &kvd_size_params); 3694 if (err) 3695 return err; 3696 3697 linear_size = profile->kvd_linear_size; 3698 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 3699 linear_size, 3700 MLXSW_SP_RESOURCE_KVD_LINEAR, 3701 MLXSW_SP_RESOURCE_KVD, 3702 &linear_size_params); 3703 if (err) 3704 return err; 3705 3706 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 3707 if (err) 3708 return err; 3709 3710 double_size = kvd_size - linear_size; 3711 double_size *= profile->kvd_hash_double_parts; 3712 double_size /= profile->kvd_hash_double_parts + 3713 profile->kvd_hash_single_parts; 3714 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 3715 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 3716 double_size, 3717 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3718 MLXSW_SP_RESOURCE_KVD, 3719 &hash_double_size_params); 3720 if (err) 3721 return err; 3722 3723 single_size = kvd_size - double_size - linear_size; 3724 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 3725 single_size, 3726 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3727 MLXSW_SP_RESOURCE_KVD, 3728 &hash_single_size_params); 3729 if (err) 3730 return err; 3731 3732 return 0; 3733 } 3734 3735 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3736 { 3737 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3738 struct devlink_resource_size_params kvd_size_params; 3739 u32 kvd_size; 3740 3741 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3742 return -EIO; 3743 3744 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3745 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 3746 MLXSW_SP_KVD_GRANULARITY, 3747 DEVLINK_RESOURCE_UNIT_ENTRY); 3748 3749 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3750 kvd_size, MLXSW_SP_RESOURCE_KVD, 3751 DEVLINK_RESOURCE_ID_PARENT_TOP, 3752 &kvd_size_params); 3753 } 3754 3755 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 3756 { 3757 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3758 struct devlink_resource_size_params span_size_params; 3759 u32 max_span; 3760 3761 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 3762 return -EIO; 3763 3764 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 3765 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 3766 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3767 3768 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 3769 max_span, MLXSW_SP_RESOURCE_SPAN, 3770 DEVLINK_RESOURCE_ID_PARENT_TOP, 3771 &span_size_params); 3772 } 3773 3774 static int 3775 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core) 3776 { 3777 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3778 struct devlink_resource_size_params size_params; 3779 u8 max_rif_mac_profiles; 3780 3781 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) 3782 max_rif_mac_profiles = 1; 3783 else 3784 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, 3785 MAX_RIF_MAC_PROFILES); 3786 devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, 3787 max_rif_mac_profiles, 1, 3788 DEVLINK_RESOURCE_UNIT_ENTRY); 3789 3790 return devl_resource_register(devlink, 3791 "rif_mac_profiles", 3792 max_rif_mac_profiles, 3793 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, 3794 DEVLINK_RESOURCE_ID_PARENT_TOP, 3795 &size_params); 3796 } 3797 3798 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core) 3799 { 3800 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3801 struct devlink_resource_size_params size_params; 3802 u64 max_rifs; 3803 3804 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS)) 3805 return -EIO; 3806 3807 max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS); 3808 devlink_resource_size_params_init(&size_params, max_rifs, max_rifs, 3809 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3810 3811 return devl_resource_register(devlink, "rifs", max_rifs, 3812 MLXSW_SP_RESOURCE_RIFS, 3813 DEVLINK_RESOURCE_ID_PARENT_TOP, 3814 &size_params); 3815 } 3816 3817 static int 3818 mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core) 3819 { 3820 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3821 struct devlink_resource_size_params size_params; 3822 u64 max; 3823 3824 if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE)) 3825 return -EIO; 3826 3827 max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE); 3828 devlink_resource_size_params_init(&size_params, max, max, 1, 3829 DEVLINK_RESOURCE_UNIT_ENTRY); 3830 3831 return devl_resource_register(devlink, "port_range_registers", max, 3832 MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS, 3833 DEVLINK_RESOURCE_ID_PARENT_TOP, 3834 &size_params); 3835 } 3836 3837 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 3838 { 3839 int err; 3840 3841 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 3842 if (err) 3843 return err; 3844 3845 err = mlxsw_sp_resources_span_register(mlxsw_core); 3846 if (err) 3847 goto err_resources_span_register; 3848 3849 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3850 if (err) 3851 goto err_resources_counter_register; 3852 3853 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3854 if (err) 3855 goto err_policer_resources_register; 3856 3857 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3858 if (err) 3859 goto err_resources_rif_mac_profile_register; 3860 3861 err = mlxsw_sp_resources_rifs_register(mlxsw_core); 3862 if (err) 3863 goto err_resources_rifs_register; 3864 3865 err = mlxsw_sp_resources_port_range_register(mlxsw_core); 3866 if (err) 3867 goto err_resources_port_range_register; 3868 3869 return 0; 3870 3871 err_resources_port_range_register: 3872 err_resources_rifs_register: 3873 err_resources_rif_mac_profile_register: 3874 err_policer_resources_register: 3875 err_resources_counter_register: 3876 err_resources_span_register: 3877 devl_resources_unregister(priv_to_devlink(mlxsw_core)); 3878 return err; 3879 } 3880 3881 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 3882 { 3883 int err; 3884 3885 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 3886 if (err) 3887 return err; 3888 3889 err = mlxsw_sp_resources_span_register(mlxsw_core); 3890 if (err) 3891 goto err_resources_span_register; 3892 3893 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3894 if (err) 3895 goto err_resources_counter_register; 3896 3897 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3898 if (err) 3899 goto err_policer_resources_register; 3900 3901 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3902 if (err) 3903 goto err_resources_rif_mac_profile_register; 3904 3905 err = mlxsw_sp_resources_rifs_register(mlxsw_core); 3906 if (err) 3907 goto err_resources_rifs_register; 3908 3909 err = mlxsw_sp_resources_port_range_register(mlxsw_core); 3910 if (err) 3911 goto err_resources_port_range_register; 3912 3913 return 0; 3914 3915 err_resources_port_range_register: 3916 err_resources_rifs_register: 3917 err_resources_rif_mac_profile_register: 3918 err_policer_resources_register: 3919 err_resources_counter_register: 3920 err_resources_span_register: 3921 devl_resources_unregister(priv_to_devlink(mlxsw_core)); 3922 return err; 3923 } 3924 3925 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3926 const struct mlxsw_config_profile *profile, 3927 u64 *p_single_size, u64 *p_double_size, 3928 u64 *p_linear_size) 3929 { 3930 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3931 u32 double_size; 3932 int err; 3933 3934 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3935 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 3936 return -EIO; 3937 3938 /* The hash part is what left of the kvd without the 3939 * linear part. It is split to the single size and 3940 * double size by the parts ratio from the profile. 3941 * Both sizes must be a multiplications of the 3942 * granularity from the profile. In case the user 3943 * provided the sizes they are obtained via devlink. 3944 */ 3945 err = devl_resource_size_get(devlink, 3946 MLXSW_SP_RESOURCE_KVD_LINEAR, 3947 p_linear_size); 3948 if (err) 3949 *p_linear_size = profile->kvd_linear_size; 3950 3951 err = devl_resource_size_get(devlink, 3952 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3953 p_double_size); 3954 if (err) { 3955 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3956 *p_linear_size; 3957 double_size *= profile->kvd_hash_double_parts; 3958 double_size /= profile->kvd_hash_double_parts + 3959 profile->kvd_hash_single_parts; 3960 *p_double_size = rounddown(double_size, 3961 MLXSW_SP_KVD_GRANULARITY); 3962 } 3963 3964 err = devl_resource_size_get(devlink, 3965 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3966 p_single_size); 3967 if (err) 3968 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3969 *p_double_size - *p_linear_size; 3970 3971 /* Check results are legal. */ 3972 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3973 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3974 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3975 return -EIO; 3976 3977 return 0; 3978 } 3979 3980 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3981 struct sk_buff *skb, u16 local_port) 3982 { 3983 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3984 3985 skb_pull(skb, MLXSW_TXHDR_LEN); 3986 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3987 } 3988 3989 static struct mlxsw_driver mlxsw_sp1_driver = { 3990 .kind = mlxsw_sp1_driver_name, 3991 .priv_size = sizeof(struct mlxsw_sp), 3992 .fw_req_rev = &mlxsw_sp1_fw_rev, 3993 .fw_filename = MLXSW_SP1_FW_FILENAME, 3994 .init = mlxsw_sp1_init, 3995 .fini = mlxsw_sp_fini, 3996 .port_split = mlxsw_sp_port_split, 3997 .port_unsplit = mlxsw_sp_port_unsplit, 3998 .sb_pool_get = mlxsw_sp_sb_pool_get, 3999 .sb_pool_set = mlxsw_sp_sb_pool_set, 4000 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4001 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4002 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4003 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4004 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4005 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4006 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4007 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4008 .trap_init = mlxsw_sp_trap_init, 4009 .trap_fini = mlxsw_sp_trap_fini, 4010 .trap_action_set = mlxsw_sp_trap_action_set, 4011 .trap_group_init = mlxsw_sp_trap_group_init, 4012 .trap_group_set = mlxsw_sp_trap_group_set, 4013 .trap_policer_init = mlxsw_sp_trap_policer_init, 4014 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4015 .trap_policer_set = mlxsw_sp_trap_policer_set, 4016 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4017 .txhdr_construct = mlxsw_sp_txhdr_construct, 4018 .resources_register = mlxsw_sp1_resources_register, 4019 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 4020 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4021 .txhdr_len = MLXSW_TXHDR_LEN, 4022 .profile = &mlxsw_sp1_config_profile, 4023 .sdq_supports_cqe_v2 = false, 4024 }; 4025 4026 static struct mlxsw_driver mlxsw_sp2_driver = { 4027 .kind = mlxsw_sp2_driver_name, 4028 .priv_size = sizeof(struct mlxsw_sp), 4029 .fw_req_rev = &mlxsw_sp2_fw_rev, 4030 .fw_filename = MLXSW_SP2_FW_FILENAME, 4031 .init = mlxsw_sp2_init, 4032 .fini = mlxsw_sp_fini, 4033 .port_split = mlxsw_sp_port_split, 4034 .port_unsplit = mlxsw_sp_port_unsplit, 4035 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 4036 .sb_pool_get = mlxsw_sp_sb_pool_get, 4037 .sb_pool_set = mlxsw_sp_sb_pool_set, 4038 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4039 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4040 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4041 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4042 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4043 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4044 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4045 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4046 .trap_init = mlxsw_sp_trap_init, 4047 .trap_fini = mlxsw_sp_trap_fini, 4048 .trap_action_set = mlxsw_sp_trap_action_set, 4049 .trap_group_init = mlxsw_sp_trap_group_init, 4050 .trap_group_set = mlxsw_sp_trap_group_set, 4051 .trap_policer_init = mlxsw_sp_trap_policer_init, 4052 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4053 .trap_policer_set = mlxsw_sp_trap_policer_set, 4054 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4055 .txhdr_construct = mlxsw_sp_txhdr_construct, 4056 .resources_register = mlxsw_sp2_resources_register, 4057 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4058 .txhdr_len = MLXSW_TXHDR_LEN, 4059 .profile = &mlxsw_sp2_config_profile, 4060 .sdq_supports_cqe_v2 = true, 4061 }; 4062 4063 static struct mlxsw_driver mlxsw_sp3_driver = { 4064 .kind = mlxsw_sp3_driver_name, 4065 .priv_size = sizeof(struct mlxsw_sp), 4066 .fw_req_rev = &mlxsw_sp3_fw_rev, 4067 .fw_filename = MLXSW_SP3_FW_FILENAME, 4068 .init = mlxsw_sp3_init, 4069 .fini = mlxsw_sp_fini, 4070 .port_split = mlxsw_sp_port_split, 4071 .port_unsplit = mlxsw_sp_port_unsplit, 4072 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 4073 .sb_pool_get = mlxsw_sp_sb_pool_get, 4074 .sb_pool_set = mlxsw_sp_sb_pool_set, 4075 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4076 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4077 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4078 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4079 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4080 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4081 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4082 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4083 .trap_init = mlxsw_sp_trap_init, 4084 .trap_fini = mlxsw_sp_trap_fini, 4085 .trap_action_set = mlxsw_sp_trap_action_set, 4086 .trap_group_init = mlxsw_sp_trap_group_init, 4087 .trap_group_set = mlxsw_sp_trap_group_set, 4088 .trap_policer_init = mlxsw_sp_trap_policer_init, 4089 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4090 .trap_policer_set = mlxsw_sp_trap_policer_set, 4091 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4092 .txhdr_construct = mlxsw_sp_txhdr_construct, 4093 .resources_register = mlxsw_sp2_resources_register, 4094 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4095 .txhdr_len = MLXSW_TXHDR_LEN, 4096 .profile = &mlxsw_sp2_config_profile, 4097 .sdq_supports_cqe_v2 = true, 4098 }; 4099 4100 static struct mlxsw_driver mlxsw_sp4_driver = { 4101 .kind = mlxsw_sp4_driver_name, 4102 .priv_size = sizeof(struct mlxsw_sp), 4103 .init = mlxsw_sp4_init, 4104 .fini = mlxsw_sp_fini, 4105 .port_split = mlxsw_sp_port_split, 4106 .port_unsplit = mlxsw_sp_port_unsplit, 4107 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 4108 .sb_pool_get = mlxsw_sp_sb_pool_get, 4109 .sb_pool_set = mlxsw_sp_sb_pool_set, 4110 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4111 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4112 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4113 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4114 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4115 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4116 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4117 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4118 .trap_init = mlxsw_sp_trap_init, 4119 .trap_fini = mlxsw_sp_trap_fini, 4120 .trap_action_set = mlxsw_sp_trap_action_set, 4121 .trap_group_init = mlxsw_sp_trap_group_init, 4122 .trap_group_set = mlxsw_sp_trap_group_set, 4123 .trap_policer_init = mlxsw_sp_trap_policer_init, 4124 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4125 .trap_policer_set = mlxsw_sp_trap_policer_set, 4126 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4127 .txhdr_construct = mlxsw_sp_txhdr_construct, 4128 .resources_register = mlxsw_sp2_resources_register, 4129 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4130 .txhdr_len = MLXSW_TXHDR_LEN, 4131 .profile = &mlxsw_sp4_config_profile, 4132 .sdq_supports_cqe_v2 = true, 4133 }; 4134 4135 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 4136 { 4137 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 4138 } 4139 4140 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, 4141 struct netdev_nested_priv *priv) 4142 { 4143 int ret = 0; 4144 4145 if (mlxsw_sp_port_dev_check(lower_dev)) { 4146 priv->data = (void *)netdev_priv(lower_dev); 4147 ret = 1; 4148 } 4149 4150 return ret; 4151 } 4152 4153 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 4154 { 4155 struct netdev_nested_priv priv = { 4156 .data = NULL, 4157 }; 4158 4159 if (mlxsw_sp_port_dev_check(dev)) 4160 return netdev_priv(dev); 4161 4162 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv); 4163 4164 return (struct mlxsw_sp_port *)priv.data; 4165 } 4166 4167 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 4168 { 4169 struct mlxsw_sp_port *mlxsw_sp_port; 4170 4171 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 4172 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 4173 } 4174 4175 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 4176 { 4177 struct netdev_nested_priv priv = { 4178 .data = NULL, 4179 }; 4180 4181 if (mlxsw_sp_port_dev_check(dev)) 4182 return netdev_priv(dev); 4183 4184 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 4185 &priv); 4186 4187 return (struct mlxsw_sp_port *)priv.data; 4188 } 4189 4190 int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) 4191 { 4192 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4193 int err = 0; 4194 4195 mutex_lock(&mlxsw_sp->parsing.lock); 4196 4197 if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref)) 4198 goto out_unlock; 4199 4200 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH, 4201 mlxsw_sp->parsing.vxlan_udp_dport); 4202 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4203 if (err) 4204 goto out_unlock; 4205 4206 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH; 4207 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1); 4208 4209 out_unlock: 4210 mutex_unlock(&mlxsw_sp->parsing.lock); 4211 return err; 4212 } 4213 4214 void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp) 4215 { 4216 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4217 4218 mutex_lock(&mlxsw_sp->parsing.lock); 4219 4220 if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref)) 4221 goto out_unlock; 4222 4223 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH, 4224 mlxsw_sp->parsing.vxlan_udp_dport); 4225 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4226 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 4227 4228 out_unlock: 4229 mutex_unlock(&mlxsw_sp->parsing.lock); 4230 } 4231 4232 int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, 4233 __be16 udp_dport) 4234 { 4235 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4236 int err; 4237 4238 mutex_lock(&mlxsw_sp->parsing.lock); 4239 4240 mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth, 4241 be16_to_cpu(udp_dport)); 4242 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4243 if (err) 4244 goto out_unlock; 4245 4246 mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport); 4247 4248 out_unlock: 4249 mutex_unlock(&mlxsw_sp->parsing.lock); 4250 return err; 4251 } 4252 4253 static void 4254 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 4255 struct net_device *lag_dev) 4256 { 4257 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 4258 struct net_device *upper_dev; 4259 struct list_head *iter; 4260 4261 if (netif_is_bridge_port(lag_dev)) 4262 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 4263 4264 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4265 if (!netif_is_bridge_port(upper_dev)) 4266 continue; 4267 br_dev = netdev_master_upper_dev_get(upper_dev); 4268 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 4269 } 4270 } 4271 4272 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4273 { 4274 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4275 4276 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4277 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4278 } 4279 4280 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4281 { 4282 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4283 4284 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 4285 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4286 } 4287 4288 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4289 u16 lag_id, u8 port_index) 4290 { 4291 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4292 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4293 4294 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 4295 lag_id, port_index); 4296 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4297 } 4298 4299 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4300 u16 lag_id) 4301 { 4302 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4303 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4304 4305 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 4306 lag_id); 4307 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4308 } 4309 4310 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 4311 u16 lag_id) 4312 { 4313 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4314 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4315 4316 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 4317 lag_id); 4318 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4319 } 4320 4321 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 4322 u16 lag_id) 4323 { 4324 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4325 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4326 4327 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 4328 lag_id); 4329 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4330 } 4331 4332 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4333 struct net_device *lag_dev, 4334 u16 *p_lag_id) 4335 { 4336 struct mlxsw_sp_upper *lag; 4337 int free_lag_id = -1; 4338 u16 max_lag; 4339 int err, i; 4340 4341 err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); 4342 if (err) 4343 return err; 4344 4345 for (i = 0; i < max_lag; i++) { 4346 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 4347 if (lag->ref_count) { 4348 if (lag->dev == lag_dev) { 4349 *p_lag_id = i; 4350 return 0; 4351 } 4352 } else if (free_lag_id < 0) { 4353 free_lag_id = i; 4354 } 4355 } 4356 if (free_lag_id < 0) 4357 return -EBUSY; 4358 *p_lag_id = free_lag_id; 4359 return 0; 4360 } 4361 4362 static bool 4363 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4364 struct net_device *lag_dev, 4365 struct netdev_lag_upper_info *lag_upper_info, 4366 struct netlink_ext_ack *extack) 4367 { 4368 u16 lag_id; 4369 4370 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 4371 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 4372 return false; 4373 } 4374 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4375 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4376 return false; 4377 } 4378 return true; 4379 } 4380 4381 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4382 u16 lag_id, u8 *p_port_index) 4383 { 4384 u64 max_lag_members; 4385 int i; 4386 4387 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4388 MAX_LAG_MEMBERS); 4389 for (i = 0; i < max_lag_members; i++) { 4390 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4391 *p_port_index = i; 4392 return 0; 4393 } 4394 } 4395 return -EBUSY; 4396 } 4397 4398 static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, 4399 struct net_device *lag_dev, 4400 struct netlink_ext_ack *extack) 4401 { 4402 struct net_device *upper_dev; 4403 struct net_device *master; 4404 struct list_head *iter; 4405 int done = 0; 4406 int err; 4407 4408 master = netdev_master_upper_dev_get(lag_dev); 4409 if (master && netif_is_bridge_master(master)) { 4410 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master, 4411 extack); 4412 if (err) 4413 return err; 4414 } 4415 4416 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4417 if (!is_vlan_dev(upper_dev)) 4418 continue; 4419 4420 master = netdev_master_upper_dev_get(upper_dev); 4421 if (master && netif_is_bridge_master(master)) { 4422 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4423 upper_dev, master, 4424 extack); 4425 if (err) 4426 goto err_port_bridge_join; 4427 } 4428 4429 ++done; 4430 } 4431 4432 return 0; 4433 4434 err_port_bridge_join: 4435 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4436 if (!is_vlan_dev(upper_dev)) 4437 continue; 4438 4439 master = netdev_master_upper_dev_get(upper_dev); 4440 if (!master || !netif_is_bridge_master(master)) 4441 continue; 4442 4443 if (!done--) 4444 break; 4445 4446 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master); 4447 } 4448 4449 master = netdev_master_upper_dev_get(lag_dev); 4450 if (master && netif_is_bridge_master(master)) 4451 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master); 4452 4453 return err; 4454 } 4455 4456 static void 4457 mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4458 struct net_device *lag_dev) 4459 { 4460 struct net_device *upper_dev; 4461 struct net_device *master; 4462 struct list_head *iter; 4463 4464 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4465 if (!is_vlan_dev(upper_dev)) 4466 continue; 4467 4468 master = netdev_master_upper_dev_get(upper_dev); 4469 if (!master) 4470 continue; 4471 4472 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master); 4473 } 4474 4475 master = netdev_master_upper_dev_get(lag_dev); 4476 if (master) 4477 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master); 4478 } 4479 4480 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 4481 struct net_device *lag_dev, 4482 struct netlink_ext_ack *extack) 4483 { 4484 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4485 struct mlxsw_sp_upper *lag; 4486 u16 lag_id; 4487 u8 port_index; 4488 int err; 4489 4490 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 4491 if (err) 4492 return err; 4493 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4494 if (!lag->ref_count) { 4495 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 4496 if (err) 4497 return err; 4498 lag->dev = lag_dev; 4499 } 4500 4501 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4502 if (err) 4503 return err; 4504 4505 err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev, 4506 extack); 4507 if (err) 4508 goto err_lag_uppers_bridge_join; 4509 4510 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 4511 if (err) 4512 goto err_col_port_add; 4513 4514 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 4515 mlxsw_sp_port->local_port); 4516 mlxsw_sp_port->lag_id = lag_id; 4517 mlxsw_sp_port->lagged = 1; 4518 lag->ref_count++; 4519 4520 err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port); 4521 if (err) 4522 goto err_fid_port_join_lag; 4523 4524 /* Port is no longer usable as a router interface */ 4525 if (mlxsw_sp_port->default_vlan->fid) 4526 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 4527 4528 /* Join a router interface configured on the LAG, if exists */ 4529 err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, 4530 extack); 4531 if (err) 4532 goto err_router_join; 4533 4534 err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack); 4535 if (err) 4536 goto err_replay; 4537 4538 return 0; 4539 4540 err_replay: 4541 mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev); 4542 err_router_join: 4543 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port); 4544 err_fid_port_join_lag: 4545 lag->ref_count--; 4546 mlxsw_sp_port->lagged = 0; 4547 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4548 mlxsw_sp_port->local_port); 4549 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4550 err_col_port_add: 4551 mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev); 4552 err_lag_uppers_bridge_join: 4553 if (!lag->ref_count) 4554 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4555 return err; 4556 } 4557 4558 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4559 struct net_device *lag_dev) 4560 { 4561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4562 u16 lag_id = mlxsw_sp_port->lag_id; 4563 struct mlxsw_sp_upper *lag; 4564 4565 if (!mlxsw_sp_port->lagged) 4566 return; 4567 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4568 WARN_ON(lag->ref_count == 0); 4569 4570 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4571 4572 /* Any VLANs configured on the port are no longer valid */ 4573 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 4574 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 4575 /* Make the LAG and its directly linked uppers leave bridges they 4576 * are memeber in 4577 */ 4578 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 4579 4580 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port); 4581 4582 if (lag->ref_count == 1) 4583 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4584 4585 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4586 mlxsw_sp_port->local_port); 4587 mlxsw_sp_port->lagged = 0; 4588 lag->ref_count--; 4589 4590 /* Make sure untagged frames are allowed to ingress */ 4591 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, 4592 ETH_P_8021Q); 4593 } 4594 4595 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4596 u16 lag_id) 4597 { 4598 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4599 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4600 4601 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4602 mlxsw_sp_port->local_port); 4603 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4604 } 4605 4606 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4607 u16 lag_id) 4608 { 4609 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4610 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4611 4612 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4613 mlxsw_sp_port->local_port); 4614 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4615 } 4616 4617 static int 4618 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 4619 { 4620 int err; 4621 4622 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 4623 mlxsw_sp_port->lag_id); 4624 if (err) 4625 return err; 4626 4627 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4628 if (err) 4629 goto err_dist_port_add; 4630 4631 return 0; 4632 4633 err_dist_port_add: 4634 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4635 return err; 4636 } 4637 4638 static int 4639 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 4640 { 4641 int err; 4642 4643 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4644 mlxsw_sp_port->lag_id); 4645 if (err) 4646 return err; 4647 4648 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 4649 mlxsw_sp_port->lag_id); 4650 if (err) 4651 goto err_col_port_disable; 4652 4653 return 0; 4654 4655 err_col_port_disable: 4656 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4657 return err; 4658 } 4659 4660 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4661 struct netdev_lag_lower_state_info *info) 4662 { 4663 if (info->tx_enabled) 4664 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 4665 else 4666 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4667 } 4668 4669 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4670 bool enable) 4671 { 4672 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4673 enum mlxsw_reg_spms_state spms_state; 4674 char *spms_pl; 4675 u16 vid; 4676 int err; 4677 4678 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 4679 MLXSW_REG_SPMS_STATE_DISCARDING; 4680 4681 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4682 if (!spms_pl) 4683 return -ENOMEM; 4684 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4685 4686 for (vid = 0; vid < VLAN_N_VID; vid++) 4687 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4688 4689 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4690 kfree(spms_pl); 4691 return err; 4692 } 4693 4694 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4695 { 4696 u16 vid = 1; 4697 int err; 4698 4699 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4700 if (err) 4701 return err; 4702 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4703 if (err) 4704 goto err_port_stp_set; 4705 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4706 true, false); 4707 if (err) 4708 goto err_port_vlan_set; 4709 4710 for (; vid <= VLAN_N_VID - 1; vid++) { 4711 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4712 vid, false); 4713 if (err) 4714 goto err_vid_learning_set; 4715 } 4716 4717 return 0; 4718 4719 err_vid_learning_set: 4720 for (vid--; vid >= 1; vid--) 4721 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4722 err_port_vlan_set: 4723 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4724 err_port_stp_set: 4725 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4726 return err; 4727 } 4728 4729 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4730 { 4731 u16 vid; 4732 4733 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4734 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4735 vid, true); 4736 4737 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4738 false, false); 4739 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4740 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4741 } 4742 4743 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 4744 { 4745 unsigned int num_vxlans = 0; 4746 struct net_device *dev; 4747 struct list_head *iter; 4748 4749 netdev_for_each_lower_dev(br_dev, dev, iter) { 4750 if (netif_is_vxlan(dev)) 4751 num_vxlans++; 4752 } 4753 4754 return num_vxlans > 1; 4755 } 4756 4757 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4758 { 4759 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4760 struct net_device *dev; 4761 struct list_head *iter; 4762 4763 netdev_for_each_lower_dev(br_dev, dev, iter) { 4764 u16 pvid; 4765 int err; 4766 4767 if (!netif_is_vxlan(dev)) 4768 continue; 4769 4770 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4771 if (err || !pvid) 4772 continue; 4773 4774 if (test_and_set_bit(pvid, vlans)) 4775 return false; 4776 } 4777 4778 return true; 4779 } 4780 4781 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4782 struct netlink_ext_ack *extack) 4783 { 4784 if (br_multicast_enabled(br_dev)) { 4785 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4786 return false; 4787 } 4788 4789 if (!br_vlan_enabled(br_dev) && 4790 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4791 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4792 return false; 4793 } 4794 4795 if (br_vlan_enabled(br_dev) && 4796 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4797 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4798 return false; 4799 } 4800 4801 return true; 4802 } 4803 4804 static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev, 4805 struct net_device *dev) 4806 { 4807 return upper_dev == netdev_master_upper_dev_get(dev); 4808 } 4809 4810 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp, 4811 unsigned long event, void *ptr, 4812 bool process_foreign); 4813 4814 static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp, 4815 struct net_device *dev, 4816 struct netlink_ext_ack *extack) 4817 { 4818 struct net_device *upper_dev; 4819 struct list_head *iter; 4820 int err; 4821 4822 netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) { 4823 struct netdev_notifier_changeupper_info info = { 4824 .info = { 4825 .dev = dev, 4826 .extack = extack, 4827 }, 4828 .master = mlxsw_sp_netdev_is_master(upper_dev, dev), 4829 .upper_dev = upper_dev, 4830 .linking = true, 4831 4832 /* upper_info is relevant for LAG devices. But we would 4833 * only need this if LAG were a valid upper above 4834 * another upper (e.g. a bridge that is a member of a 4835 * LAG), and that is never a valid configuration. So we 4836 * can keep this as NULL. 4837 */ 4838 .upper_info = NULL, 4839 }; 4840 4841 err = __mlxsw_sp_netdevice_event(mlxsw_sp, 4842 NETDEV_PRECHANGEUPPER, 4843 &info, true); 4844 if (err) 4845 return err; 4846 4847 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev, 4848 extack); 4849 if (err) 4850 return err; 4851 } 4852 4853 return 0; 4854 } 4855 4856 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 4857 struct net_device *dev, 4858 unsigned long event, void *ptr, 4859 bool replay_deslavement) 4860 { 4861 struct netdev_notifier_changeupper_info *info; 4862 struct mlxsw_sp_port *mlxsw_sp_port; 4863 struct netlink_ext_ack *extack; 4864 struct net_device *upper_dev; 4865 struct mlxsw_sp *mlxsw_sp; 4866 int err = 0; 4867 u16 proto; 4868 4869 mlxsw_sp_port = netdev_priv(dev); 4870 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4871 info = ptr; 4872 extack = netdev_notifier_info_to_extack(&info->info); 4873 4874 switch (event) { 4875 case NETDEV_PRECHANGEUPPER: 4876 upper_dev = info->upper_dev; 4877 if (!is_vlan_dev(upper_dev) && 4878 !netif_is_lag_master(upper_dev) && 4879 !netif_is_bridge_master(upper_dev) && 4880 !netif_is_ovs_master(upper_dev) && 4881 !netif_is_macvlan(upper_dev) && 4882 !netif_is_l3_master(upper_dev)) { 4883 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4884 return -EINVAL; 4885 } 4886 if (!info->linking) 4887 break; 4888 if (netif_is_bridge_master(upper_dev) && 4889 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4890 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4891 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4892 return -EOPNOTSUPP; 4893 if (netdev_has_any_upper_dev(upper_dev) && 4894 (!netif_is_bridge_master(upper_dev) || 4895 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4896 upper_dev))) { 4897 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, 4898 upper_dev, 4899 extack); 4900 if (err) 4901 return err; 4902 } 4903 if (netif_is_lag_master(upper_dev) && 4904 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4905 info->upper_info, extack)) 4906 return -EINVAL; 4907 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4908 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4909 return -EINVAL; 4910 } 4911 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4912 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4913 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4914 return -EINVAL; 4915 } 4916 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4917 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 4918 return -EINVAL; 4919 } 4920 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4921 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4922 return -EINVAL; 4923 } 4924 if (netif_is_bridge_master(upper_dev)) { 4925 br_vlan_get_proto(upper_dev, &proto); 4926 if (br_vlan_enabled(upper_dev) && 4927 proto != ETH_P_8021Q && proto != ETH_P_8021AD) { 4928 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported"); 4929 return -EOPNOTSUPP; 4930 } 4931 if (vlan_uses_dev(lower_dev) && 4932 br_vlan_enabled(upper_dev) && 4933 proto == ETH_P_8021AD) { 4934 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported"); 4935 return -EOPNOTSUPP; 4936 } 4937 } 4938 if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) { 4939 struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev); 4940 4941 if (br_vlan_enabled(br_dev)) { 4942 br_vlan_get_proto(br_dev, &proto); 4943 if (proto == ETH_P_8021AD) { 4944 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge"); 4945 return -EOPNOTSUPP; 4946 } 4947 } 4948 } 4949 if (is_vlan_dev(upper_dev) && 4950 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4951 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4952 return -EOPNOTSUPP; 4953 } 4954 if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) { 4955 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port"); 4956 return -EOPNOTSUPP; 4957 } 4958 break; 4959 case NETDEV_CHANGEUPPER: 4960 upper_dev = info->upper_dev; 4961 if (netif_is_bridge_master(upper_dev)) { 4962 if (info->linking) { 4963 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4964 lower_dev, 4965 upper_dev, 4966 extack); 4967 } else { 4968 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4969 lower_dev, 4970 upper_dev); 4971 if (!replay_deslavement) 4972 break; 4973 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, 4974 lower_dev); 4975 } 4976 } else if (netif_is_lag_master(upper_dev)) { 4977 if (info->linking) { 4978 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4979 upper_dev, extack); 4980 } else { 4981 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4982 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4983 upper_dev); 4984 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, 4985 dev); 4986 } 4987 } else if (netif_is_ovs_master(upper_dev)) { 4988 if (info->linking) 4989 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4990 else 4991 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4992 } else if (netif_is_macvlan(upper_dev)) { 4993 if (!info->linking) 4994 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4995 } else if (is_vlan_dev(upper_dev)) { 4996 struct net_device *br_dev; 4997 4998 if (!netif_is_bridge_port(upper_dev)) 4999 break; 5000 if (info->linking) 5001 break; 5002 br_dev = netdev_master_upper_dev_get(upper_dev); 5003 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 5004 br_dev); 5005 } 5006 break; 5007 } 5008 5009 return err; 5010 } 5011 5012 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 5013 unsigned long event, void *ptr) 5014 { 5015 struct netdev_notifier_changelowerstate_info *info; 5016 struct mlxsw_sp_port *mlxsw_sp_port; 5017 int err; 5018 5019 mlxsw_sp_port = netdev_priv(dev); 5020 info = ptr; 5021 5022 switch (event) { 5023 case NETDEV_CHANGELOWERSTATE: 5024 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 5025 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 5026 info->lower_state_info); 5027 if (err) 5028 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 5029 } 5030 break; 5031 } 5032 5033 return 0; 5034 } 5035 5036 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 5037 struct net_device *port_dev, 5038 unsigned long event, void *ptr, 5039 bool replay_deslavement) 5040 { 5041 switch (event) { 5042 case NETDEV_PRECHANGEUPPER: 5043 case NETDEV_CHANGEUPPER: 5044 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 5045 event, ptr, 5046 replay_deslavement); 5047 case NETDEV_CHANGELOWERSTATE: 5048 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 5049 ptr); 5050 } 5051 5052 return 0; 5053 } 5054 5055 /* Called for LAG or its upper VLAN after the per-LAG-lower processing was done, 5056 * to do any per-LAG / per-LAG-upper processing. 5057 */ 5058 static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev, 5059 unsigned long event, 5060 void *ptr) 5061 { 5062 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev); 5063 struct netdev_notifier_changeupper_info *info = ptr; 5064 5065 if (!mlxsw_sp) 5066 return 0; 5067 5068 switch (event) { 5069 case NETDEV_CHANGEUPPER: 5070 if (info->linking) 5071 break; 5072 if (netif_is_bridge_master(info->upper_dev)) 5073 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev); 5074 break; 5075 } 5076 return 0; 5077 } 5078 5079 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 5080 unsigned long event, void *ptr) 5081 { 5082 struct net_device *dev; 5083 struct list_head *iter; 5084 int ret; 5085 5086 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5087 if (mlxsw_sp_port_dev_check(dev)) { 5088 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 5089 ptr, false); 5090 if (ret) 5091 return ret; 5092 } 5093 } 5094 5095 return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr); 5096 } 5097 5098 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 5099 struct net_device *dev, 5100 unsigned long event, void *ptr, 5101 u16 vid, bool replay_deslavement) 5102 { 5103 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 5104 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5105 struct netdev_notifier_changeupper_info *info = ptr; 5106 struct netlink_ext_ack *extack; 5107 struct net_device *upper_dev; 5108 int err = 0; 5109 5110 extack = netdev_notifier_info_to_extack(&info->info); 5111 5112 switch (event) { 5113 case NETDEV_PRECHANGEUPPER: 5114 upper_dev = info->upper_dev; 5115 if (!netif_is_bridge_master(upper_dev) && 5116 !netif_is_macvlan(upper_dev) && 5117 !netif_is_l3_master(upper_dev)) { 5118 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5119 return -EINVAL; 5120 } 5121 if (!info->linking) 5122 break; 5123 if (netif_is_bridge_master(upper_dev) && 5124 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5125 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5126 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5127 return -EOPNOTSUPP; 5128 if (netdev_has_any_upper_dev(upper_dev) && 5129 (!netif_is_bridge_master(upper_dev) || 5130 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5131 upper_dev))) { 5132 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, 5133 upper_dev, 5134 extack); 5135 if (err) 5136 return err; 5137 } 5138 break; 5139 case NETDEV_CHANGEUPPER: 5140 upper_dev = info->upper_dev; 5141 if (netif_is_bridge_master(upper_dev)) { 5142 if (info->linking) { 5143 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5144 vlan_dev, 5145 upper_dev, 5146 extack); 5147 } else { 5148 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5149 vlan_dev, 5150 upper_dev); 5151 if (!replay_deslavement) 5152 break; 5153 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, 5154 vlan_dev); 5155 } 5156 } else if (netif_is_macvlan(upper_dev)) { 5157 if (!info->linking) 5158 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5159 } 5160 break; 5161 } 5162 5163 return err; 5164 } 5165 5166 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 5167 struct net_device *lag_dev, 5168 unsigned long event, 5169 void *ptr, u16 vid) 5170 { 5171 struct net_device *dev; 5172 struct list_head *iter; 5173 int ret; 5174 5175 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5176 if (mlxsw_sp_port_dev_check(dev)) { 5177 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 5178 event, ptr, 5179 vid, false); 5180 if (ret) 5181 return ret; 5182 } 5183 } 5184 5185 return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr); 5186 } 5187 5188 static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp, 5189 struct net_device *vlan_dev, 5190 struct net_device *br_dev, 5191 unsigned long event, void *ptr, 5192 u16 vid, bool process_foreign) 5193 { 5194 struct netdev_notifier_changeupper_info *info = ptr; 5195 struct netlink_ext_ack *extack; 5196 struct net_device *upper_dev; 5197 5198 if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev)) 5199 return 0; 5200 5201 extack = netdev_notifier_info_to_extack(&info->info); 5202 5203 switch (event) { 5204 case NETDEV_PRECHANGEUPPER: 5205 upper_dev = info->upper_dev; 5206 if (!netif_is_macvlan(upper_dev) && 5207 !netif_is_l3_master(upper_dev)) { 5208 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5209 return -EOPNOTSUPP; 5210 } 5211 break; 5212 case NETDEV_CHANGEUPPER: 5213 upper_dev = info->upper_dev; 5214 if (info->linking) 5215 break; 5216 if (netif_is_macvlan(upper_dev)) 5217 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5218 break; 5219 } 5220 5221 return 0; 5222 } 5223 5224 static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp, 5225 struct net_device *vlan_dev, 5226 unsigned long event, void *ptr, 5227 bool process_foreign) 5228 { 5229 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 5230 u16 vid = vlan_dev_vlan_id(vlan_dev); 5231 5232 if (mlxsw_sp_port_dev_check(real_dev)) 5233 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 5234 event, ptr, vid, 5235 true); 5236 else if (netif_is_lag_master(real_dev)) 5237 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 5238 real_dev, event, 5239 ptr, vid); 5240 else if (netif_is_bridge_master(real_dev)) 5241 return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev, 5242 real_dev, event, 5243 ptr, vid, 5244 process_foreign); 5245 5246 return 0; 5247 } 5248 5249 static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp, 5250 struct net_device *br_dev, 5251 unsigned long event, void *ptr, 5252 bool process_foreign) 5253 { 5254 struct netdev_notifier_changeupper_info *info = ptr; 5255 struct netlink_ext_ack *extack; 5256 struct net_device *upper_dev; 5257 u16 proto; 5258 5259 if (!process_foreign && !mlxsw_sp_lower_get(br_dev)) 5260 return 0; 5261 5262 extack = netdev_notifier_info_to_extack(&info->info); 5263 5264 switch (event) { 5265 case NETDEV_PRECHANGEUPPER: 5266 upper_dev = info->upper_dev; 5267 if (!is_vlan_dev(upper_dev) && 5268 !netif_is_macvlan(upper_dev) && 5269 !netif_is_l3_master(upper_dev)) { 5270 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5271 return -EOPNOTSUPP; 5272 } 5273 if (!info->linking) 5274 break; 5275 if (br_vlan_enabled(br_dev)) { 5276 br_vlan_get_proto(br_dev, &proto); 5277 if (proto == ETH_P_8021AD) { 5278 NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge"); 5279 return -EOPNOTSUPP; 5280 } 5281 } 5282 if (is_vlan_dev(upper_dev) && 5283 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 5284 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 5285 return -EOPNOTSUPP; 5286 } 5287 break; 5288 case NETDEV_CHANGEUPPER: 5289 upper_dev = info->upper_dev; 5290 if (info->linking) 5291 break; 5292 if (is_vlan_dev(upper_dev)) 5293 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 5294 if (netif_is_macvlan(upper_dev)) 5295 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5296 break; 5297 } 5298 5299 return 0; 5300 } 5301 5302 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 5303 unsigned long event, void *ptr) 5304 { 5305 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 5306 struct netdev_notifier_changeupper_info *info = ptr; 5307 struct netlink_ext_ack *extack; 5308 struct net_device *upper_dev; 5309 5310 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 5311 return 0; 5312 5313 extack = netdev_notifier_info_to_extack(&info->info); 5314 upper_dev = info->upper_dev; 5315 5316 if (!netif_is_l3_master(upper_dev)) { 5317 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5318 return -EOPNOTSUPP; 5319 } 5320 5321 return 0; 5322 } 5323 5324 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 5325 struct net_device *dev, 5326 unsigned long event, void *ptr) 5327 { 5328 struct netdev_notifier_changeupper_info *cu_info; 5329 struct netdev_notifier_info *info = ptr; 5330 struct netlink_ext_ack *extack; 5331 struct net_device *upper_dev; 5332 5333 extack = netdev_notifier_info_to_extack(info); 5334 5335 switch (event) { 5336 case NETDEV_CHANGEUPPER: 5337 cu_info = container_of(info, 5338 struct netdev_notifier_changeupper_info, 5339 info); 5340 upper_dev = cu_info->upper_dev; 5341 if (!netif_is_bridge_master(upper_dev)) 5342 return 0; 5343 if (!mlxsw_sp_lower_get(upper_dev)) 5344 return 0; 5345 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5346 return -EOPNOTSUPP; 5347 if (cu_info->linking) { 5348 if (!netif_running(dev)) 5349 return 0; 5350 /* When the bridge is VLAN-aware, the VNI of the VxLAN 5351 * device needs to be mapped to a VLAN, but at this 5352 * point no VLANs are configured on the VxLAN device 5353 */ 5354 if (br_vlan_enabled(upper_dev)) 5355 return 0; 5356 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 5357 dev, 0, extack); 5358 } else { 5359 /* VLANs were already flushed, which triggered the 5360 * necessary cleanup 5361 */ 5362 if (br_vlan_enabled(upper_dev)) 5363 return 0; 5364 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5365 } 5366 break; 5367 case NETDEV_PRE_UP: 5368 upper_dev = netdev_master_upper_dev_get(dev); 5369 if (!upper_dev) 5370 return 0; 5371 if (!netif_is_bridge_master(upper_dev)) 5372 return 0; 5373 if (!mlxsw_sp_lower_get(upper_dev)) 5374 return 0; 5375 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 5376 extack); 5377 case NETDEV_DOWN: 5378 upper_dev = netdev_master_upper_dev_get(dev); 5379 if (!upper_dev) 5380 return 0; 5381 if (!netif_is_bridge_master(upper_dev)) 5382 return 0; 5383 if (!mlxsw_sp_lower_get(upper_dev)) 5384 return 0; 5385 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5386 break; 5387 } 5388 5389 return 0; 5390 } 5391 5392 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp, 5393 unsigned long event, void *ptr, 5394 bool process_foreign) 5395 { 5396 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5397 struct mlxsw_sp_span_entry *span_entry; 5398 int err = 0; 5399 5400 if (event == NETDEV_UNREGISTER) { 5401 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 5402 if (span_entry) 5403 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 5404 } 5405 5406 if (netif_is_vxlan(dev)) 5407 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 5408 else if (mlxsw_sp_port_dev_check(dev)) 5409 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true); 5410 else if (netif_is_lag_master(dev)) 5411 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 5412 else if (is_vlan_dev(dev)) 5413 err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr, 5414 process_foreign); 5415 else if (netif_is_bridge_master(dev)) 5416 err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr, 5417 process_foreign); 5418 else if (netif_is_macvlan(dev)) 5419 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 5420 5421 return err; 5422 } 5423 5424 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 5425 unsigned long event, void *ptr) 5426 { 5427 struct mlxsw_sp *mlxsw_sp; 5428 int err; 5429 5430 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 5431 mlxsw_sp_span_respin(mlxsw_sp); 5432 err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false); 5433 5434 return notifier_from_errno(err); 5435 } 5436 5437 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 5438 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 5439 {0, }, 5440 }; 5441 5442 static struct pci_driver mlxsw_sp1_pci_driver = { 5443 .name = mlxsw_sp1_driver_name, 5444 .id_table = mlxsw_sp1_pci_id_table, 5445 }; 5446 5447 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 5448 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 5449 {0, }, 5450 }; 5451 5452 static struct pci_driver mlxsw_sp2_pci_driver = { 5453 .name = mlxsw_sp2_driver_name, 5454 .id_table = mlxsw_sp2_pci_id_table, 5455 }; 5456 5457 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 5458 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 5459 {0, }, 5460 }; 5461 5462 static struct pci_driver mlxsw_sp3_pci_driver = { 5463 .name = mlxsw_sp3_driver_name, 5464 .id_table = mlxsw_sp3_pci_id_table, 5465 }; 5466 5467 static const struct pci_device_id mlxsw_sp4_pci_id_table[] = { 5468 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0}, 5469 {0, }, 5470 }; 5471 5472 static struct pci_driver mlxsw_sp4_pci_driver = { 5473 .name = mlxsw_sp4_driver_name, 5474 .id_table = mlxsw_sp4_pci_id_table, 5475 }; 5476 5477 static int __init mlxsw_sp_module_init(void) 5478 { 5479 int err; 5480 5481 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 5482 if (err) 5483 return err; 5484 5485 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 5486 if (err) 5487 goto err_sp2_core_driver_register; 5488 5489 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 5490 if (err) 5491 goto err_sp3_core_driver_register; 5492 5493 err = mlxsw_core_driver_register(&mlxsw_sp4_driver); 5494 if (err) 5495 goto err_sp4_core_driver_register; 5496 5497 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 5498 if (err) 5499 goto err_sp1_pci_driver_register; 5500 5501 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 5502 if (err) 5503 goto err_sp2_pci_driver_register; 5504 5505 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 5506 if (err) 5507 goto err_sp3_pci_driver_register; 5508 5509 err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver); 5510 if (err) 5511 goto err_sp4_pci_driver_register; 5512 5513 return 0; 5514 5515 err_sp4_pci_driver_register: 5516 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5517 err_sp3_pci_driver_register: 5518 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5519 err_sp2_pci_driver_register: 5520 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5521 err_sp1_pci_driver_register: 5522 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5523 err_sp4_core_driver_register: 5524 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5525 err_sp3_core_driver_register: 5526 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5527 err_sp2_core_driver_register: 5528 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5529 return err; 5530 } 5531 5532 static void __exit mlxsw_sp_module_exit(void) 5533 { 5534 mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver); 5535 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5536 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5537 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5538 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5539 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5540 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5541 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5542 } 5543 5544 module_init(mlxsw_sp_module_init); 5545 module_exit(mlxsw_sp_module_exit); 5546 5547 MODULE_LICENSE("Dual BSD/GPL"); 5548 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5549 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 5550 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5551 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5552 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 5553 MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); 5554 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5555 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 5556 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 5557 MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME); 5558