// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <linux/ptp_classify.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"
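/* With the revision numbers above, MLXSW_SP1_FW_FILENAME expands to
 * "mellanox/mlxsw_spectrum-13.2010.1006.mfa2", and the line card INI bundle
 * name to "mellanox/lc_ini_bundle_2010_1006.bin".
 */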
113 */ 114 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 115 116 /* tx_hdr_ctl 117 * Packet control type. 118 * 0 - Ethernet control (e.g. EMADs, LACP) 119 * 1 - Ethernet data 120 */ 121 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 122 123 /* tx_hdr_proto 124 * Packet protocol type. Must be set to 1 (Ethernet). 125 */ 126 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 127 128 /* tx_hdr_rx_is_router 129 * Packet is sent from the router. Valid for data packets only. 130 */ 131 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 132 133 /* tx_hdr_fid_valid 134 * Indicates if the 'fid' field is valid and should be used for 135 * forwarding lookup. Valid for data packets only. 136 */ 137 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 138 139 /* tx_hdr_swid 140 * Switch partition ID. Must be set to 0. 141 */ 142 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 143 144 /* tx_hdr_control_tclass 145 * Indicates if the packet should use the control TClass and not one 146 * of the data TClasses. 147 */ 148 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 149 150 /* tx_hdr_etclass 151 * Egress TClass to be used on the egress device on the egress port. 152 */ 153 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 154 155 /* tx_hdr_port_mid 156 * Destination local port for unicast packets. 157 * Destination multicast ID for multicast packets. 158 * 159 * Control packets are directed to a specific egress port, while data 160 * packets are transmitted through the CPU port (0) into the switch partition, 161 * where forwarding rules are applied. 162 */ 163 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 164 165 /* tx_hdr_fid 166 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 167 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 168 * Valid for data packets only. 169 */ 170 MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16); 171 172 /* tx_hdr_type 173 * 0 - Data packets 174 * 6 - Control packets 175 */ 176 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 177 178 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 179 unsigned int counter_index, bool clear, 180 u64 *packets, u64 *bytes) 181 { 182 enum mlxsw_reg_mgpc_opcode op = clear ? 
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, bool clear,
			      u64 *packets, u64 *bytes)
{
	enum mlxsw_reg_mgpc_opcode op = clear ? MLXSW_REG_MGPC_OPCODE_CLEAR :
						MLXSW_REG_MGPC_OPCODE_NOP;
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, op,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
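/* A minimal usage sketch of the flow counter helpers above (error handling
 * elided): allocate a counter, read it without clearing, then release it.
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *
 *	mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index, false,
 *				  &packets, &bytes);
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 */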
void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
			      const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	char *txhdr;
	u16 max_fid;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		err = -ENOMEM;
		goto err_skb_cow_head;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
		err = -EIO;
		goto err_res_valid;
	}
	max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

	txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
	mlxsw_tx_hdr_fid_valid_set(txhdr, true);
	mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
	return 0;

err_res_valid:
err_skb_cow_head:
	this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
	dev_kfree_skb_any(skb);
	return err;
}

static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
	unsigned int type;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return false;

	type = ptp_classify_raw(skb);
	return !!ptp_parse_header(skb, type);
}

static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
	 * need special handling and cannot be transmitted as regular control
	 * packets.
	 */
	if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
		return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
							  mlxsw_sp_port, skb,
							  tx_info);

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	mlxsw_sp_txhdr_construct(skb, tx_info);
	return 0;
}
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_PORT_ETH_FRAME_HDR;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spfsr_pl[MLXSW_REG_SPFSR_LEN];
	int err;

	if (mlxsw_sp_port->security == enable)
		return 0;

	mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
	if (err)
		return err;

	mlxsw_sp_port->security = enable;
	return 0;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
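/* Note that the sver_type values returned above act as indices into the
 * device's EtherType table: index 0 is used here for 802.1Q (0x8100) and
 * index 1 for 802.1AD (0x88A8), matching the mapping in the switch above.
 */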
int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
					       pmlp_pl, port_mapping);
}

static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
				  port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
					      port_mapping->slot_index);
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
				    port_mapping->module);
	return err;
}

static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 slot_index, u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.slot_index,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return err;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}
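/* Transmit path: report NETDEV_TX_BUSY while the transmit queue towards the
 * device is full, pad the frame to the minimum Ethernet size, prepend the
 * Tx header and hand the packet to the core. A packet that loses the race
 * for a free queue slot is dropped rather than requeued.
 */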
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
				    &tx_info);
	if (err)
		return NETDEV_TX_OK;

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	WRITE_ONCE(dev->mtu, mtu);
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
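/* SPVM accepts a bounded number of VLAN records per transaction, so
 * mlxsw_sp_port_vlan_set() above applies the requested range in chunks of
 * up to MLXSW_REG_SPVM_REC_MAX_COUNT VIDs.
 */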
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
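/* mlxsw_sp_set_features() folds the handlers' return codes together, so if
 * any handler fails the previous feature set is restored and the error is
 * reported as -EINVAL rather than the handler's own code.
 */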
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
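/* The resulting scheduler hierarchy, as configured by
 * mlxsw_sp_port_ets_init() above:
 *
 *	port
 *	  `- group 0
 *	       `- subgroup i (one per TC)
 *	            |- TC i     (unicast)
 *	            `- TC i + 8 (multicast, DWRR weight 100, min shaper on)
 *
 * with all switch priorities initially mapped to TC 0 and all supported max
 * shapers disabled.
 */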
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
						    module, &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}
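/* In SPVC terms, is_8021ad_tagged drives the et1 bit and is_8021q_tagged
 * drives et0; see the default configuration applied in
 * mlxsw_sp_port_create() below, which recognizes only 802.1Q tags.
 */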
static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u16 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;
	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
			      split_port_subnumber, slot_index);
	return 0;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
				    mlxsw_sp_port, dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;
	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
1802 */ 1803 err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true); 1804 if (err) { 1805 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n", 1806 local_port); 1807 goto err_port_vlan_classification_set; 1808 } 1809 1810 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 1811 mlxsw_sp->ptp_ops->shaper_work); 1812 1813 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 1814 1815 err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port); 1816 if (err) { 1817 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n", 1818 mlxsw_sp_port->local_port); 1819 goto err_port_overheat_init_val_set; 1820 } 1821 1822 err = register_netdev(dev); 1823 if (err) { 1824 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 1825 mlxsw_sp_port->local_port); 1826 goto err_register_netdev; 1827 } 1828 1829 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 1830 return 0; 1831 1832 err_register_netdev: 1833 err_port_overheat_init_val_set: 1834 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); 1835 err_port_vlan_classification_set: 1836 mlxsw_sp->ports[local_port] = NULL; 1837 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1838 err_port_vlan_create: 1839 err_port_pvid_set: 1840 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1841 err_port_nve_init: 1842 err_port_vlan_clear: 1843 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1844 err_port_qdiscs_init: 1845 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1846 err_port_fids_init: 1847 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1848 err_port_dcb_init: 1849 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1850 err_port_tc_mc_mode: 1851 err_port_ets_init: 1852 mlxsw_sp_port_buffers_fini(mlxsw_sp_port); 1853 err_port_buffers_init: 1854 err_port_admin_status_set: 1855 err_port_mtu_set: 1856 err_max_speed_get: 1857 err_port_speed_by_width_set: 1858 err_port_system_port_mapping_set: 1859 err_dev_addr_init: 1860 free_percpu(mlxsw_sp_port->pcpu_stats); 1861 err_alloc_stats: 1862 free_netdev(dev); 1863 err_alloc_etherdev: 1864 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1865 err_core_port_init: 1866 err_port_label_info_get: 1867 mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 1868 MLXSW_PORT_SWID_DISABLED_PORT); 1869 err_port_swid_set: 1870 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, 1871 port_mapping->slot_index, 1872 port_mapping->module); 1873 return err; 1874 } 1875 1876 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) 1877 { 1878 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1879 u8 slot_index = mlxsw_sp_port->mapping.slot_index; 1880 u8 module = mlxsw_sp_port->mapping.module; 1881 1882 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 1883 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 1884 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1885 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 1886 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); 1887 mlxsw_sp->ports[local_port] = NULL; 1888 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 1889 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1890 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1891 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1892 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1893 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1894 mlxsw_sp_port_buffers_fini(mlxsw_sp_port); 1895 free_percpu(mlxsw_sp_port->pcpu_stats); 1896 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 1897 free_netdev(mlxsw_sp_port->dev); 1898 
mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1899 mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 1900 MLXSW_PORT_SWID_DISABLED_PORT); 1901 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module); 1902 } 1903 1904 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 1905 { 1906 struct mlxsw_sp_port *mlxsw_sp_port; 1907 int err; 1908 1909 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 1910 if (!mlxsw_sp_port) 1911 return -ENOMEM; 1912 1913 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1914 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 1915 1916 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 1917 mlxsw_sp_port, 1918 mlxsw_sp->base_mac, 1919 sizeof(mlxsw_sp->base_mac)); 1920 if (err) { 1921 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 1922 goto err_core_cpu_port_init; 1923 } 1924 1925 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 1926 return 0; 1927 1928 err_core_cpu_port_init: 1929 kfree(mlxsw_sp_port); 1930 return err; 1931 } 1932 1933 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 1934 { 1935 struct mlxsw_sp_port *mlxsw_sp_port = 1936 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 1937 1938 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 1939 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 1940 kfree(mlxsw_sp_port); 1941 } 1942 1943 static bool mlxsw_sp_local_port_valid(u16 local_port) 1944 { 1945 return local_port != MLXSW_PORT_CPU_PORT; 1946 } 1947 1948 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port) 1949 { 1950 if (!mlxsw_sp_local_port_valid(local_port)) 1951 return false; 1952 return mlxsw_sp->ports[local_port] != NULL; 1953 } 1954 1955 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp, 1956 u16 local_port, bool enable) 1957 { 1958 char pmecr_pl[MLXSW_REG_PMECR_LEN]; 1959 1960 mlxsw_reg_pmecr_pack(pmecr_pl, local_port, 1961 enable ? 
MLXSW_REG_PMECR_E_GENERATE_EVENT : 1962 MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT); 1963 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl); 1964 } 1965 1966 struct mlxsw_sp_port_mapping_event { 1967 struct list_head list; 1968 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 1969 }; 1970 1971 static void mlxsw_sp_port_mapping_events_work(struct work_struct *work) 1972 { 1973 struct mlxsw_sp_port_mapping_event *event, *next_event; 1974 struct mlxsw_sp_port_mapping_events *events; 1975 struct mlxsw_sp_port_mapping port_mapping; 1976 struct mlxsw_sp *mlxsw_sp; 1977 struct devlink *devlink; 1978 LIST_HEAD(event_queue); 1979 u16 local_port; 1980 int err; 1981 1982 events = container_of(work, struct mlxsw_sp_port_mapping_events, work); 1983 mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events); 1984 devlink = priv_to_devlink(mlxsw_sp->core); 1985 1986 spin_lock_bh(&events->queue_lock); 1987 list_splice_init(&events->queue, &event_queue); 1988 spin_unlock_bh(&events->queue_lock); 1989 1990 list_for_each_entry_safe(event, next_event, &event_queue, list) { 1991 local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl); 1992 err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, 1993 event->pmlp_pl, &port_mapping); 1994 if (err) 1995 goto out; 1996 1997 if (WARN_ON_ONCE(!port_mapping.width)) 1998 goto out; 1999 2000 devl_lock(devlink); 2001 2002 if (!mlxsw_sp_port_created(mlxsw_sp, local_port)) 2003 mlxsw_sp_port_create(mlxsw_sp, local_port, 2004 false, &port_mapping); 2005 else 2006 WARN_ON_ONCE(1); 2007 2008 devl_unlock(devlink); 2009 2010 mlxsw_sp->port_mapping[local_port] = port_mapping; 2011 2012 out: 2013 kfree(event); 2014 } 2015 } 2016 2017 static void 2018 mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg, 2019 char *pmlp_pl, void *priv) 2020 { 2021 struct mlxsw_sp_port_mapping_events *events; 2022 struct mlxsw_sp_port_mapping_event *event; 2023 struct mlxsw_sp *mlxsw_sp = priv; 2024 u16 local_port; 2025 2026 local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl); 2027 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) 2028 return; 2029 2030 events = &mlxsw_sp->port_mapping_events; 2031 event = kmalloc(sizeof(*event), GFP_ATOMIC); 2032 if (!event) 2033 return; 2034 memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl)); 2035 spin_lock(&events->queue_lock); 2036 list_add_tail(&event->list, &events->queue); 2037 spin_unlock(&events->queue_lock); 2038 mlxsw_core_schedule_work(&events->work); 2039 } 2040 2041 static void 2042 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp) 2043 { 2044 struct mlxsw_sp_port_mapping_event *event, *next_event; 2045 struct mlxsw_sp_port_mapping_events *events; 2046 2047 events = &mlxsw_sp->port_mapping_events; 2048 2049 /* Caller needs to make sure that no new event is going to appear. 
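 * Events still queued once the work is cancelled are freed below without
 * being processed.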
*/ 2050 cancel_work_sync(&events->work); 2051 list_for_each_entry_safe(event, next_event, &events->queue, list) { 2052 list_del(&event->list); 2053 kfree(event); 2054 } 2055 } 2056 2057 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 2058 { 2059 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2060 int i; 2061 2062 for (i = 1; i < max_ports; i++) 2063 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); 2064 /* Make sure all scheduled events are processed */ 2065 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); 2066 2067 for (i = 1; i < max_ports; i++) 2068 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2069 mlxsw_sp_port_remove(mlxsw_sp, i); 2070 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2071 kfree(mlxsw_sp->ports); 2072 mlxsw_sp->ports = NULL; 2073 } 2074 2075 static void 2076 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core, 2077 bool (*selector)(void *priv, u16 local_port), 2078 void *priv) 2079 { 2080 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2081 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core); 2082 int i; 2083 2084 for (i = 1; i < max_ports; i++) 2085 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i)) 2086 mlxsw_sp_port_remove(mlxsw_sp, i); 2087 } 2088 2089 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 2090 { 2091 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2092 struct mlxsw_sp_port_mapping_events *events; 2093 struct mlxsw_sp_port_mapping *port_mapping; 2094 size_t alloc_size; 2095 int i; 2096 int err; 2097 2098 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 2099 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 2100 if (!mlxsw_sp->ports) 2101 return -ENOMEM; 2102 2103 events = &mlxsw_sp->port_mapping_events; 2104 INIT_LIST_HEAD(&events->queue); 2105 spin_lock_init(&events->queue_lock); 2106 INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work); 2107 2108 for (i = 1; i < max_ports; i++) { 2109 err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true); 2110 if (err) 2111 goto err_event_enable; 2112 } 2113 2114 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 2115 if (err) 2116 goto err_cpu_port_create; 2117 2118 for (i = 1; i < max_ports; i++) { 2119 port_mapping = &mlxsw_sp->port_mapping[i]; 2120 if (!port_mapping->width) 2121 continue; 2122 err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping); 2123 if (err) 2124 goto err_port_create; 2125 } 2126 return 0; 2127 2128 err_port_create: 2129 for (i--; i >= 1; i--) 2130 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2131 mlxsw_sp_port_remove(mlxsw_sp, i); 2132 i = max_ports; 2133 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2134 err_cpu_port_create: 2135 err_event_enable: 2136 for (i--; i >= 1; i--) 2137 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); 2138 /* Make sure all scheduled events are processed */ 2139 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); 2140 kfree(mlxsw_sp->ports); 2141 mlxsw_sp->ports = NULL; 2142 return err; 2143 } 2144 2145 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 2146 { 2147 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2148 struct mlxsw_sp_port_mapping *port_mapping; 2149 int i; 2150 int err; 2151 2152 mlxsw_sp->port_mapping = kcalloc(max_ports, 2153 sizeof(struct mlxsw_sp_port_mapping), 2154 GFP_KERNEL); 2155 if (!mlxsw_sp->port_mapping) 2156 return -ENOMEM; 2157 2158 for (i = 1; i < max_ports; i++) { 2159 port_mapping = &mlxsw_sp->port_mapping[i]; 2160 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping); 2161 if (err) 2162 goto 
err_port_module_info_get; 2163 } 2164 return 0; 2165 2166 err_port_module_info_get: 2167 kfree(mlxsw_sp->port_mapping); 2168 return err; 2169 } 2170 2171 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 2172 { 2173 kfree(mlxsw_sp->port_mapping); 2174 } 2175 2176 static int 2177 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, 2178 struct mlxsw_sp_port_mapping *port_mapping, 2179 unsigned int count, const char *pmtdb_pl) 2180 { 2181 struct mlxsw_sp_port_mapping split_port_mapping; 2182 int err, i; 2183 2184 split_port_mapping = *port_mapping; 2185 split_port_mapping.width /= count; 2186 for (i = 0; i < count; i++) { 2187 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2188 2189 if (!mlxsw_sp_local_port_valid(s_local_port)) 2190 continue; 2191 2192 err = mlxsw_sp_port_create(mlxsw_sp, s_local_port, 2193 true, &split_port_mapping); 2194 if (err) 2195 goto err_port_create; 2196 split_port_mapping.lane += split_port_mapping.width; 2197 } 2198 2199 return 0; 2200 2201 err_port_create: 2202 for (i--; i >= 0; i--) { 2203 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2204 2205 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2206 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2207 } 2208 return err; 2209 } 2210 2211 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 2212 unsigned int count, 2213 const char *pmtdb_pl) 2214 { 2215 struct mlxsw_sp_port_mapping *port_mapping; 2216 int i; 2217 2218 /* Go over original unsplit ports in the gap and recreate them. */ 2219 for (i = 0; i < count; i++) { 2220 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2221 2222 port_mapping = &mlxsw_sp->port_mapping[local_port]; 2223 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port)) 2224 continue; 2225 mlxsw_sp_port_create(mlxsw_sp, local_port, 2226 false, port_mapping); 2227 } 2228 } 2229 2230 static struct mlxsw_sp_port * 2231 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port) 2232 { 2233 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 2234 return mlxsw_sp->ports[local_port]; 2235 return NULL; 2236 } 2237 2238 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, 2239 unsigned int count, 2240 struct netlink_ext_ack *extack) 2241 { 2242 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2243 struct mlxsw_sp_port_mapping port_mapping; 2244 struct mlxsw_sp_port *mlxsw_sp_port; 2245 enum mlxsw_reg_pmtdb_status status; 2246 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2247 int i; 2248 int err; 2249 2250 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2251 if (!mlxsw_sp_port) { 2252 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2253 local_port); 2254 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2255 return -EINVAL; 2256 } 2257 2258 if (mlxsw_sp_port->split) { 2259 NL_SET_ERR_MSG_MOD(extack, "Port is already split"); 2260 return -EINVAL; 2261 } 2262 2263 mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, 2264 mlxsw_sp_port->mapping.module, 2265 mlxsw_sp_port->mapping.module_width / count, 2266 count); 2267 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2268 if (err) { 2269 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2270 return err; 2271 } 2272 2273 status = mlxsw_reg_pmtdb_status_get(pmtdb_pl); 2274 if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) { 2275 NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration"); 2276 return -EINVAL; 2277 } 2278 2279 port_mapping 
= mlxsw_sp_port->mapping; 2280 2281 for (i = 0; i < count; i++) { 2282 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2283 2284 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2285 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2286 } 2287 2288 err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping, 2289 count, pmtdb_pl); 2290 if (err) { 2291 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2292 goto err_port_split_create; 2293 } 2294 2295 return 0; 2296 2297 err_port_split_create: 2298 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2299 2300 return err; 2301 } 2302 2303 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port, 2304 struct netlink_ext_ack *extack) 2305 { 2306 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2307 struct mlxsw_sp_port *mlxsw_sp_port; 2308 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2309 unsigned int count; 2310 int i; 2311 int err; 2312 2313 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2314 if (!mlxsw_sp_port) { 2315 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2316 local_port); 2317 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2318 return -EINVAL; 2319 } 2320 2321 if (!mlxsw_sp_port->split) { 2322 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2323 return -EINVAL; 2324 } 2325 2326 count = mlxsw_sp_port->mapping.module_width / 2327 mlxsw_sp_port->mapping.width; 2328 2329 mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, 2330 mlxsw_sp_port->mapping.module, 2331 mlxsw_sp_port->mapping.module_width / count, 2332 count); 2333 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2334 if (err) { 2335 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2336 return err; 2337 } 2338 2339 for (i = 0; i < count; i++) { 2340 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2341 2342 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2343 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2344 } 2345 2346 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2347 2348 return 0; 2349 } 2350 2351 static void 2352 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 2353 { 2354 int i; 2355 2356 for (i = 0; i < TC_MAX_QUEUE; i++) 2357 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 2358 } 2359 2360 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2361 char *pude_pl, void *priv) 2362 { 2363 struct mlxsw_sp *mlxsw_sp = priv; 2364 struct mlxsw_sp_port *mlxsw_sp_port; 2365 enum mlxsw_reg_pude_oper_status status; 2366 u16 local_port; 2367 2368 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2369 2370 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) 2371 return; 2372 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2373 if (!mlxsw_sp_port) 2374 return; 2375 2376 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2377 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2378 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2379 netif_carrier_on(mlxsw_sp_port->dev); 2380 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 2381 } else { 2382 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2383 netif_carrier_off(mlxsw_sp_port->dev); 2384 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 2385 } 2386 } 2387 2388 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 2389 char *mtpptr_pl, bool ingress) 2390 { 2391 u16 local_port; 2392 u8 num_rec; 2393 int i; 2394 2395 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 2396 
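	/* A single MTPPTR event can carry multiple timestamp records; unpack
	 * each record and deliver it to the SP1 PTP code individually.
	 */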
num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 2397 for (i = 0; i < num_rec; i++) { 2398 u8 domain_number; 2399 u8 message_type; 2400 u16 sequence_id; 2401 u64 timestamp; 2402 2403 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 2404 &domain_number, &sequence_id, 2405 &timestamp); 2406 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 2407 message_type, domain_number, 2408 sequence_id, timestamp); 2409 } 2410 } 2411 2412 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 2413 char *mtpptr_pl, void *priv) 2414 { 2415 struct mlxsw_sp *mlxsw_sp = priv; 2416 2417 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 2418 } 2419 2420 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 2421 char *mtpptr_pl, void *priv) 2422 { 2423 struct mlxsw_sp *mlxsw_sp = priv; 2424 2425 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 2426 } 2427 2428 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2429 u16 local_port, void *priv) 2430 { 2431 struct mlxsw_sp *mlxsw_sp = priv; 2432 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2433 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2434 2435 if (unlikely(!mlxsw_sp_port)) { 2436 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2437 local_port); 2438 return; 2439 } 2440 2441 skb->dev = mlxsw_sp_port->dev; 2442 2443 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2444 u64_stats_update_begin(&pcpu_stats->syncp); 2445 pcpu_stats->rx_packets++; 2446 pcpu_stats->rx_bytes += skb->len; 2447 u64_stats_update_end(&pcpu_stats->syncp); 2448 2449 skb->protocol = eth_type_trans(skb, skb->dev); 2450 netif_receive_skb(skb); 2451 } 2452 2453 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port, 2454 void *priv) 2455 { 2456 skb->offload_fwd_mark = 1; 2457 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2458 } 2459 2460 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2461 u16 local_port, void *priv) 2462 { 2463 skb->offload_l3_fwd_mark = 1; 2464 skb->offload_fwd_mark = 1; 2465 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2466 } 2467 2468 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2469 u16 local_port) 2470 { 2471 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2472 } 2473 2474 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2475 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2476 _is_ctrl, SP_##_trap_group, DISCARD) 2477 2478 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2479 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2480 _is_ctrl, SP_##_trap_group, DISCARD) 2481 2482 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2483 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2484 _is_ctrl, SP_##_trap_group, DISCARD) 2485 2486 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2487 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2488 2489 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2490 /* Events */ 2491 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2492 /* L2 traps */ 2493 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2494 /* L3 traps */ 2495 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2496 false), 2497 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2498 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU,
ROUTER_EXP, 2499 false), 2500 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2501 ROUTER_EXP, false), 2502 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2503 ROUTER_EXP, false), 2504 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2505 ROUTER_EXP, false), 2506 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2507 ROUTER_EXP, false), 2508 /* Multicast Router Traps */ 2509 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2510 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2511 /* NVE traps */ 2512 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), 2513 }; 2514 2515 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2516 /* Events */ 2517 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2518 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2519 }; 2520 2521 static const struct mlxsw_listener mlxsw_sp2_listener[] = { 2522 /* Events */ 2523 MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE), 2524 }; 2525 2526 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2527 { 2528 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2529 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2530 enum mlxsw_reg_qpcr_ir_units ir_units; 2531 int max_cpu_policers; 2532 bool is_bytes; 2533 u8 burst_size; 2534 u32 rate; 2535 int i, err; 2536 2537 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2538 return -EIO; 2539 2540 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2541 2542 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2543 for (i = 0; i < max_cpu_policers; i++) { 2544 is_bytes = false; 2545 switch (i) { 2546 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2547 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2548 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2549 rate = 1024; 2550 burst_size = 7; 2551 break; 2552 default: 2553 continue; 2554 } 2555 2556 __set_bit(i, mlxsw_sp->trap->policers_usage); 2557 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2558 burst_size); 2559 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2560 if (err) 2561 return err; 2562 } 2563 2564 return 0; 2565 } 2566 2567 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2568 { 2569 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2570 enum mlxsw_reg_htgt_trap_group i; 2571 int max_cpu_policers; 2572 int max_trap_groups; 2573 u8 priority, tc; 2574 u16 policer_id; 2575 int err; 2576 2577 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2578 return -EIO; 2579 2580 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2581 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2582 2583 for (i = 0; i < max_trap_groups; i++) { 2584 policer_id = i; 2585 switch (i) { 2586 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2587 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2588 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2589 priority = 1; 2590 tc = 1; 2591 break; 2592 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2593 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2594 tc = MLXSW_REG_HTGT_DEFAULT_TC; 2595 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2596 break; 2597 default: 2598 continue; 2599 } 2600 2601 if (max_cpu_policers <= policer_id && 2602 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2603 return -EIO; 2604 2605 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2606 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2607 if (err) 2608 return err; 2609 } 2610 2611 return 0; 2612 } 2613 2614 
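/* Trap setup below is deliberately ordered: CPU policers are configured
 * first, trap groups are then bound to those policers, and only
 * afterwards are the listeners registered, so no trap can fire into an
 * unconfigured group.
 */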
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2615 { 2616 struct mlxsw_sp_trap *trap; 2617 u64 max_policers; 2618 int err; 2619 2620 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2621 return -EIO; 2622 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2623 trap = kzalloc(struct_size(trap, policers_usage, 2624 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2625 if (!trap) 2626 return -ENOMEM; 2627 trap->max_policers = max_policers; 2628 mlxsw_sp->trap = trap; 2629 2630 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2631 if (err) 2632 goto err_cpu_policers_set; 2633 2634 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2635 if (err) 2636 goto err_trap_groups_set; 2637 2638 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener, 2639 ARRAY_SIZE(mlxsw_sp_listener), 2640 mlxsw_sp); 2641 if (err) 2642 goto err_traps_register; 2643 2644 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners, 2645 mlxsw_sp->listeners_count, mlxsw_sp); 2646 if (err) 2647 goto err_extra_traps_init; 2648 2649 return 0; 2650 2651 err_extra_traps_init: 2652 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2653 ARRAY_SIZE(mlxsw_sp_listener), 2654 mlxsw_sp); 2655 err_traps_register: 2656 err_trap_groups_set: 2657 err_cpu_policers_set: 2658 kfree(trap); 2659 return err; 2660 } 2661 2662 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2663 { 2664 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners, 2665 mlxsw_sp->listeners_count, 2666 mlxsw_sp); 2667 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2668 ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp); 2669 kfree(mlxsw_sp->trap); 2670 } 2671 2672 static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp) 2673 { 2674 char sgcr_pl[MLXSW_REG_SGCR_LEN]; 2675 int err; 2676 2677 if (mlxsw_core_lag_mode(mlxsw_sp->core) != 2678 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) 2679 return 0; 2680 2681 /* In DDD mode, which we use by default, each LAG entry is 8 PGT 2682 * entries. The LAG table address needs to be 8-aligned, but that ought 2683 * to be the case, since the LAG table is allocated first.
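 * With the Spectrum-4 profile's max_lag of 128, for example, the call
 * below reserves 128 * 8 = 1024 PGT entries and SGCR is programmed with
 * the 8-aligned base of that range.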
2684 */ 2685 err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base, 2686 mlxsw_sp->max_lag * 8); 2687 if (err) 2688 return err; 2689 if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) { 2690 err = -EINVAL; 2691 goto err_mid_alloc_range; 2692 } 2693 2694 mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base); 2695 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl); 2696 if (err) 2697 goto err_mid_alloc_range; 2698 2699 return 0; 2700 2701 err_mid_alloc_range: 2702 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, 2703 mlxsw_sp->max_lag * 8); 2704 return err; 2705 } 2706 2707 static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp) 2708 { 2709 if (mlxsw_core_lag_mode(mlxsw_sp->core) != 2710 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) 2711 return; 2712 2713 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, 2714 mlxsw_sp->max_lag * 8); 2715 } 2716 2717 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2718 2719 struct mlxsw_sp_lag { 2720 struct net_device *dev; 2721 refcount_t ref_count; 2722 u16 lag_id; 2723 }; 2724 2725 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2726 { 2727 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2728 u32 seed; 2729 int err; 2730 2731 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2732 MLXSW_SP_LAG_SEED_INIT); 2733 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2734 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2735 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2736 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2737 MLXSW_REG_SLCR_LAG_HASH_SIP | 2738 MLXSW_REG_SLCR_LAG_HASH_DIP | 2739 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2740 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2741 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2742 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2743 if (err) 2744 return err; 2745 2746 err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag); 2747 if (err) 2748 return err; 2749 2750 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2751 return -EIO; 2752 2753 err = mlxsw_sp_lag_pgt_init(mlxsw_sp); 2754 if (err) 2755 return err; 2756 2757 mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag), 2758 GFP_KERNEL); 2759 if (!mlxsw_sp->lags) { 2760 err = -ENOMEM; 2761 goto err_kcalloc; 2762 } 2763 2764 return 0; 2765 2766 err_kcalloc: 2767 mlxsw_sp_lag_pgt_fini(mlxsw_sp); 2768 return err; 2769 } 2770 2771 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2772 { 2773 mlxsw_sp_lag_pgt_fini(mlxsw_sp); 2774 kfree(mlxsw_sp->lags); 2775 } 2776 2777 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2778 .clock_init = mlxsw_sp1_ptp_clock_init, 2779 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2780 .init = mlxsw_sp1_ptp_init, 2781 .fini = mlxsw_sp1_ptp_fini, 2782 .receive = mlxsw_sp1_ptp_receive, 2783 .transmitted = mlxsw_sp1_ptp_transmitted, 2784 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2785 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 2786 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2787 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2788 .get_stats_count = mlxsw_sp1_get_stats_count, 2789 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2790 .get_stats = mlxsw_sp1_get_stats, 2791 .txhdr_construct = mlxsw_sp_ptp_txhdr_construct, 2792 }; 2793 2794 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2795 .clock_init = mlxsw_sp2_ptp_clock_init, 2796 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2797 .init = mlxsw_sp2_ptp_init, 2798 .fini = mlxsw_sp2_ptp_fini, 2799 .receive = mlxsw_sp2_ptp_receive, 2800 .transmitted = mlxsw_sp2_ptp_transmitted, 2801 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2802 
.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2803 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2804 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2805 .get_stats_count = mlxsw_sp2_get_stats_count, 2806 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2807 .get_stats = mlxsw_sp2_get_stats, 2808 .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct, 2809 }; 2810 2811 static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = { 2812 .clock_init = mlxsw_sp2_ptp_clock_init, 2813 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2814 .init = mlxsw_sp2_ptp_init, 2815 .fini = mlxsw_sp2_ptp_fini, 2816 .receive = mlxsw_sp2_ptp_receive, 2817 .transmitted = mlxsw_sp2_ptp_transmitted, 2818 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2819 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2820 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2821 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2822 .get_stats_count = mlxsw_sp2_get_stats_count, 2823 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2824 .get_stats = mlxsw_sp2_get_stats, 2825 .txhdr_construct = mlxsw_sp_ptp_txhdr_construct, 2826 }; 2827 2828 struct mlxsw_sp_sample_trigger_node { 2829 struct mlxsw_sp_sample_trigger trigger; 2830 struct mlxsw_sp_sample_params params; 2831 struct rhash_head ht_node; 2832 struct rcu_head rcu; 2833 refcount_t refcount; 2834 }; 2835 2836 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { 2837 .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), 2838 .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), 2839 .key_len = sizeof(struct mlxsw_sp_sample_trigger), 2840 .automatic_shrinking = true, 2841 }; 2842 2843 static void 2844 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, 2845 const struct mlxsw_sp_sample_trigger *trigger) 2846 { 2847 memset(key, 0, sizeof(*key)); 2848 key->type = trigger->type; 2849 key->local_port = trigger->local_port; 2850 } 2851 2852 /* RCU read lock must be held */ 2853 struct mlxsw_sp_sample_params * 2854 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, 2855 const struct mlxsw_sp_sample_trigger *trigger) 2856 { 2857 struct mlxsw_sp_sample_trigger_node *trigger_node; 2858 struct mlxsw_sp_sample_trigger key; 2859 2860 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2861 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, 2862 mlxsw_sp_sample_trigger_ht_params); 2863 if (!trigger_node) 2864 return NULL; 2865 2866 return &trigger_node->params; 2867 } 2868 2869 static int 2870 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, 2871 const struct mlxsw_sp_sample_trigger *trigger, 2872 const struct mlxsw_sp_sample_params *params) 2873 { 2874 struct mlxsw_sp_sample_trigger_node *trigger_node; 2875 int err; 2876 2877 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); 2878 if (!trigger_node) 2879 return -ENOMEM; 2880 2881 trigger_node->trigger = *trigger; 2882 trigger_node->params = *params; 2883 refcount_set(&trigger_node->refcount, 1); 2884 2885 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, 2886 &trigger_node->ht_node, 2887 mlxsw_sp_sample_trigger_ht_params); 2888 if (err) 2889 goto err_rhashtable_insert; 2890 2891 return 0; 2892 2893 err_rhashtable_insert: 2894 kfree(trigger_node); 2895 return err; 2896 } 2897 2898 static void 2899 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, 2900 struct mlxsw_sp_sample_trigger_node *trigger_node) 2901 { 2902 rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, 2903 &trigger_node->ht_node, 2904 mlxsw_sp_sample_trigger_ht_params); 2905 
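	/* Readers in mlxsw_sp_sample_trigger_params_lookup() run under the
	 * RCU read lock, so defer the free until a grace period has elapsed.
	 */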
kfree_rcu(trigger_node, rcu); 2906 } 2907 2908 int 2909 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, 2910 const struct mlxsw_sp_sample_trigger *trigger, 2911 const struct mlxsw_sp_sample_params *params, 2912 struct netlink_ext_ack *extack) 2913 { 2914 struct mlxsw_sp_sample_trigger_node *trigger_node; 2915 struct mlxsw_sp_sample_trigger key; 2916 2917 ASSERT_RTNL(); 2918 2919 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2920 2921 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2922 &key, 2923 mlxsw_sp_sample_trigger_ht_params); 2924 if (!trigger_node) 2925 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, 2926 params); 2927 2928 if (trigger_node->trigger.local_port) { 2929 NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port"); 2930 return -EINVAL; 2931 } 2932 2933 if (trigger_node->params.psample_group != params->psample_group || 2934 trigger_node->params.truncate != params->truncate || 2935 trigger_node->params.rate != params->rate || 2936 trigger_node->params.trunc_size != params->trunc_size) { 2937 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); 2938 return -EINVAL; 2939 } 2940 2941 refcount_inc(&trigger_node->refcount); 2942 2943 return 0; 2944 } 2945 2946 void 2947 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, 2948 const struct mlxsw_sp_sample_trigger *trigger) 2949 { 2950 struct mlxsw_sp_sample_trigger_node *trigger_node; 2951 struct mlxsw_sp_sample_trigger key; 2952 2953 ASSERT_RTNL(); 2954 2955 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2956 2957 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2958 &key, 2959 mlxsw_sp_sample_trigger_ht_params); 2960 if (!trigger_node) 2961 return; 2962 2963 if (!refcount_dec_and_test(&trigger_node->refcount)) 2964 return; 2965 2966 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); 2967 } 2968 2969 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2970 unsigned long event, void *ptr); 2971 2972 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96 2973 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128 2974 #define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789 2975 2976 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) 2977 { 2978 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0); 2979 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 2980 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; 2981 mutex_init(&mlxsw_sp->parsing.lock); 2982 } 2983 2984 static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) 2985 { 2986 mutex_destroy(&mlxsw_sp->parsing.lock); 2987 WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref)); 2988 } 2989 2990 struct mlxsw_sp_ipv6_addr_node { 2991 struct in6_addr key; 2992 struct rhash_head ht_node; 2993 u32 kvdl_index; 2994 refcount_t refcount; 2995 }; 2996 2997 static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = { 2998 .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key), 2999 .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node), 3000 .key_len = sizeof(struct in6_addr), 3001 .automatic_shrinking = true, 3002 }; 3003 3004 static int 3005 mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6, 3006 u32 *p_kvdl_index) 3007 { 3008 struct mlxsw_sp_ipv6_addr_node *node; 3009 char rips_pl[MLXSW_REG_RIPS_LEN]; 3010 int err; 3011 3012 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 3013 MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 3014 p_kvdl_index); 3015 if (err) 3016 return 
err; 3017 3018 mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6); 3019 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); 3020 if (err) 3021 goto err_rips_write; 3022 3023 node = kzalloc(sizeof(*node), GFP_KERNEL); 3024 if (!node) { 3025 err = -ENOMEM; 3026 goto err_node_alloc; 3027 } 3028 3029 node->key = *addr6; 3030 node->kvdl_index = *p_kvdl_index; 3031 refcount_set(&node->refcount, 1); 3032 3033 err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht, 3034 &node->ht_node, 3035 mlxsw_sp_ipv6_addr_ht_params); 3036 if (err) 3037 goto err_rhashtable_insert; 3038 3039 return 0; 3040 3041 err_rhashtable_insert: 3042 kfree(node); 3043 err_node_alloc: 3044 err_rips_write: 3045 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 3046 *p_kvdl_index); 3047 return err; 3048 } 3049 3050 static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp, 3051 struct mlxsw_sp_ipv6_addr_node *node) 3052 { 3053 u32 kvdl_index = node->kvdl_index; 3054 3055 rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node, 3056 mlxsw_sp_ipv6_addr_ht_params); 3057 kfree(node); 3058 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 3059 kvdl_index); 3060 } 3061 3062 int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp, 3063 const struct in6_addr *addr6, 3064 u32 *p_kvdl_index) 3065 { 3066 struct mlxsw_sp_ipv6_addr_node *node; 3067 int err = 0; 3068 3069 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 3070 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 3071 mlxsw_sp_ipv6_addr_ht_params); 3072 if (node) { 3073 refcount_inc(&node->refcount); 3074 *p_kvdl_index = node->kvdl_index; 3075 goto out_unlock; 3076 } 3077 3078 err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index); 3079 3080 out_unlock: 3081 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 3082 return err; 3083 } 3084 3085 void 3086 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6) 3087 { 3088 struct mlxsw_sp_ipv6_addr_node *node; 3089 3090 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 3091 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 3092 mlxsw_sp_ipv6_addr_ht_params); 3093 if (WARN_ON(!node)) 3094 goto out_unlock; 3095 3096 if (!refcount_dec_and_test(&node->refcount)) 3097 goto out_unlock; 3098 3099 mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node); 3100 3101 out_unlock: 3102 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 3103 } 3104 3105 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp) 3106 { 3107 int err; 3108 3109 err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht, 3110 &mlxsw_sp_ipv6_addr_ht_params); 3111 if (err) 3112 return err; 3113 3114 mutex_init(&mlxsw_sp->ipv6_addr_ht_lock); 3115 return 0; 3116 } 3117 3118 static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp) 3119 { 3120 mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock); 3121 rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht); 3122 } 3123 3124 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3125 const struct mlxsw_bus_info *mlxsw_bus_info, 3126 struct netlink_ext_ack *extack) 3127 { 3128 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3129 int err; 3130 3131 mlxsw_sp->core = mlxsw_core; 3132 mlxsw_sp->bus_info = mlxsw_bus_info; 3133 3134 mlxsw_sp_parsing_init(mlxsw_sp); 3135 3136 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3137 if (err) { 3138 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 3139 return err; 3140 } 3141 3142 err = mlxsw_sp_kvdl_init(mlxsw_sp); 3143 if (err) { 3144 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 3145 
return err; 3146 } 3147 3148 err = mlxsw_sp_pgt_init(mlxsw_sp); 3149 if (err) { 3150 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n"); 3151 goto err_pgt_init; 3152 } 3153 3154 /* Initialize before FIDs so that the LAG table is at the start of PGT 3155 * and 8-aligned without overallocation. 3156 */ 3157 err = mlxsw_sp_lag_init(mlxsw_sp); 3158 if (err) { 3159 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 3160 goto err_lag_init; 3161 } 3162 3163 err = mlxsw_sp->fid_core_ops->init(mlxsw_sp); 3164 if (err) { 3165 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 3166 goto err_fid_core_init; 3167 } 3168 3169 err = mlxsw_sp_policers_init(mlxsw_sp); 3170 if (err) { 3171 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); 3172 goto err_policers_init; 3173 } 3174 3175 err = mlxsw_sp_traps_init(mlxsw_sp); 3176 if (err) { 3177 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 3178 goto err_traps_init; 3179 } 3180 3181 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 3182 if (err) { 3183 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 3184 goto err_devlink_traps_init; 3185 } 3186 3187 err = mlxsw_sp_buffers_init(mlxsw_sp); 3188 if (err) { 3189 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 3190 goto err_buffers_init; 3191 } 3192 3193 /* Initialize SPAN before router and switchdev, so that those components 3194 * can call mlxsw_sp_span_respin(). 3195 */ 3196 err = mlxsw_sp_span_init(mlxsw_sp); 3197 if (err) { 3198 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3199 goto err_span_init; 3200 } 3201 3202 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3203 if (err) { 3204 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3205 goto err_switchdev_init; 3206 } 3207 3208 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 3209 if (err) { 3210 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 3211 goto err_counter_pool_init; 3212 } 3213 3214 err = mlxsw_sp_afa_init(mlxsw_sp); 3215 if (err) { 3216 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 3217 goto err_afa_init; 3218 } 3219 3220 err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp); 3221 if (err) { 3222 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n"); 3223 goto err_ipv6_addr_ht_init; 3224 } 3225 3226 err = mlxsw_sp_nve_init(mlxsw_sp); 3227 if (err) { 3228 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 3229 goto err_nve_init; 3230 } 3231 3232 err = mlxsw_sp_port_range_init(mlxsw_sp); 3233 if (err) { 3234 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n"); 3235 goto err_port_range_init; 3236 } 3237 3238 err = mlxsw_sp_acl_init(mlxsw_sp); 3239 if (err) { 3240 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3241 goto err_acl_init; 3242 } 3243 3244 err = mlxsw_sp_router_init(mlxsw_sp, extack); 3245 if (err) { 3246 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 3247 goto err_router_init; 3248 } 3249 3250 if (mlxsw_sp->bus_info->read_clock_capable) { 3251 /* NULL is a valid return value from clock_init */ 3252 mlxsw_sp->clock = 3253 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 3254 mlxsw_sp->bus_info->dev); 3255 if (IS_ERR(mlxsw_sp->clock)) { 3256 err = PTR_ERR(mlxsw_sp->clock); 3257 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 3258 goto err_ptp_clock_init; 3259 } 3260 } 3261 3262 if (mlxsw_sp->clock) { 3263 /* NULL is a valid return value from ptp_ops->init */ 3264 
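		/* The returned ptp_state is the chip-specific PTP context that
		 * is later handed back to the remaining ptp_ops callbacks.
		 */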
mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 3265 if (IS_ERR(mlxsw_sp->ptp_state)) { 3266 err = PTR_ERR(mlxsw_sp->ptp_state); 3267 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 3268 goto err_ptp_init; 3269 } 3270 } 3271 3272 /* Initialize netdevice notifier after SPAN is initialized, so that the 3273 * event handler can call SPAN respin. 3274 */ 3275 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 3276 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3277 &mlxsw_sp->netdevice_nb); 3278 if (err) { 3279 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 3280 goto err_netdev_notifier; 3281 } 3282 3283 err = mlxsw_sp_dpipe_init(mlxsw_sp); 3284 if (err) { 3285 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 3286 goto err_dpipe_init; 3287 } 3288 3289 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 3290 if (err) { 3291 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 3292 goto err_port_module_info_init; 3293 } 3294 3295 err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, 3296 &mlxsw_sp_sample_trigger_ht_params); 3297 if (err) { 3298 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); 3299 goto err_sample_trigger_init; 3300 } 3301 3302 err = mlxsw_sp_ports_create(mlxsw_sp); 3303 if (err) { 3304 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3305 goto err_ports_create; 3306 } 3307 3308 return 0; 3309 3310 err_ports_create: 3311 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3312 err_sample_trigger_init: 3313 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3314 err_port_module_info_init: 3315 mlxsw_sp_dpipe_fini(mlxsw_sp); 3316 err_dpipe_init: 3317 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3318 &mlxsw_sp->netdevice_nb); 3319 err_netdev_notifier: 3320 if (mlxsw_sp->clock) 3321 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3322 err_ptp_init: 3323 if (mlxsw_sp->clock) 3324 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3325 err_ptp_clock_init: 3326 mlxsw_sp_router_fini(mlxsw_sp); 3327 err_router_init: 3328 mlxsw_sp_acl_fini(mlxsw_sp); 3329 err_acl_init: 3330 mlxsw_sp_port_range_fini(mlxsw_sp); 3331 err_port_range_init: 3332 mlxsw_sp_nve_fini(mlxsw_sp); 3333 err_nve_init: 3334 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3335 err_ipv6_addr_ht_init: 3336 mlxsw_sp_afa_fini(mlxsw_sp); 3337 err_afa_init: 3338 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3339 err_counter_pool_init: 3340 mlxsw_sp_switchdev_fini(mlxsw_sp); 3341 err_switchdev_init: 3342 mlxsw_sp_span_fini(mlxsw_sp); 3343 err_span_init: 3344 mlxsw_sp_buffers_fini(mlxsw_sp); 3345 err_buffers_init: 3346 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3347 err_devlink_traps_init: 3348 mlxsw_sp_traps_fini(mlxsw_sp); 3349 err_traps_init: 3350 mlxsw_sp_policers_fini(mlxsw_sp); 3351 err_policers_init: 3352 mlxsw_sp->fid_core_ops->fini(mlxsw_sp); 3353 err_fid_core_init: 3354 mlxsw_sp_lag_fini(mlxsw_sp); 3355 err_lag_init: 3356 mlxsw_sp_pgt_fini(mlxsw_sp); 3357 err_pgt_init: 3358 mlxsw_sp_kvdl_fini(mlxsw_sp); 3359 mlxsw_sp_parsing_fini(mlxsw_sp); 3360 return err; 3361 } 3362 3363 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 3364 const struct mlxsw_bus_info *mlxsw_bus_info, 3365 struct netlink_ext_ack *extack) 3366 { 3367 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3368 3369 mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; 3370 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 3371 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 3372 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 
3373 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 3374 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 3375 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 3376 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 3377 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 3378 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 3379 mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; 3380 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 3381 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 3382 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 3383 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; 3384 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; 3385 mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; 3386 mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; 3387 mlxsw_sp->listeners = mlxsw_sp1_listener; 3388 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 3389 mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops; 3390 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 3391 mlxsw_sp->pgt_smpe_index_valid = true; 3392 3393 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3394 } 3395 3396 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 3397 const struct mlxsw_bus_info *mlxsw_bus_info, 3398 struct netlink_ext_ack *extack) 3399 { 3400 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3401 3402 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3403 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3404 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3405 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3406 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3407 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3408 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3409 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3410 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3411 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3412 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3413 mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; 3414 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3415 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3416 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 3417 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3418 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3419 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3420 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3421 mlxsw_sp->listeners = mlxsw_sp2_listener; 3422 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3423 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops; 3424 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 3425 mlxsw_sp->pgt_smpe_index_valid = false; 3426 3427 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3428 } 3429 3430 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 3431 const struct mlxsw_bus_info *mlxsw_bus_info, 3432 struct netlink_ext_ack *extack) 3433 { 3434 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3435 3436 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3437 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3438 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3439 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3440 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3441 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3442 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3443 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3444 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3445 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3446 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3447 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3448 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3449 
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3450 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3451 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3452 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3453 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3454 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3455 mlxsw_sp->listeners = mlxsw_sp2_listener; 3456 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3457 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops; 3458 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3459 mlxsw_sp->pgt_smpe_index_valid = false; 3460 3461 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3462 } 3463 3464 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, 3465 const struct mlxsw_bus_info *mlxsw_bus_info, 3466 struct netlink_ext_ack *extack) 3467 { 3468 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3469 3470 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3471 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3472 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3473 mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops; 3474 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3475 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3476 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3477 mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops; 3478 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3479 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3480 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3481 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3482 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3483 mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops; 3484 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3485 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3486 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3487 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3488 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3489 mlxsw_sp->listeners = mlxsw_sp2_listener; 3490 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3491 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops; 3492 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; 3493 mlxsw_sp->pgt_smpe_index_valid = false; 3494 3495 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3496 } 3497 3498 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3499 { 3500 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3501 3502 mlxsw_sp_ports_remove(mlxsw_sp); 3503 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3504 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3505 mlxsw_sp_dpipe_fini(mlxsw_sp); 3506 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3507 &mlxsw_sp->netdevice_nb); 3508 if (mlxsw_sp->clock) { 3509 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3510 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3511 } 3512 mlxsw_sp_router_fini(mlxsw_sp); 3513 mlxsw_sp_acl_fini(mlxsw_sp); 3514 mlxsw_sp_port_range_fini(mlxsw_sp); 3515 mlxsw_sp_nve_fini(mlxsw_sp); 3516 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3517 mlxsw_sp_afa_fini(mlxsw_sp); 3518 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3519 mlxsw_sp_switchdev_fini(mlxsw_sp); 3520 mlxsw_sp_span_fini(mlxsw_sp); 3521 mlxsw_sp_buffers_fini(mlxsw_sp); 3522 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3523 mlxsw_sp_traps_fini(mlxsw_sp); 3524 mlxsw_sp_policers_fini(mlxsw_sp); 3525 mlxsw_sp->fid_core_ops->fini(mlxsw_sp); 3526 mlxsw_sp_lag_fini(mlxsw_sp); 3527 mlxsw_sp_pgt_fini(mlxsw_sp); 3528 mlxsw_sp_kvdl_fini(mlxsw_sp); 3529 mlxsw_sp_parsing_fini(mlxsw_sp); 3530 } 3531 3532 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3533 
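	/* Unlike the Spectrum-2+ profiles below, Spectrum-1 sizes the KVD
	 * areas itself; the 59/41 hash parts feed the split computed in
	 * mlxsw_sp1_resources_kvd_register().
	 */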
.used_flood_mode = 1, 3534 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED, 3535 .used_max_ib_mc = 1, 3536 .max_ib_mc = 0, 3537 .used_max_pkey = 1, 3538 .max_pkey = 0, 3539 .used_ubridge = 1, 3540 .ubridge = 1, 3541 .used_kvd_sizes = 1, 3542 .kvd_hash_single_parts = 59, 3543 .kvd_hash_double_parts = 41, 3544 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3545 .swid_config = { 3546 { 3547 .used_type = 1, 3548 .type = MLXSW_PORT_SWID_TYPE_ETH, 3549 } 3550 }, 3551 }; 3552 3553 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3554 .used_flood_mode = 1, 3555 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED, 3556 .used_max_ib_mc = 1, 3557 .max_ib_mc = 0, 3558 .used_max_pkey = 1, 3559 .max_pkey = 0, 3560 .used_ubridge = 1, 3561 .ubridge = 1, 3562 .swid_config = { 3563 { 3564 .used_type = 1, 3565 .type = MLXSW_PORT_SWID_TYPE_ETH, 3566 } 3567 }, 3568 .used_cqe_time_stamp_type = 1, 3569 .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC, 3570 .lag_mode_prefer_sw = true, 3571 .flood_mode_prefer_cff = true, 3572 }; 3573 3574 /* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs 3575 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT 3576 * table. 3577 */ 3578 #define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128 3579 3580 static const struct mlxsw_config_profile mlxsw_sp4_config_profile = { 3581 .used_max_lag = 1, 3582 .max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG, 3583 .used_flood_mode = 1, 3584 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED, 3585 .used_max_ib_mc = 1, 3586 .max_ib_mc = 0, 3587 .used_max_pkey = 1, 3588 .max_pkey = 0, 3589 .used_ubridge = 1, 3590 .ubridge = 1, 3591 .swid_config = { 3592 { 3593 .used_type = 1, 3594 .type = MLXSW_PORT_SWID_TYPE_ETH, 3595 } 3596 }, 3597 .used_cqe_time_stamp_type = 1, 3598 .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC, 3599 .lag_mode_prefer_sw = true, 3600 .flood_mode_prefer_cff = true, 3601 }; 3602 3603 static void 3604 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3605 struct devlink_resource_size_params *kvd_size_params, 3606 struct devlink_resource_size_params *linear_size_params, 3607 struct devlink_resource_size_params *hash_double_size_params, 3608 struct devlink_resource_size_params *hash_single_size_params) 3609 { 3610 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3611 KVD_SINGLE_MIN_SIZE); 3612 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3613 KVD_DOUBLE_MIN_SIZE); 3614 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3615 u32 linear_size_min = 0; 3616 3617 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3618 MLXSW_SP_KVD_GRANULARITY, 3619 DEVLINK_RESOURCE_UNIT_ENTRY); 3620 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3621 kvd_size - single_size_min - 3622 double_size_min, 3623 MLXSW_SP_KVD_GRANULARITY, 3624 DEVLINK_RESOURCE_UNIT_ENTRY); 3625 devlink_resource_size_params_init(hash_double_size_params, 3626 double_size_min, 3627 kvd_size - single_size_min - 3628 linear_size_min, 3629 MLXSW_SP_KVD_GRANULARITY, 3630 DEVLINK_RESOURCE_UNIT_ENTRY); 3631 devlink_resource_size_params_init(hash_single_size_params, 3632 single_size_min, 3633 kvd_size - double_size_min - 3634 linear_size_min, 3635 MLXSW_SP_KVD_GRANULARITY, 3636 DEVLINK_RESOURCE_UNIT_ENTRY); 3637 } 3638 3639 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3640 { 3641 struct devlink *devlink = 
priv_to_devlink(mlxsw_core); 3642 struct devlink_resource_size_params hash_single_size_params; 3643 struct devlink_resource_size_params hash_double_size_params; 3644 struct devlink_resource_size_params linear_size_params; 3645 struct devlink_resource_size_params kvd_size_params; 3646 u32 kvd_size, single_size, double_size, linear_size; 3647 const struct mlxsw_config_profile *profile; 3648 int err; 3649 3650 profile = &mlxsw_sp1_config_profile; 3651 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3652 return -EIO; 3653 3654 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3655 &linear_size_params, 3656 &hash_double_size_params, 3657 &hash_single_size_params); 3658 3659 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3660 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3661 kvd_size, MLXSW_SP_RESOURCE_KVD, 3662 DEVLINK_RESOURCE_ID_PARENT_TOP, 3663 &kvd_size_params); 3664 if (err) 3665 return err; 3666 3667 linear_size = profile->kvd_linear_size; 3668 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 3669 linear_size, 3670 MLXSW_SP_RESOURCE_KVD_LINEAR, 3671 MLXSW_SP_RESOURCE_KVD, 3672 &linear_size_params); 3673 if (err) 3674 return err; 3675 3676 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 3677 if (err) 3678 return err; 3679 3680 double_size = kvd_size - linear_size; 3681 double_size *= profile->kvd_hash_double_parts; 3682 double_size /= profile->kvd_hash_double_parts + 3683 profile->kvd_hash_single_parts; 3684 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 3685 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 3686 double_size, 3687 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3688 MLXSW_SP_RESOURCE_KVD, 3689 &hash_double_size_params); 3690 if (err) 3691 return err; 3692 3693 single_size = kvd_size - double_size - linear_size; 3694 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 3695 single_size, 3696 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3697 MLXSW_SP_RESOURCE_KVD, 3698 &hash_single_size_params); 3699 if (err) 3700 return err; 3701 3702 return 0; 3703 } 3704 3705 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3706 { 3707 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3708 struct devlink_resource_size_params kvd_size_params; 3709 u32 kvd_size; 3710 3711 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3712 return -EIO; 3713 3714 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3715 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 3716 MLXSW_SP_KVD_GRANULARITY, 3717 DEVLINK_RESOURCE_UNIT_ENTRY); 3718 3719 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3720 kvd_size, MLXSW_SP_RESOURCE_KVD, 3721 DEVLINK_RESOURCE_ID_PARENT_TOP, 3722 &kvd_size_params); 3723 } 3724 3725 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 3726 { 3727 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3728 struct devlink_resource_size_params span_size_params; 3729 u32 max_span; 3730 3731 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 3732 return -EIO; 3733 3734 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 3735 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 3736 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3737 3738 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 3739 max_span, MLXSW_SP_RESOURCE_SPAN, 3740 DEVLINK_RESOURCE_ID_PARENT_TOP, 3741 &span_size_params); 3742 } 3743 3744 static int 3745 
mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core) 3746 { 3747 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3748 struct devlink_resource_size_params size_params; 3749 u8 max_rif_mac_profiles; 3750 3751 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) 3752 max_rif_mac_profiles = 1; 3753 else 3754 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, 3755 MAX_RIF_MAC_PROFILES); 3756 devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, 3757 max_rif_mac_profiles, 1, 3758 DEVLINK_RESOURCE_UNIT_ENTRY); 3759 3760 return devl_resource_register(devlink, 3761 "rif_mac_profiles", 3762 max_rif_mac_profiles, 3763 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, 3764 DEVLINK_RESOURCE_ID_PARENT_TOP, 3765 &size_params); 3766 } 3767 3768 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core) 3769 { 3770 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3771 struct devlink_resource_size_params size_params; 3772 u64 max_rifs; 3773 3774 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS)) 3775 return -EIO; 3776 3777 max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS); 3778 devlink_resource_size_params_init(&size_params, max_rifs, max_rifs, 3779 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3780 3781 return devl_resource_register(devlink, "rifs", max_rifs, 3782 MLXSW_SP_RESOURCE_RIFS, 3783 DEVLINK_RESOURCE_ID_PARENT_TOP, 3784 &size_params); 3785 } 3786 3787 static int 3788 mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core) 3789 { 3790 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3791 struct devlink_resource_size_params size_params; 3792 u64 max; 3793 3794 if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE)) 3795 return -EIO; 3796 3797 max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE); 3798 devlink_resource_size_params_init(&size_params, max, max, 1, 3799 DEVLINK_RESOURCE_UNIT_ENTRY); 3800 3801 return devl_resource_register(devlink, "port_range_registers", max, 3802 MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS, 3803 DEVLINK_RESOURCE_ID_PARENT_TOP, 3804 &size_params); 3805 } 3806 3807 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 3808 { 3809 int err; 3810 3811 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 3812 if (err) 3813 return err; 3814 3815 err = mlxsw_sp_resources_span_register(mlxsw_core); 3816 if (err) 3817 goto err_resources_span_register; 3818 3819 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3820 if (err) 3821 goto err_resources_counter_register; 3822 3823 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3824 if (err) 3825 goto err_policer_resources_register; 3826 3827 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3828 if (err) 3829 goto err_resources_rif_mac_profile_register; 3830 3831 err = mlxsw_sp_resources_rifs_register(mlxsw_core); 3832 if (err) 3833 goto err_resources_rifs_register; 3834 3835 err = mlxsw_sp_resources_port_range_register(mlxsw_core); 3836 if (err) 3837 goto err_resources_port_range_register; 3838 3839 return 0; 3840 3841 err_resources_port_range_register: 3842 err_resources_rifs_register: 3843 err_resources_rif_mac_profile_register: 3844 err_policer_resources_register: 3845 err_resources_counter_register: 3846 err_resources_span_register: 3847 devl_resources_unregister(priv_to_devlink(mlxsw_core)); 3848 return err; 3849 } 3850 3851 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 3852 { 3853 int err; 3854 3855 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 3856 if (err) 3857 return err; 3858 
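/* The resources registered below (SPAN, flow counters, policers, RIF MAC
 * profiles, RIFs and port-range registers) appear to be common to all
 * Spectrum generations; this path reuses the same helpers as
 * mlxsw_sp1_resources_register().
 */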
3859 err = mlxsw_sp_resources_span_register(mlxsw_core);
3860 if (err)
3861 goto err_resources_span_register;
3862
3863 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3864 if (err)
3865 goto err_resources_counter_register;
3866
3867 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3868 if (err)
3869 goto err_policer_resources_register;
3870
3871 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
3872 if (err)
3873 goto err_resources_rif_mac_profile_register;
3874
3875 err = mlxsw_sp_resources_rifs_register(mlxsw_core);
3876 if (err)
3877 goto err_resources_rifs_register;
3878
3879 err = mlxsw_sp_resources_port_range_register(mlxsw_core);
3880 if (err)
3881 goto err_resources_port_range_register;
3882
3883 return 0;
3884
3885 err_resources_port_range_register:
3886 err_resources_rifs_register:
3887 err_resources_rif_mac_profile_register:
3888 err_policer_resources_register:
3889 err_resources_counter_register:
3890 err_resources_span_register:
3891 devl_resources_unregister(priv_to_devlink(mlxsw_core));
3892 return err;
3893 }
3894
3895 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3896 const struct mlxsw_config_profile *profile,
3897 u64 *p_single_size, u64 *p_double_size,
3898 u64 *p_linear_size)
3899 {
3900 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3901 u32 double_size;
3902 int err;
3903
3904 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3905 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
3906 return -EIO;
3907
3908 /* The hash part is what is left of the KVD after the
3909 * linear part. It is split into single and double
3910 * sizes according to the parts ratio from the profile.
3911 * Both sizes must be multiples of the granularity
3912 * from the profile. If the user provided the sizes,
3913 * they are obtained via devlink.
3914 */
3915 err = devl_resource_size_get(devlink,
3916 MLXSW_SP_RESOURCE_KVD_LINEAR,
3917 p_linear_size);
3918 if (err)
3919 *p_linear_size = profile->kvd_linear_size;
3920
3921 err = devl_resource_size_get(devlink,
3922 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3923 p_double_size);
3924 if (err) {
3925 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3926 *p_linear_size;
3927 double_size *= profile->kvd_hash_double_parts;
3928 double_size /= profile->kvd_hash_double_parts +
3929 profile->kvd_hash_single_parts;
3930 *p_double_size = rounddown(double_size,
3931 MLXSW_SP_KVD_GRANULARITY);
3932 }
3933
3934 err = devl_resource_size_get(devlink,
3935 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3936 p_single_size);
3937 if (err)
3938 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3939 *p_double_size - *p_linear_size;
3940
3941 /* Check results are legal.
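 * A worked example with illustrative numbers only (assuming a
 * MLXSW_SP_KVD_GRANULARITY of 128 entries): given a 400000-entry KVD,
 * a 100000-entry linear part and the 59/41 single/double parts ratio
 * from the Spectrum-1 profile,
 * double = rounddown(300000 * 41 / 100, 128) = 122880 and
 * single = 400000 - 122880 - 100000 = 177120. Both results must still
 * clear the KVD_SINGLE_MIN_SIZE and KVD_DOUBLE_MIN_SIZE minimums
 * checked here.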
*/ 3942 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3943 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3944 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3945 return -EIO; 3946 3947 return 0; 3948 } 3949 3950 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3951 struct sk_buff *skb, u16 local_port) 3952 { 3953 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3954 3955 skb_pull(skb, MLXSW_TXHDR_LEN); 3956 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3957 } 3958 3959 static struct mlxsw_driver mlxsw_sp1_driver = { 3960 .kind = mlxsw_sp1_driver_name, 3961 .priv_size = sizeof(struct mlxsw_sp), 3962 .fw_req_rev = &mlxsw_sp1_fw_rev, 3963 .fw_filename = MLXSW_SP1_FW_FILENAME, 3964 .init = mlxsw_sp1_init, 3965 .fini = mlxsw_sp_fini, 3966 .port_split = mlxsw_sp_port_split, 3967 .port_unsplit = mlxsw_sp_port_unsplit, 3968 .sb_pool_get = mlxsw_sp_sb_pool_get, 3969 .sb_pool_set = mlxsw_sp_sb_pool_set, 3970 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3971 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3972 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3973 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3974 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3975 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3976 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3977 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3978 .trap_init = mlxsw_sp_trap_init, 3979 .trap_fini = mlxsw_sp_trap_fini, 3980 .trap_action_set = mlxsw_sp_trap_action_set, 3981 .trap_group_init = mlxsw_sp_trap_group_init, 3982 .trap_group_set = mlxsw_sp_trap_group_set, 3983 .trap_policer_init = mlxsw_sp_trap_policer_init, 3984 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3985 .trap_policer_set = mlxsw_sp_trap_policer_set, 3986 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3987 .txhdr_construct = mlxsw_sp_txhdr_construct, 3988 .resources_register = mlxsw_sp1_resources_register, 3989 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3990 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3991 .txhdr_len = MLXSW_TXHDR_LEN, 3992 .profile = &mlxsw_sp1_config_profile, 3993 .sdq_supports_cqe_v2 = false, 3994 }; 3995 3996 static struct mlxsw_driver mlxsw_sp2_driver = { 3997 .kind = mlxsw_sp2_driver_name, 3998 .priv_size = sizeof(struct mlxsw_sp), 3999 .fw_req_rev = &mlxsw_sp2_fw_rev, 4000 .fw_filename = MLXSW_SP2_FW_FILENAME, 4001 .init = mlxsw_sp2_init, 4002 .fini = mlxsw_sp_fini, 4003 .port_split = mlxsw_sp_port_split, 4004 .port_unsplit = mlxsw_sp_port_unsplit, 4005 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 4006 .sb_pool_get = mlxsw_sp_sb_pool_get, 4007 .sb_pool_set = mlxsw_sp_sb_pool_set, 4008 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4009 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4010 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4011 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4012 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4013 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4014 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4015 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4016 .trap_init = mlxsw_sp_trap_init, 4017 .trap_fini = mlxsw_sp_trap_fini, 4018 .trap_action_set = mlxsw_sp_trap_action_set, 4019 .trap_group_init = mlxsw_sp_trap_group_init, 4020 .trap_group_set = mlxsw_sp_trap_group_set, 4021 .trap_policer_init = mlxsw_sp_trap_policer_init, 4022 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4023 .trap_policer_set 
= mlxsw_sp_trap_policer_set, 4024 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4025 .txhdr_construct = mlxsw_sp_txhdr_construct, 4026 .resources_register = mlxsw_sp2_resources_register, 4027 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4028 .txhdr_len = MLXSW_TXHDR_LEN, 4029 .profile = &mlxsw_sp2_config_profile, 4030 .sdq_supports_cqe_v2 = true, 4031 }; 4032 4033 static struct mlxsw_driver mlxsw_sp3_driver = { 4034 .kind = mlxsw_sp3_driver_name, 4035 .priv_size = sizeof(struct mlxsw_sp), 4036 .fw_req_rev = &mlxsw_sp3_fw_rev, 4037 .fw_filename = MLXSW_SP3_FW_FILENAME, 4038 .init = mlxsw_sp3_init, 4039 .fini = mlxsw_sp_fini, 4040 .port_split = mlxsw_sp_port_split, 4041 .port_unsplit = mlxsw_sp_port_unsplit, 4042 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 4043 .sb_pool_get = mlxsw_sp_sb_pool_get, 4044 .sb_pool_set = mlxsw_sp_sb_pool_set, 4045 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4046 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4047 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4048 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4049 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4050 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4051 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4052 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4053 .trap_init = mlxsw_sp_trap_init, 4054 .trap_fini = mlxsw_sp_trap_fini, 4055 .trap_action_set = mlxsw_sp_trap_action_set, 4056 .trap_group_init = mlxsw_sp_trap_group_init, 4057 .trap_group_set = mlxsw_sp_trap_group_set, 4058 .trap_policer_init = mlxsw_sp_trap_policer_init, 4059 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4060 .trap_policer_set = mlxsw_sp_trap_policer_set, 4061 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4062 .txhdr_construct = mlxsw_sp_txhdr_construct, 4063 .resources_register = mlxsw_sp2_resources_register, 4064 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4065 .txhdr_len = MLXSW_TXHDR_LEN, 4066 .profile = &mlxsw_sp2_config_profile, 4067 .sdq_supports_cqe_v2 = true, 4068 }; 4069 4070 static struct mlxsw_driver mlxsw_sp4_driver = { 4071 .kind = mlxsw_sp4_driver_name, 4072 .priv_size = sizeof(struct mlxsw_sp), 4073 .init = mlxsw_sp4_init, 4074 .fini = mlxsw_sp_fini, 4075 .port_split = mlxsw_sp_port_split, 4076 .port_unsplit = mlxsw_sp_port_unsplit, 4077 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 4078 .sb_pool_get = mlxsw_sp_sb_pool_get, 4079 .sb_pool_set = mlxsw_sp_sb_pool_set, 4080 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4081 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4082 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4083 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4084 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4085 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4086 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4087 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4088 .trap_init = mlxsw_sp_trap_init, 4089 .trap_fini = mlxsw_sp_trap_fini, 4090 .trap_action_set = mlxsw_sp_trap_action_set, 4091 .trap_group_init = mlxsw_sp_trap_group_init, 4092 .trap_group_set = mlxsw_sp_trap_group_set, 4093 .trap_policer_init = mlxsw_sp_trap_policer_init, 4094 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4095 .trap_policer_set = mlxsw_sp_trap_policer_set, 4096 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4097 .txhdr_construct = mlxsw_sp_txhdr_construct, 4098 .resources_register = mlxsw_sp2_resources_register, 4099 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4100 .txhdr_len = 
MLXSW_TXHDR_LEN, 4101 .profile = &mlxsw_sp4_config_profile, 4102 .sdq_supports_cqe_v2 = true, 4103 }; 4104 4105 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 4106 { 4107 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 4108 } 4109 4110 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, 4111 struct netdev_nested_priv *priv) 4112 { 4113 int ret = 0; 4114 4115 if (mlxsw_sp_port_dev_check(lower_dev)) { 4116 priv->data = (void *)netdev_priv(lower_dev); 4117 ret = 1; 4118 } 4119 4120 return ret; 4121 } 4122 4123 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 4124 { 4125 struct netdev_nested_priv priv = { 4126 .data = NULL, 4127 }; 4128 4129 if (mlxsw_sp_port_dev_check(dev)) 4130 return netdev_priv(dev); 4131 4132 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv); 4133 4134 return (struct mlxsw_sp_port *)priv.data; 4135 } 4136 4137 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 4138 { 4139 struct mlxsw_sp_port *mlxsw_sp_port; 4140 4141 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 4142 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 4143 } 4144 4145 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 4146 { 4147 struct netdev_nested_priv priv = { 4148 .data = NULL, 4149 }; 4150 4151 if (mlxsw_sp_port_dev_check(dev)) 4152 return netdev_priv(dev); 4153 4154 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 4155 &priv); 4156 4157 return (struct mlxsw_sp_port *)priv.data; 4158 } 4159 4160 int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) 4161 { 4162 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4163 int err = 0; 4164 4165 mutex_lock(&mlxsw_sp->parsing.lock); 4166 4167 if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref)) 4168 goto out_unlock; 4169 4170 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH, 4171 mlxsw_sp->parsing.vxlan_udp_dport); 4172 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4173 if (err) 4174 goto out_unlock; 4175 4176 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH; 4177 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1); 4178 4179 out_unlock: 4180 mutex_unlock(&mlxsw_sp->parsing.lock); 4181 return err; 4182 } 4183 4184 void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp) 4185 { 4186 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4187 4188 mutex_lock(&mlxsw_sp->parsing.lock); 4189 4190 if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref)) 4191 goto out_unlock; 4192 4193 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH, 4194 mlxsw_sp->parsing.vxlan_udp_dport); 4195 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4196 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 4197 4198 out_unlock: 4199 mutex_unlock(&mlxsw_sp->parsing.lock); 4200 } 4201 4202 int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, 4203 __be16 udp_dport) 4204 { 4205 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4206 int err; 4207 4208 mutex_lock(&mlxsw_sp->parsing.lock); 4209 4210 mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth, 4211 be16_to_cpu(udp_dport)); 4212 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4213 if (err) 4214 goto out_unlock; 4215 4216 mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport); 4217 4218 out_unlock: 4219 mutex_unlock(&mlxsw_sp->parsing.lock); 4220 return err; 4221 } 4222 4223 static void 4224 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 4225 struct net_device *lag_dev) 4226 { 4227 struct 
net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 4228 struct net_device *upper_dev; 4229 struct list_head *iter; 4230 4231 if (netif_is_bridge_port(lag_dev)) 4232 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 4233 4234 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4235 if (!netif_is_bridge_port(upper_dev)) 4236 continue; 4237 br_dev = netdev_master_upper_dev_get(upper_dev); 4238 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 4239 } 4240 } 4241 4242 static struct mlxsw_sp_lag * 4243 mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, 4244 struct netlink_ext_ack *extack) 4245 { 4246 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4247 struct mlxsw_sp_lag *lag; 4248 u16 lag_id; 4249 int i, err; 4250 4251 for (i = 0; i < mlxsw_sp->max_lag; i++) { 4252 if (!mlxsw_sp->lags[i].dev) 4253 break; 4254 } 4255 4256 if (i == mlxsw_sp->max_lag) { 4257 NL_SET_ERR_MSG_MOD(extack, 4258 "Exceeded number of supported LAG devices"); 4259 return ERR_PTR(-EBUSY); 4260 } 4261 4262 lag_id = i; 4263 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4264 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4265 if (err) 4266 return ERR_PTR(err); 4267 4268 lag = &mlxsw_sp->lags[lag_id]; 4269 lag->lag_id = lag_id; 4270 lag->dev = lag_dev; 4271 refcount_set(&lag->ref_count, 1); 4272 4273 return lag; 4274 } 4275 4276 static int 4277 mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag) 4278 { 4279 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4280 4281 lag->dev = NULL; 4282 4283 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id); 4284 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4285 } 4286 4287 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4288 u16 lag_id, u8 port_index) 4289 { 4290 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4291 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4292 4293 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 4294 lag_id, port_index); 4295 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4296 } 4297 4298 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4299 u16 lag_id) 4300 { 4301 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4302 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4303 4304 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 4305 lag_id); 4306 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4307 } 4308 4309 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 4310 u16 lag_id) 4311 { 4312 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4313 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4314 4315 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 4316 lag_id); 4317 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4318 } 4319 4320 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 4321 u16 lag_id) 4322 { 4323 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4324 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4325 4326 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 4327 lag_id); 4328 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4329 } 4330 4331 static struct mlxsw_sp_lag * 4332 mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev) 4333 { 4334 int i; 4335 4336 for (i = 0; i < mlxsw_sp->max_lag; i++) { 4337 if (!mlxsw_sp->lags[i].dev) 4338 continue; 4339 4340 if (mlxsw_sp->lags[i].dev == lag_dev) 4341 return 
&mlxsw_sp->lags[i]; 4342 } 4343 4344 return NULL; 4345 } 4346 4347 static struct mlxsw_sp_lag * 4348 mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, 4349 struct netlink_ext_ack *extack) 4350 { 4351 struct mlxsw_sp_lag *lag; 4352 4353 lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev); 4354 if (lag) { 4355 refcount_inc(&lag->ref_count); 4356 return lag; 4357 } 4358 4359 return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack); 4360 } 4361 4362 static void 4363 mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag) 4364 { 4365 if (!refcount_dec_and_test(&lag->ref_count)) 4366 return; 4367 4368 mlxsw_sp_lag_destroy(mlxsw_sp, lag); 4369 } 4370 4371 static bool 4372 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4373 struct net_device *lag_dev, 4374 struct netdev_lag_upper_info *lag_upper_info, 4375 struct netlink_ext_ack *extack) 4376 { 4377 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4378 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4379 return false; 4380 } 4381 return true; 4382 } 4383 4384 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4385 u16 lag_id, u8 *p_port_index) 4386 { 4387 u64 max_lag_members; 4388 int i; 4389 4390 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4391 MAX_LAG_MEMBERS); 4392 for (i = 0; i < max_lag_members; i++) { 4393 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4394 *p_port_index = i; 4395 return 0; 4396 } 4397 } 4398 return -EBUSY; 4399 } 4400 4401 static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, 4402 struct net_device *lag_dev, 4403 struct netlink_ext_ack *extack) 4404 { 4405 struct net_device *upper_dev; 4406 struct net_device *master; 4407 struct list_head *iter; 4408 int done = 0; 4409 int err; 4410 4411 master = netdev_master_upper_dev_get(lag_dev); 4412 if (master && netif_is_bridge_master(master)) { 4413 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master, 4414 extack); 4415 if (err) 4416 return err; 4417 } 4418 4419 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4420 if (!is_vlan_dev(upper_dev)) 4421 continue; 4422 4423 master = netdev_master_upper_dev_get(upper_dev); 4424 if (master && netif_is_bridge_master(master)) { 4425 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4426 upper_dev, master, 4427 extack); 4428 if (err) 4429 goto err_port_bridge_join; 4430 } 4431 4432 ++done; 4433 } 4434 4435 return 0; 4436 4437 err_port_bridge_join: 4438 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4439 if (!is_vlan_dev(upper_dev)) 4440 continue; 4441 4442 master = netdev_master_upper_dev_get(upper_dev); 4443 if (!master || !netif_is_bridge_master(master)) 4444 continue; 4445 4446 if (!done--) 4447 break; 4448 4449 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master); 4450 } 4451 4452 master = netdev_master_upper_dev_get(lag_dev); 4453 if (master && netif_is_bridge_master(master)) 4454 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master); 4455 4456 return err; 4457 } 4458 4459 static void 4460 mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4461 struct net_device *lag_dev) 4462 { 4463 struct net_device *upper_dev; 4464 struct net_device *master; 4465 struct list_head *iter; 4466 4467 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4468 if (!is_vlan_dev(upper_dev)) 4469 continue; 4470 4471 master = netdev_master_upper_dev_get(upper_dev); 4472 if (!master) 4473 continue; 4474 4475 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master); 4476 } 4477 4478 
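/* Finally, make the port leave the bridge that the LAG device itself
 * is enslaved to, if any.
 */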
master = netdev_master_upper_dev_get(lag_dev);
4479 if (master)
4480 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
4481 }
4482
4483 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4484 struct net_device *lag_dev,
4485 struct netlink_ext_ack *extack)
4486 {
4487 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4488 struct mlxsw_sp_lag *lag;
4489 u16 lag_id;
4490 u8 port_index;
4491 int err;
4492
4493 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack);
4494 if (IS_ERR(lag))
4495 return PTR_ERR(lag);
4496
4497 lag_id = lag->lag_id;
4498 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4499 if (err)
4500 return err;
4501
4502 err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
4503 extack);
4504 if (err)
4505 goto err_lag_uppers_bridge_join;
4506
4507 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4508 if (err)
4509 goto err_col_port_add;
4510
4511 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4512 mlxsw_sp_port->local_port);
4513 mlxsw_sp_port->lag_id = lag_id;
4514 mlxsw_sp_port->lagged = 1;
4515
4516 err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
4517 if (err)
4518 goto err_fid_port_join_lag;
4519
4520 /* Port is no longer usable as a router interface */
4521 if (mlxsw_sp_port->default_vlan->fid)
4522 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
4523
4524 /* Join a router interface configured on the LAG, if one exists */
4525 err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev,
4526 extack);
4527 if (err)
4528 goto err_router_join;
4529
4530 err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
4531 if (err)
4532 goto err_replay;
4533
4534 return 0;
4535
4536 err_replay:
4537 mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
4538 err_router_join:
4539 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
4540 err_fid_port_join_lag:
4541 mlxsw_sp_port->lagged = 0;
4542 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4543 mlxsw_sp_port->local_port);
4544 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4545 err_col_port_add:
4546 mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
4547 err_lag_uppers_bridge_join:
4548 mlxsw_sp_lag_put(mlxsw_sp, lag);
4549 return err;
4550 }
4551
4552 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4553 struct net_device *lag_dev)
4554 {
4555 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4556 u16 lag_id = mlxsw_sp_port->lag_id;
4557 struct mlxsw_sp_lag *lag;
4558
4559 if (!mlxsw_sp_port->lagged)
4560 return;
4561 lag = &mlxsw_sp->lags[lag_id];
4562
4563 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4564
4565 /* Any VLANs configured on the port are no longer valid */
4566 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
4567 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
4568 /* Make the LAG and its directly linked uppers leave the bridges
4569 * they are members of.
4570 */
4571 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
4572
4573 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
4574
4575 mlxsw_sp_lag_put(mlxsw_sp, lag);
4576
4577 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4578 mlxsw_sp_port->local_port);
4579 mlxsw_sp_port->lagged = 0;
4580
4581 /* Make sure untagged frames are allowed to ingress */
4582 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
4583 ETH_P_8021Q);
4584 }
4585
4586 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4587 u16 lag_id)
4588 {
4589 struct mlxsw_sp *mlxsw_sp =
mlxsw_sp_port->mlxsw_sp; 4590 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4591 4592 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4593 mlxsw_sp_port->local_port); 4594 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4595 } 4596 4597 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4598 u16 lag_id) 4599 { 4600 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4601 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4602 4603 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4604 mlxsw_sp_port->local_port); 4605 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4606 } 4607 4608 static int 4609 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 4610 { 4611 int err; 4612 4613 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 4614 mlxsw_sp_port->lag_id); 4615 if (err) 4616 return err; 4617 4618 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4619 if (err) 4620 goto err_dist_port_add; 4621 4622 return 0; 4623 4624 err_dist_port_add: 4625 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4626 return err; 4627 } 4628 4629 static int 4630 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 4631 { 4632 int err; 4633 4634 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4635 mlxsw_sp_port->lag_id); 4636 if (err) 4637 return err; 4638 4639 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 4640 mlxsw_sp_port->lag_id); 4641 if (err) 4642 goto err_col_port_disable; 4643 4644 return 0; 4645 4646 err_col_port_disable: 4647 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4648 return err; 4649 } 4650 4651 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4652 struct netdev_lag_lower_state_info *info) 4653 { 4654 if (info->tx_enabled) 4655 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 4656 else 4657 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4658 } 4659 4660 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4661 bool enable) 4662 { 4663 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4664 enum mlxsw_reg_spms_state spms_state; 4665 char *spms_pl; 4666 u16 vid; 4667 int err; 4668 4669 spms_state = enable ? 
MLXSW_REG_SPMS_STATE_FORWARDING : 4670 MLXSW_REG_SPMS_STATE_DISCARDING; 4671 4672 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4673 if (!spms_pl) 4674 return -ENOMEM; 4675 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4676 4677 for (vid = 0; vid < VLAN_N_VID; vid++) 4678 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4679 4680 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4681 kfree(spms_pl); 4682 return err; 4683 } 4684 4685 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4686 { 4687 u16 vid = 1; 4688 int err; 4689 4690 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4691 if (err) 4692 return err; 4693 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4694 if (err) 4695 goto err_port_stp_set; 4696 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4697 true, false); 4698 if (err) 4699 goto err_port_vlan_set; 4700 4701 for (; vid <= VLAN_N_VID - 1; vid++) { 4702 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4703 vid, false); 4704 if (err) 4705 goto err_vid_learning_set; 4706 } 4707 4708 return 0; 4709 4710 err_vid_learning_set: 4711 for (vid--; vid >= 1; vid--) 4712 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4713 err_port_vlan_set: 4714 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4715 err_port_stp_set: 4716 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4717 return err; 4718 } 4719 4720 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4721 { 4722 u16 vid; 4723 4724 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4725 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4726 vid, true); 4727 4728 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4729 false, false); 4730 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4731 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4732 } 4733 4734 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 4735 { 4736 unsigned int num_vxlans = 0; 4737 struct net_device *dev; 4738 struct list_head *iter; 4739 4740 netdev_for_each_lower_dev(br_dev, dev, iter) { 4741 if (netif_is_vxlan(dev)) 4742 num_vxlans++; 4743 } 4744 4745 return num_vxlans > 1; 4746 } 4747 4748 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4749 { 4750 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4751 struct net_device *dev; 4752 struct list_head *iter; 4753 4754 netdev_for_each_lower_dev(br_dev, dev, iter) { 4755 u16 pvid; 4756 int err; 4757 4758 if (!netif_is_vxlan(dev)) 4759 continue; 4760 4761 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4762 if (err || !pvid) 4763 continue; 4764 4765 if (test_and_set_bit(pvid, vlans)) 4766 return false; 4767 } 4768 4769 return true; 4770 } 4771 4772 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4773 struct netlink_ext_ack *extack) 4774 { 4775 if (br_multicast_enabled(br_dev)) { 4776 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4777 return false; 4778 } 4779 4780 if (!br_vlan_enabled(br_dev) && 4781 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4782 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4783 return false; 4784 } 4785 4786 if (br_vlan_enabled(br_dev) && 4787 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4788 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4789 return false; 4790 } 4791 4792 return true; 4793 } 4794 4795 static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev, 4796 struct 
net_device *dev) 4797 { 4798 return upper_dev == netdev_master_upper_dev_get(dev); 4799 } 4800 4801 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp, 4802 unsigned long event, void *ptr, 4803 bool process_foreign); 4804 4805 static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp, 4806 struct net_device *dev, 4807 struct netlink_ext_ack *extack) 4808 { 4809 struct net_device *upper_dev; 4810 struct list_head *iter; 4811 int err; 4812 4813 netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) { 4814 struct netdev_notifier_changeupper_info info = { 4815 .info = { 4816 .dev = dev, 4817 .extack = extack, 4818 }, 4819 .master = mlxsw_sp_netdev_is_master(upper_dev, dev), 4820 .upper_dev = upper_dev, 4821 .linking = true, 4822 4823 /* upper_info is relevant for LAG devices. But we would 4824 * only need this if LAG were a valid upper above 4825 * another upper (e.g. a bridge that is a member of a 4826 * LAG), and that is never a valid configuration. So we 4827 * can keep this as NULL. 4828 */ 4829 .upper_info = NULL, 4830 }; 4831 4832 err = __mlxsw_sp_netdevice_event(mlxsw_sp, 4833 NETDEV_PRECHANGEUPPER, 4834 &info, true); 4835 if (err) 4836 return err; 4837 4838 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev, 4839 extack); 4840 if (err) 4841 return err; 4842 } 4843 4844 return 0; 4845 } 4846 4847 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 4848 struct net_device *dev, 4849 unsigned long event, void *ptr, 4850 bool replay_deslavement) 4851 { 4852 struct netdev_notifier_changeupper_info *info; 4853 struct mlxsw_sp_port *mlxsw_sp_port; 4854 struct netlink_ext_ack *extack; 4855 struct net_device *upper_dev; 4856 struct mlxsw_sp *mlxsw_sp; 4857 int err = 0; 4858 u16 proto; 4859 4860 mlxsw_sp_port = netdev_priv(dev); 4861 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4862 info = ptr; 4863 extack = netdev_notifier_info_to_extack(&info->info); 4864 4865 switch (event) { 4866 case NETDEV_PRECHANGEUPPER: 4867 upper_dev = info->upper_dev; 4868 if (!is_vlan_dev(upper_dev) && 4869 !netif_is_lag_master(upper_dev) && 4870 !netif_is_bridge_master(upper_dev) && 4871 !netif_is_ovs_master(upper_dev) && 4872 !netif_is_macvlan(upper_dev) && 4873 !netif_is_l3_master(upper_dev)) { 4874 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4875 return -EINVAL; 4876 } 4877 if (!info->linking) 4878 break; 4879 if (netif_is_bridge_master(upper_dev) && 4880 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4881 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4882 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4883 return -EOPNOTSUPP; 4884 if (netdev_has_any_upper_dev(upper_dev) && 4885 (!netif_is_bridge_master(upper_dev) || 4886 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4887 upper_dev))) { 4888 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, 4889 upper_dev, 4890 extack); 4891 if (err) 4892 return err; 4893 } 4894 if (netif_is_lag_master(upper_dev) && 4895 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4896 info->upper_info, extack)) 4897 return -EINVAL; 4898 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4899 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4900 return -EINVAL; 4901 } 4902 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4903 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4904 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4905 return -EINVAL; 4906 } 4907 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4908 
NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 4909 return -EINVAL; 4910 } 4911 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4912 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4913 return -EINVAL; 4914 } 4915 if (netif_is_bridge_master(upper_dev)) { 4916 br_vlan_get_proto(upper_dev, &proto); 4917 if (br_vlan_enabled(upper_dev) && 4918 proto != ETH_P_8021Q && proto != ETH_P_8021AD) { 4919 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported"); 4920 return -EOPNOTSUPP; 4921 } 4922 if (vlan_uses_dev(lower_dev) && 4923 br_vlan_enabled(upper_dev) && 4924 proto == ETH_P_8021AD) { 4925 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported"); 4926 return -EOPNOTSUPP; 4927 } 4928 } 4929 if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) { 4930 struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev); 4931 4932 if (br_vlan_enabled(br_dev)) { 4933 br_vlan_get_proto(br_dev, &proto); 4934 if (proto == ETH_P_8021AD) { 4935 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge"); 4936 return -EOPNOTSUPP; 4937 } 4938 } 4939 } 4940 if (is_vlan_dev(upper_dev) && 4941 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4942 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4943 return -EOPNOTSUPP; 4944 } 4945 if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) { 4946 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port"); 4947 return -EOPNOTSUPP; 4948 } 4949 break; 4950 case NETDEV_CHANGEUPPER: 4951 upper_dev = info->upper_dev; 4952 if (netif_is_bridge_master(upper_dev)) { 4953 if (info->linking) { 4954 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4955 lower_dev, 4956 upper_dev, 4957 extack); 4958 } else { 4959 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4960 lower_dev, 4961 upper_dev); 4962 if (!replay_deslavement) 4963 break; 4964 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, 4965 lower_dev); 4966 } 4967 } else if (netif_is_lag_master(upper_dev)) { 4968 if (info->linking) { 4969 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4970 upper_dev, extack); 4971 } else { 4972 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4973 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4974 upper_dev); 4975 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, 4976 dev); 4977 } 4978 } else if (netif_is_ovs_master(upper_dev)) { 4979 if (info->linking) 4980 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4981 else 4982 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4983 } else if (netif_is_macvlan(upper_dev)) { 4984 if (!info->linking) 4985 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4986 } else if (is_vlan_dev(upper_dev)) { 4987 struct net_device *br_dev; 4988 4989 if (!netif_is_bridge_port(upper_dev)) 4990 break; 4991 if (info->linking) 4992 break; 4993 br_dev = netdev_master_upper_dev_get(upper_dev); 4994 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 4995 br_dev); 4996 } 4997 break; 4998 } 4999 5000 return err; 5001 } 5002 5003 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 5004 unsigned long event, void *ptr) 5005 { 5006 struct netdev_notifier_changelowerstate_info *info; 5007 struct mlxsw_sp_port *mlxsw_sp_port; 5008 int err; 5009 5010 mlxsw_sp_port = netdev_priv(dev); 5011 info = ptr; 5012 5013 switch (event) { 5014 case NETDEV_CHANGELOWERSTATE: 5015 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 
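/* Reflect the bond's tx_enabled state for this lower into the LAG
 * collector/distributor state of the port.
 */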
5016 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 5017 info->lower_state_info); 5018 if (err) 5019 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 5020 } 5021 break; 5022 } 5023 5024 return 0; 5025 } 5026 5027 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 5028 struct net_device *port_dev, 5029 unsigned long event, void *ptr, 5030 bool replay_deslavement) 5031 { 5032 switch (event) { 5033 case NETDEV_PRECHANGEUPPER: 5034 case NETDEV_CHANGEUPPER: 5035 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 5036 event, ptr, 5037 replay_deslavement); 5038 case NETDEV_CHANGELOWERSTATE: 5039 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 5040 ptr); 5041 } 5042 5043 return 0; 5044 } 5045 5046 /* Called for LAG or its upper VLAN after the per-LAG-lower processing was done, 5047 * to do any per-LAG / per-LAG-upper processing. 5048 */ 5049 static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev, 5050 unsigned long event, 5051 void *ptr) 5052 { 5053 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev); 5054 struct netdev_notifier_changeupper_info *info = ptr; 5055 5056 if (!mlxsw_sp) 5057 return 0; 5058 5059 switch (event) { 5060 case NETDEV_CHANGEUPPER: 5061 if (info->linking) 5062 break; 5063 if (netif_is_bridge_master(info->upper_dev)) 5064 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev); 5065 break; 5066 } 5067 return 0; 5068 } 5069 5070 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 5071 unsigned long event, void *ptr) 5072 { 5073 struct net_device *dev; 5074 struct list_head *iter; 5075 int ret; 5076 5077 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5078 if (mlxsw_sp_port_dev_check(dev)) { 5079 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 5080 ptr, false); 5081 if (ret) 5082 return ret; 5083 } 5084 } 5085 5086 return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr); 5087 } 5088 5089 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 5090 struct net_device *dev, 5091 unsigned long event, void *ptr, 5092 u16 vid, bool replay_deslavement) 5093 { 5094 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 5095 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5096 struct netdev_notifier_changeupper_info *info = ptr; 5097 struct netlink_ext_ack *extack; 5098 struct net_device *upper_dev; 5099 int err = 0; 5100 5101 extack = netdev_notifier_info_to_extack(&info->info); 5102 5103 switch (event) { 5104 case NETDEV_PRECHANGEUPPER: 5105 upper_dev = info->upper_dev; 5106 if (!netif_is_bridge_master(upper_dev) && 5107 !netif_is_macvlan(upper_dev) && 5108 !netif_is_l3_master(upper_dev)) { 5109 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5110 return -EINVAL; 5111 } 5112 if (!info->linking) 5113 break; 5114 if (netif_is_bridge_master(upper_dev) && 5115 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5116 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5117 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5118 return -EOPNOTSUPP; 5119 if (netdev_has_any_upper_dev(upper_dev) && 5120 (!netif_is_bridge_master(upper_dev) || 5121 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5122 upper_dev))) { 5123 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, 5124 upper_dev, 5125 extack); 5126 if (err) 5127 return err; 5128 } 5129 break; 5130 case NETDEV_CHANGEUPPER: 5131 upper_dev = info->upper_dev; 5132 if (netif_is_bridge_master(upper_dev)) { 5133 if (info->linking) { 5134 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5135 vlan_dev, 5136 
upper_dev, 5137 extack); 5138 } else { 5139 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5140 vlan_dev, 5141 upper_dev); 5142 if (!replay_deslavement) 5143 break; 5144 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, 5145 vlan_dev); 5146 } 5147 } else if (netif_is_macvlan(upper_dev)) { 5148 if (!info->linking) 5149 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5150 } 5151 break; 5152 } 5153 5154 return err; 5155 } 5156 5157 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 5158 struct net_device *lag_dev, 5159 unsigned long event, 5160 void *ptr, u16 vid) 5161 { 5162 struct net_device *dev; 5163 struct list_head *iter; 5164 int ret; 5165 5166 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5167 if (mlxsw_sp_port_dev_check(dev)) { 5168 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 5169 event, ptr, 5170 vid, false); 5171 if (ret) 5172 return ret; 5173 } 5174 } 5175 5176 return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr); 5177 } 5178 5179 static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp, 5180 struct net_device *vlan_dev, 5181 struct net_device *br_dev, 5182 unsigned long event, void *ptr, 5183 u16 vid, bool process_foreign) 5184 { 5185 struct netdev_notifier_changeupper_info *info = ptr; 5186 struct netlink_ext_ack *extack; 5187 struct net_device *upper_dev; 5188 5189 if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev)) 5190 return 0; 5191 5192 extack = netdev_notifier_info_to_extack(&info->info); 5193 5194 switch (event) { 5195 case NETDEV_PRECHANGEUPPER: 5196 upper_dev = info->upper_dev; 5197 if (!netif_is_macvlan(upper_dev) && 5198 !netif_is_l3_master(upper_dev)) { 5199 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5200 return -EOPNOTSUPP; 5201 } 5202 break; 5203 case NETDEV_CHANGEUPPER: 5204 upper_dev = info->upper_dev; 5205 if (info->linking) 5206 break; 5207 if (netif_is_macvlan(upper_dev)) 5208 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5209 break; 5210 } 5211 5212 return 0; 5213 } 5214 5215 static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp, 5216 struct net_device *vlan_dev, 5217 unsigned long event, void *ptr, 5218 bool process_foreign) 5219 { 5220 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 5221 u16 vid = vlan_dev_vlan_id(vlan_dev); 5222 5223 if (mlxsw_sp_port_dev_check(real_dev)) 5224 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 5225 event, ptr, vid, 5226 true); 5227 else if (netif_is_lag_master(real_dev)) 5228 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 5229 real_dev, event, 5230 ptr, vid); 5231 else if (netif_is_bridge_master(real_dev)) 5232 return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev, 5233 real_dev, event, 5234 ptr, vid, 5235 process_foreign); 5236 5237 return 0; 5238 } 5239 5240 static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp, 5241 struct net_device *br_dev, 5242 unsigned long event, void *ptr, 5243 bool process_foreign) 5244 { 5245 struct netdev_notifier_changeupper_info *info = ptr; 5246 struct netlink_ext_ack *extack; 5247 struct net_device *upper_dev; 5248 u16 proto; 5249 5250 if (!process_foreign && !mlxsw_sp_lower_get(br_dev)) 5251 return 0; 5252 5253 extack = netdev_notifier_info_to_extack(&info->info); 5254 5255 switch (event) { 5256 case NETDEV_PRECHANGEUPPER: 5257 upper_dev = info->upper_dev; 5258 if (!is_vlan_dev(upper_dev) && 5259 !netif_is_macvlan(upper_dev) && 5260 !netif_is_l3_master(upper_dev)) { 5261 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5262 return 
-EOPNOTSUPP; 5263 } 5264 if (!info->linking) 5265 break; 5266 if (br_vlan_enabled(br_dev)) { 5267 br_vlan_get_proto(br_dev, &proto); 5268 if (proto == ETH_P_8021AD) { 5269 NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge"); 5270 return -EOPNOTSUPP; 5271 } 5272 } 5273 if (is_vlan_dev(upper_dev) && 5274 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 5275 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 5276 return -EOPNOTSUPP; 5277 } 5278 break; 5279 case NETDEV_CHANGEUPPER: 5280 upper_dev = info->upper_dev; 5281 if (info->linking) 5282 break; 5283 if (is_vlan_dev(upper_dev)) 5284 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 5285 if (netif_is_macvlan(upper_dev)) 5286 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5287 break; 5288 } 5289 5290 return 0; 5291 } 5292 5293 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 5294 unsigned long event, void *ptr) 5295 { 5296 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 5297 struct netdev_notifier_changeupper_info *info = ptr; 5298 struct netlink_ext_ack *extack; 5299 struct net_device *upper_dev; 5300 5301 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 5302 return 0; 5303 5304 extack = netdev_notifier_info_to_extack(&info->info); 5305 upper_dev = info->upper_dev; 5306 5307 if (!netif_is_l3_master(upper_dev)) { 5308 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5309 return -EOPNOTSUPP; 5310 } 5311 5312 return 0; 5313 } 5314 5315 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 5316 struct net_device *dev, 5317 unsigned long event, void *ptr) 5318 { 5319 struct netdev_notifier_changeupper_info *cu_info; 5320 struct netdev_notifier_info *info = ptr; 5321 struct netlink_ext_ack *extack; 5322 struct net_device *upper_dev; 5323 5324 extack = netdev_notifier_info_to_extack(info); 5325 5326 switch (event) { 5327 case NETDEV_CHANGEUPPER: 5328 cu_info = container_of(info, 5329 struct netdev_notifier_changeupper_info, 5330 info); 5331 upper_dev = cu_info->upper_dev; 5332 if (!netif_is_bridge_master(upper_dev)) 5333 return 0; 5334 if (!mlxsw_sp_lower_get(upper_dev)) 5335 return 0; 5336 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5337 return -EOPNOTSUPP; 5338 if (cu_info->linking) { 5339 if (!netif_running(dev)) 5340 return 0; 5341 /* When the bridge is VLAN-aware, the VNI of the VxLAN 5342 * device needs to be mapped to a VLAN, but at this 5343 * point no VLANs are configured on the VxLAN device 5344 */ 5345 if (br_vlan_enabled(upper_dev)) 5346 return 0; 5347 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 5348 dev, 0, extack); 5349 } else { 5350 /* VLANs were already flushed, which triggered the 5351 * necessary cleanup 5352 */ 5353 if (br_vlan_enabled(upper_dev)) 5354 return 0; 5355 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5356 } 5357 break; 5358 case NETDEV_PRE_UP: 5359 upper_dev = netdev_master_upper_dev_get(dev); 5360 if (!upper_dev) 5361 return 0; 5362 if (!netif_is_bridge_master(upper_dev)) 5363 return 0; 5364 if (!mlxsw_sp_lower_get(upper_dev)) 5365 return 0; 5366 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 5367 extack); 5368 case NETDEV_DOWN: 5369 upper_dev = netdev_master_upper_dev_get(dev); 5370 if (!upper_dev) 5371 return 0; 5372 if (!netif_is_bridge_master(upper_dev)) 5373 return 0; 5374 if (!mlxsw_sp_lower_get(upper_dev)) 5375 return 0; 5376 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5377 break; 5378 } 5379 5380 return 0; 5381 } 5382 5383 
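/* All of the handlers above funnel into a single demultiplexer,
 * __mlxsw_sp_netdevice_event() below, which classifies the netdev and
 * dispatches to the matching handler. As a minimal sketch of how the
 * notifier is typically wired up during initialization (assuming the
 * netdevice_nb member used by mlxsw_sp_netdevice_event() and an
 * mlxsw_sp_net() helper resolving the driver's network namespace):
 *
 *	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
 *	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
 *					      &mlxsw_sp->netdevice_nb);
 */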
static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp, 5384 unsigned long event, void *ptr, 5385 bool process_foreign) 5386 { 5387 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5388 struct mlxsw_sp_span_entry *span_entry; 5389 int err = 0; 5390 5391 if (event == NETDEV_UNREGISTER) { 5392 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 5393 if (span_entry) 5394 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 5395 } 5396 5397 if (netif_is_vxlan(dev)) 5398 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 5399 else if (mlxsw_sp_port_dev_check(dev)) 5400 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true); 5401 else if (netif_is_lag_master(dev)) 5402 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 5403 else if (is_vlan_dev(dev)) 5404 err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr, 5405 process_foreign); 5406 else if (netif_is_bridge_master(dev)) 5407 err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr, 5408 process_foreign); 5409 else if (netif_is_macvlan(dev)) 5410 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 5411 5412 return err; 5413 } 5414 5415 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 5416 unsigned long event, void *ptr) 5417 { 5418 struct mlxsw_sp *mlxsw_sp; 5419 int err; 5420 5421 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 5422 mlxsw_sp_span_respin(mlxsw_sp); 5423 err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false); 5424 5425 return notifier_from_errno(err); 5426 } 5427 5428 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 5429 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 5430 {0, }, 5431 }; 5432 5433 static struct pci_driver mlxsw_sp1_pci_driver = { 5434 .name = mlxsw_sp1_driver_name, 5435 .id_table = mlxsw_sp1_pci_id_table, 5436 }; 5437 5438 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 5439 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 5440 {0, }, 5441 }; 5442 5443 static struct pci_driver mlxsw_sp2_pci_driver = { 5444 .name = mlxsw_sp2_driver_name, 5445 .id_table = mlxsw_sp2_pci_id_table, 5446 }; 5447 5448 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 5449 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 5450 {0, }, 5451 }; 5452 5453 static struct pci_driver mlxsw_sp3_pci_driver = { 5454 .name = mlxsw_sp3_driver_name, 5455 .id_table = mlxsw_sp3_pci_id_table, 5456 }; 5457 5458 static const struct pci_device_id mlxsw_sp4_pci_id_table[] = { 5459 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0}, 5460 {0, }, 5461 }; 5462 5463 static struct pci_driver mlxsw_sp4_pci_driver = { 5464 .name = mlxsw_sp4_driver_name, 5465 .id_table = mlxsw_sp4_pci_id_table, 5466 }; 5467 5468 static int __init mlxsw_sp_module_init(void) 5469 { 5470 int err; 5471 5472 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 5473 if (err) 5474 return err; 5475 5476 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 5477 if (err) 5478 goto err_sp2_core_driver_register; 5479 5480 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 5481 if (err) 5482 goto err_sp3_core_driver_register; 5483 5484 err = mlxsw_core_driver_register(&mlxsw_sp4_driver); 5485 if (err) 5486 goto err_sp4_core_driver_register; 5487 5488 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 5489 if (err) 5490 goto err_sp1_pci_driver_register; 5491 5492 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 5493 if (err) 5494 goto err_sp2_pci_driver_register; 5495 5496 err = 
mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 5497 if (err) 5498 goto err_sp3_pci_driver_register; 5499 5500 err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver); 5501 if (err) 5502 goto err_sp4_pci_driver_register; 5503 5504 return 0; 5505 5506 err_sp4_pci_driver_register: 5507 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5508 err_sp3_pci_driver_register: 5509 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5510 err_sp2_pci_driver_register: 5511 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5512 err_sp1_pci_driver_register: 5513 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5514 err_sp4_core_driver_register: 5515 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5516 err_sp3_core_driver_register: 5517 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5518 err_sp2_core_driver_register: 5519 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5520 return err; 5521 } 5522 5523 static void __exit mlxsw_sp_module_exit(void) 5524 { 5525 mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver); 5526 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5527 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5528 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5529 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5530 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5531 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5532 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5533 } 5534 5535 module_init(mlxsw_sp_module_init); 5536 module_exit(mlxsw_sp_module_exit); 5537 5538 MODULE_LICENSE("Dual BSD/GPL"); 5539 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5540 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 5541 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5542 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5543 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 5544 MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); 5545 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5546 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 5547 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 5548 MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME); 5549
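/* The MODULE_DEVICE_TABLE() entries above emit modalias records for each PCI
 * ID table so that udev/kmod can autoload this module when a matching
 * Spectrum ASIC appears, and the MODULE_FIRMWARE() entries publish the
 * firmware blobs the driver may request so initramfs tooling can bundle them.
 */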