1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <linux/refcount.h> 27 #include <linux/rhashtable.h> 28 #include <net/switchdev.h> 29 #include <net/pkt_cls.h> 30 #include <net/netevent.h> 31 #include <net/addrconf.h> 32 #include <linux/ptp_classify.h> 33 34 #include "spectrum.h" 35 #include "pci.h" 36 #include "core.h" 37 #include "core_env.h" 38 #include "reg.h" 39 #include "port.h" 40 #include "trap.h" 41 #include "txheader.h" 42 #include "spectrum_cnt.h" 43 #include "spectrum_dpipe.h" 44 #include "spectrum_acl_flex_actions.h" 45 #include "spectrum_span.h" 46 #include "spectrum_ptp.h" 47 #include "spectrum_trap.h" 48 49 #define MLXSW_SP_FWREV_MINOR 2010 50 #define MLXSW_SP_FWREV_SUBMINOR 1006 51 52 #define MLXSW_SP1_FWREV_MAJOR 13 53 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 54 55 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 56 .major = MLXSW_SP1_FWREV_MAJOR, 57 .minor = MLXSW_SP_FWREV_MINOR, 58 .subminor = MLXSW_SP_FWREV_SUBMINOR, 59 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 60 }; 61 62 #define MLXSW_SP1_FW_FILENAME \ 63 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 64 "." __stringify(MLXSW_SP_FWREV_MINOR) \ 65 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" 66 67 #define MLXSW_SP2_FWREV_MAJOR 29 68 69 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 70 .major = MLXSW_SP2_FWREV_MAJOR, 71 .minor = MLXSW_SP_FWREV_MINOR, 72 .subminor = MLXSW_SP_FWREV_SUBMINOR, 73 }; 74 75 #define MLXSW_SP2_FW_FILENAME \ 76 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 77 "." __stringify(MLXSW_SP_FWREV_MINOR) \ 78 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" 79 80 #define MLXSW_SP3_FWREV_MAJOR 30 81 82 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { 83 .major = MLXSW_SP3_FWREV_MAJOR, 84 .minor = MLXSW_SP_FWREV_MINOR, 85 .subminor = MLXSW_SP_FWREV_SUBMINOR, 86 }; 87 88 #define MLXSW_SP3_FW_FILENAME \ 89 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \ 90 "." __stringify(MLXSW_SP_FWREV_MINOR) \ 91 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" 92 93 #define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \ 94 "mellanox/lc_ini_bundle_" \ 95 __stringify(MLXSW_SP_FWREV_MINOR) "_" \ 96 __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin" 97 98 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 99 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 100 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 101 static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4"; 102 103 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 104 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 105 }; 106 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 107 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 108 }; 109 110 /* tx_hdr_version 111 * Tx header version. 112 * Must be set to 1. 113 */ 114 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 115 116 /* tx_hdr_ctl 117 * Packet control type. 118 * 0 - Ethernet control (e.g. EMADs, LACP) 119 * 1 - Ethernet data 120 */ 121 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 122 123 /* tx_hdr_proto 124 * Packet protocol type. Must be set to 1 (Ethernet). 125 */ 126 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 127 128 /* tx_hdr_rx_is_router 129 * Packet is sent from the router. Valid for data packets only. 130 */ 131 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 132 133 /* tx_hdr_fid_valid 134 * Indicates if the 'fid' field is valid and should be used for 135 * forwarding lookup. Valid for data packets only. 136 */ 137 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 138 139 /* tx_hdr_swid 140 * Switch partition ID. Must be set to 0. 141 */ 142 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 143 144 /* tx_hdr_control_tclass 145 * Indicates if the packet should use the control TClass and not one 146 * of the data TClasses. 147 */ 148 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 149 150 /* tx_hdr_etclass 151 * Egress TClass to be used on the egress device on the egress port. 152 */ 153 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 154 155 /* tx_hdr_port_mid 156 * Destination local port for unicast packets. 157 * Destination multicast ID for multicast packets. 158 * 159 * Control packets are directed to a specific egress port, while data 160 * packets are transmitted through the CPU port (0) into the switch partition, 161 * where forwarding rules are applied. 162 */ 163 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 164 165 /* tx_hdr_fid 166 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 167 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 168 * Valid for data packets only. 169 */ 170 MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16); 171 172 /* tx_hdr_type 173 * 0 - Data packets 174 * 6 - Control packets 175 */ 176 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 177 178 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 179 unsigned int counter_index, bool clear, 180 u64 *packets, u64 *bytes) 181 { 182 enum mlxsw_reg_mgpc_opcode op = clear ? MLXSW_REG_MGPC_OPCODE_CLEAR : 183 MLXSW_REG_MGPC_OPCODE_NOP; 184 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 185 int err; 186 187 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, op, 188 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 189 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 190 if (err) 191 return err; 192 if (packets) 193 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 194 if (bytes) 195 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 196 return 0; 197 } 198 199 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 200 unsigned int counter_index) 201 { 202 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 203 204 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 205 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 206 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 207 } 208 209 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 210 unsigned int *p_counter_index) 211 { 212 int err; 213 214 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 215 p_counter_index); 216 if (err) 217 return err; 218 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 219 if (err) 220 goto err_counter_clear; 221 return 0; 222 223 err_counter_clear: 224 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 225 *p_counter_index); 226 return err; 227 } 228 229 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 230 unsigned int counter_index) 231 { 232 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 233 counter_index); 234 } 235 236 void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 237 const struct mlxsw_tx_info *tx_info) 238 { 239 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 240 241 memset(txhdr, 0, MLXSW_TXHDR_LEN); 242 243 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 244 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 245 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 246 mlxsw_tx_hdr_swid_set(txhdr, 0); 247 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 248 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 249 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 250 } 251 252 int 253 mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core, 254 struct mlxsw_sp_port *mlxsw_sp_port, 255 struct sk_buff *skb, 256 const struct mlxsw_tx_info *tx_info) 257 { 258 char *txhdr; 259 u16 max_fid; 260 int err; 261 262 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 263 err = -ENOMEM; 264 goto err_skb_cow_head; 265 } 266 267 if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) { 268 err = -EIO; 269 goto err_res_valid; 270 } 271 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID); 272 273 txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 274 memset(txhdr, 0, MLXSW_TXHDR_LEN); 275 276 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 277 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 278 mlxsw_tx_hdr_rx_is_router_set(txhdr, true); 279 mlxsw_tx_hdr_fid_valid_set(txhdr, true); 280 mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1); 281 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA); 282 return 0; 283 284 err_res_valid: 285 err_skb_cow_head: 286 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 287 dev_kfree_skb_any(skb); 288 return err; 289 } 290 291 static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb) 292 { 293 unsigned int type; 294 295 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) 296 return false; 297 298 type = ptp_classify_raw(skb); 299 return !!ptp_parse_header(skb, type); 300 } 301 302 static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core, 303 struct mlxsw_sp_port *mlxsw_sp_port, 304 struct sk_buff *skb, 305 const struct mlxsw_tx_info *tx_info) 306 { 307 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 308 309 /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp 310 * need special handling and cannot be transmitted as regular control 311 * packets. 312 */ 313 if (unlikely(mlxsw_sp_skb_requires_ts(skb))) 314 return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core, 315 mlxsw_sp_port, skb, 316 tx_info); 317 318 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 319 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 320 dev_kfree_skb_any(skb); 321 return -ENOMEM; 322 } 323 324 mlxsw_sp_txhdr_construct(skb, tx_info); 325 return 0; 326 } 327 328 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 329 { 330 switch (state) { 331 case BR_STATE_FORWARDING: 332 return MLXSW_REG_SPMS_STATE_FORWARDING; 333 case BR_STATE_LEARNING: 334 return MLXSW_REG_SPMS_STATE_LEARNING; 335 case BR_STATE_LISTENING: 336 case BR_STATE_DISABLED: 337 case BR_STATE_BLOCKING: 338 return MLXSW_REG_SPMS_STATE_DISCARDING; 339 default: 340 BUG(); 341 } 342 } 343 344 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 345 u8 state) 346 { 347 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 348 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 349 char *spms_pl; 350 int err; 351 352 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 353 if (!spms_pl) 354 return -ENOMEM; 355 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 356 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 357 358 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 359 kfree(spms_pl); 360 return err; 361 } 362 363 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 364 { 365 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 366 int err; 367 368 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 369 if (err) 370 return err; 371 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 372 return 0; 373 } 374 375 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 376 bool is_up) 377 { 378 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 379 char paos_pl[MLXSW_REG_PAOS_LEN]; 380 381 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 382 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 383 MLXSW_PORT_ADMIN_STATUS_DOWN); 384 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 385 } 386 387 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 388 const unsigned char *addr) 389 { 390 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 391 char ppad_pl[MLXSW_REG_PPAD_LEN]; 392 393 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 394 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 395 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 396 } 397 398 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 399 { 400 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 401 402 eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac, 403 mlxsw_sp_port->local_port); 404 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, 405 mlxsw_sp_port->dev->dev_addr); 406 } 407 408 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 409 { 410 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 411 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 412 413 mtu += MLXSW_PORT_ETH_FRAME_HDR; 414 415 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 416 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 417 } 418 419 static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, 420 u16 local_port, u8 swid) 421 { 422 char pspa_pl[MLXSW_REG_PSPA_LEN]; 423 424 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); 425 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 426 } 427 428 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 429 { 430 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 431 char svpe_pl[MLXSW_REG_SVPE_LEN]; 432 433 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 434 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 435 } 436 437 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 438 bool learn_enable) 439 { 440 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 441 char *spvmlr_pl; 442 int err; 443 444 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 445 if (!spvmlr_pl) 446 return -ENOMEM; 447 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 448 learn_enable); 449 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 450 kfree(spvmlr_pl); 451 return err; 452 } 453 454 int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 455 { 456 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 457 char spfsr_pl[MLXSW_REG_SPFSR_LEN]; 458 int err; 459 460 if (mlxsw_sp_port->security == enable) 461 return 0; 462 463 mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable); 464 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl); 465 if (err) 466 return err; 467 468 mlxsw_sp_port->security = enable; 469 return 0; 470 } 471 472 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type) 473 { 474 switch (ethtype) { 475 case ETH_P_8021Q: 476 *p_sver_type = 0; 477 break; 478 case ETH_P_8021AD: 479 *p_sver_type = 1; 480 break; 481 default: 482 return -EINVAL; 483 } 484 485 return 0; 486 } 487 488 int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port, 489 u16 ethtype) 490 { 491 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 492 char spevet_pl[MLXSW_REG_SPEVET_LEN]; 493 u8 sver_type; 494 int err; 495 496 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type); 497 if (err) 498 return err; 499 500 mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type); 501 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl); 502 } 503 504 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 505 u16 vid, u16 ethtype) 506 { 507 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 508 char spvid_pl[MLXSW_REG_SPVID_LEN]; 509 u8 sver_type; 510 int err; 511 512 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type); 513 if (err) 514 return err; 515 516 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid, 517 sver_type); 518 519 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 520 } 521 522 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 523 bool allow) 524 { 525 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 526 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 527 528 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 529 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 530 } 531 532 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 533 u16 ethtype) 534 { 535 int err; 536 537 if (!vid) { 538 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 539 if (err) 540 return err; 541 } else { 542 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype); 543 if (err) 544 return err; 545 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 546 if (err) 547 goto err_port_allow_untagged_set; 548 } 549 550 mlxsw_sp_port->pvid = vid; 551 return 0; 552 553 err_port_allow_untagged_set: 554 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype); 555 return err; 556 } 557 558 static int 559 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 560 { 561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 562 char sspr_pl[MLXSW_REG_SSPR_LEN]; 563 564 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 565 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 566 } 567 568 static int 569 mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp, 570 u16 local_port, char *pmlp_pl, 571 struct mlxsw_sp_port_mapping *port_mapping) 572 { 573 bool separate_rxtx; 574 u8 first_lane; 575 u8 slot_index; 576 u8 module; 577 u8 width; 578 int i; 579 580 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 581 slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0); 582 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 583 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 584 first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 585 586 if (width && !is_power_of_2(width)) { 587 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n", 588 local_port); 589 return -EINVAL; 590 } 591 592 for (i = 0; i < width; i++) { 593 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 594 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 595 local_port); 596 return -EINVAL; 597 } 598 if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) { 599 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n", 600 local_port); 601 return -EINVAL; 602 } 603 if (separate_rxtx && 604 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 605 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 606 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 607 local_port); 608 return -EINVAL; 609 } 610 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) { 611 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 612 local_port); 613 return -EINVAL; 614 } 615 } 616 617 port_mapping->module = module; 618 port_mapping->slot_index = slot_index; 619 port_mapping->width = width; 620 port_mapping->module_width = width; 621 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 622 return 0; 623 } 624 625 static int 626 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, 627 struct mlxsw_sp_port_mapping *port_mapping) 628 { 629 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 630 int err; 631 632 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 633 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 634 if (err) 635 return err; 636 return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, 637 pmlp_pl, port_mapping); 638 } 639 640 static int 641 mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, 642 const struct mlxsw_sp_port_mapping *port_mapping) 643 { 644 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 645 int i, err; 646 647 mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index, 648 port_mapping->module); 649 650 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 651 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 652 for (i = 0; i < port_mapping->width; i++) { 653 mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i, 654 port_mapping->slot_index); 655 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 656 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 657 } 658 659 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 660 if (err) 661 goto err_pmlp_write; 662 return 0; 663 664 err_pmlp_write: 665 mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index, 666 port_mapping->module); 667 return err; 668 } 669 670 static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port, 671 u8 slot_index, u8 module) 672 { 673 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 674 675 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 676 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 677 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 678 mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module); 679 } 680 681 static int mlxsw_sp_port_open(struct net_device *dev) 682 { 683 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 684 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 685 int err; 686 687 err = mlxsw_env_module_port_up(mlxsw_sp->core, 688 mlxsw_sp_port->mapping.slot_index, 689 mlxsw_sp_port->mapping.module); 690 if (err) 691 return err; 692 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 693 if (err) 694 goto err_port_admin_status_set; 695 netif_start_queue(dev); 696 return 0; 697 698 err_port_admin_status_set: 699 mlxsw_env_module_port_down(mlxsw_sp->core, 700 mlxsw_sp_port->mapping.slot_index, 701 mlxsw_sp_port->mapping.module); 702 return err; 703 } 704 705 static int mlxsw_sp_port_stop(struct net_device *dev) 706 { 707 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 708 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 709 710 netif_stop_queue(dev); 711 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 712 mlxsw_env_module_port_down(mlxsw_sp->core, 713 mlxsw_sp_port->mapping.slot_index, 714 mlxsw_sp_port->mapping.module); 715 return 0; 716 } 717 718 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 719 struct net_device *dev) 720 { 721 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 722 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 723 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 724 const struct mlxsw_tx_info tx_info = { 725 .local_port = mlxsw_sp_port->local_port, 726 .is_emad = false, 727 }; 728 u64 len; 729 int err; 730 731 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 732 733 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 734 return NETDEV_TX_BUSY; 735 736 if (eth_skb_pad(skb)) { 737 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 738 return NETDEV_TX_OK; 739 } 740 741 err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb, 742 &tx_info); 743 if (err) 744 return NETDEV_TX_OK; 745 746 /* TX header is consumed by HW on the way so we shouldn't count its 747 * bytes as being sent. 748 */ 749 len = skb->len - MLXSW_TXHDR_LEN; 750 751 /* Due to a race we might fail here because of a full queue. In that 752 * unlikely case we simply drop the packet. 753 */ 754 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 755 756 if (!err) { 757 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 758 u64_stats_update_begin(&pcpu_stats->syncp); 759 pcpu_stats->tx_packets++; 760 pcpu_stats->tx_bytes += len; 761 u64_stats_update_end(&pcpu_stats->syncp); 762 } else { 763 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 764 dev_kfree_skb_any(skb); 765 } 766 return NETDEV_TX_OK; 767 } 768 769 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 770 { 771 } 772 773 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 774 { 775 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 776 struct sockaddr *addr = p; 777 int err; 778 779 if (!is_valid_ether_addr(addr->sa_data)) 780 return -EADDRNOTAVAIL; 781 782 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 783 if (err) 784 return err; 785 eth_hw_addr_set(dev, addr->sa_data); 786 return 0; 787 } 788 789 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 790 { 791 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 792 struct mlxsw_sp_hdroom orig_hdroom; 793 struct mlxsw_sp_hdroom hdroom; 794 int err; 795 796 orig_hdroom = *mlxsw_sp_port->hdroom; 797 798 hdroom = orig_hdroom; 799 hdroom.mtu = mtu; 800 mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom); 801 802 err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom); 803 if (err) { 804 netdev_err(dev, "Failed to configure port's headroom\n"); 805 return err; 806 } 807 808 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 809 if (err) 810 goto err_port_mtu_set; 811 WRITE_ONCE(dev->mtu, mtu); 812 return 0; 813 814 err_port_mtu_set: 815 mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom); 816 return err; 817 } 818 819 static int 820 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 821 struct rtnl_link_stats64 *stats) 822 { 823 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 824 struct mlxsw_sp_port_pcpu_stats *p; 825 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 826 u32 tx_dropped = 0; 827 unsigned int start; 828 int i; 829 830 for_each_possible_cpu(i) { 831 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 832 do { 833 start = u64_stats_fetch_begin(&p->syncp); 834 rx_packets = p->rx_packets; 835 rx_bytes = p->rx_bytes; 836 tx_packets = p->tx_packets; 837 tx_bytes = p->tx_bytes; 838 } while (u64_stats_fetch_retry(&p->syncp, start)); 839 840 stats->rx_packets += rx_packets; 841 stats->rx_bytes += rx_bytes; 842 stats->tx_packets += tx_packets; 843 stats->tx_bytes += tx_bytes; 844 /* tx_dropped is u32, updated without syncp protection. */ 845 tx_dropped += p->tx_dropped; 846 } 847 stats->tx_dropped = tx_dropped; 848 return 0; 849 } 850 851 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 852 { 853 switch (attr_id) { 854 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 855 return true; 856 } 857 858 return false; 859 } 860 861 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 862 void *sp) 863 { 864 switch (attr_id) { 865 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 866 return mlxsw_sp_port_get_sw_stats64(dev, sp); 867 } 868 869 return -EINVAL; 870 } 871 872 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 873 int prio, char *ppcnt_pl) 874 { 875 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 876 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 877 878 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 879 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 880 } 881 882 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 883 struct rtnl_link_stats64 *stats) 884 { 885 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 886 int err; 887 888 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 889 0, ppcnt_pl); 890 if (err) 891 goto out; 892 893 stats->tx_packets = 894 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 895 stats->rx_packets = 896 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 897 stats->tx_bytes = 898 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 899 stats->rx_bytes = 900 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 901 stats->multicast = 902 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 903 904 stats->rx_crc_errors = 905 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 906 stats->rx_frame_errors = 907 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 908 909 stats->rx_length_errors = ( 910 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 911 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 912 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 913 914 stats->rx_errors = (stats->rx_crc_errors + 915 stats->rx_frame_errors + stats->rx_length_errors); 916 917 out: 918 return err; 919 } 920 921 static void 922 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 923 struct mlxsw_sp_port_xstats *xstats) 924 { 925 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 926 int err, i; 927 928 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 929 ppcnt_pl); 930 if (!err) 931 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 932 933 for (i = 0; i < TC_MAX_QUEUE; i++) { 934 err = mlxsw_sp_port_get_stats_raw(dev, 935 MLXSW_REG_PPCNT_TC_CONG_CNT, 936 i, ppcnt_pl); 937 if (err) 938 goto tc_cnt; 939 940 xstats->wred_drop[i] = 941 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 942 xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl); 943 944 tc_cnt: 945 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 946 i, ppcnt_pl); 947 if (err) 948 continue; 949 950 xstats->backlog[i] = 951 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 952 xstats->tail_drop[i] = 953 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 954 } 955 956 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 957 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 958 i, ppcnt_pl); 959 if (err) 960 continue; 961 962 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 963 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 964 } 965 } 966 967 static void update_stats_cache(struct work_struct *work) 968 { 969 struct mlxsw_sp_port *mlxsw_sp_port = 970 container_of(work, struct mlxsw_sp_port, 971 periodic_hw_stats.update_dw.work); 972 973 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 974 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as 975 * necessary when port goes down. 976 */ 977 goto out; 978 979 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 980 &mlxsw_sp_port->periodic_hw_stats.stats); 981 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 982 &mlxsw_sp_port->periodic_hw_stats.xstats); 983 984 out: 985 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 986 MLXSW_HW_STATS_UPDATE_TIME); 987 } 988 989 /* Return the stats from a cache that is updated periodically, 990 * as this function might get called in an atomic context. 991 */ 992 static void 993 mlxsw_sp_port_get_stats64(struct net_device *dev, 994 struct rtnl_link_stats64 *stats) 995 { 996 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 997 998 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 999 } 1000 1001 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1002 u16 vid_begin, u16 vid_end, 1003 bool is_member, bool untagged) 1004 { 1005 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1006 char *spvm_pl; 1007 int err; 1008 1009 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1010 if (!spvm_pl) 1011 return -ENOMEM; 1012 1013 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1014 vid_end, is_member, untagged); 1015 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1016 kfree(spvm_pl); 1017 return err; 1018 } 1019 1020 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1021 u16 vid_end, bool is_member, bool untagged) 1022 { 1023 u16 vid, vid_e; 1024 int err; 1025 1026 for (vid = vid_begin; vid <= vid_end; 1027 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1028 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1029 vid_end); 1030 1031 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1032 is_member, untagged); 1033 if (err) 1034 return err; 1035 } 1036 1037 return 0; 1038 } 1039 1040 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1041 bool flush_default) 1042 { 1043 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1044 1045 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1046 &mlxsw_sp_port->vlans_list, list) { 1047 if (!flush_default && 1048 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 1049 continue; 1050 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1051 } 1052 } 1053 1054 static void 1055 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1056 { 1057 if (mlxsw_sp_port_vlan->bridge_port) 1058 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1059 else if (mlxsw_sp_port_vlan->fid) 1060 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 1061 } 1062 1063 struct mlxsw_sp_port_vlan * 1064 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 1065 { 1066 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1067 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 1068 int err; 1069 1070 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1071 if (mlxsw_sp_port_vlan) 1072 return ERR_PTR(-EEXIST); 1073 1074 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 1075 if (err) 1076 return ERR_PTR(err); 1077 1078 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1079 if (!mlxsw_sp_port_vlan) { 1080 err = -ENOMEM; 1081 goto err_port_vlan_alloc; 1082 } 1083 1084 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1085 mlxsw_sp_port_vlan->vid = vid; 1086 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1087 1088 return mlxsw_sp_port_vlan; 1089 1090 err_port_vlan_alloc: 1091 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1092 return ERR_PTR(err); 1093 } 1094 1095 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1096 { 1097 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1098 u16 vid = mlxsw_sp_port_vlan->vid; 1099 1100 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1101 list_del(&mlxsw_sp_port_vlan->list); 1102 kfree(mlxsw_sp_port_vlan); 1103 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1104 } 1105 1106 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1107 __be16 __always_unused proto, u16 vid) 1108 { 1109 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1110 1111 /* VLAN 0 is added to HW filter when device goes up, but it is 1112 * reserved in our case, so simply return. 1113 */ 1114 if (!vid) 1115 return 0; 1116 1117 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1118 } 1119 1120 int mlxsw_sp_port_kill_vid(struct net_device *dev, 1121 __be16 __always_unused proto, u16 vid) 1122 { 1123 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1124 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1125 1126 /* VLAN 0 is removed from HW filter when device goes down, but 1127 * it is reserved in our case, so simply return. 1128 */ 1129 if (!vid) 1130 return 0; 1131 1132 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1133 if (!mlxsw_sp_port_vlan) 1134 return 0; 1135 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1136 1137 return 0; 1138 } 1139 1140 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1141 struct flow_block_offload *f) 1142 { 1143 switch (f->binder_type) { 1144 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: 1145 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true); 1146 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: 1147 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false); 1148 case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP: 1149 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f); 1150 case FLOW_BLOCK_BINDER_TYPE_RED_MARK: 1151 return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f); 1152 default: 1153 return -EOPNOTSUPP; 1154 } 1155 } 1156 1157 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1158 void *type_data) 1159 { 1160 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1161 1162 switch (type) { 1163 case TC_SETUP_BLOCK: 1164 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1165 case TC_SETUP_QDISC_RED: 1166 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1167 case TC_SETUP_QDISC_PRIO: 1168 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1169 case TC_SETUP_QDISC_ETS: 1170 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data); 1171 case TC_SETUP_QDISC_TBF: 1172 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data); 1173 case TC_SETUP_QDISC_FIFO: 1174 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data); 1175 default: 1176 return -EOPNOTSUPP; 1177 } 1178 } 1179 1180 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1181 { 1182 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1183 1184 if (!enable) { 1185 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) || 1186 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) { 1187 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1188 return -EINVAL; 1189 } 1190 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block); 1191 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block); 1192 } else { 1193 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block); 1194 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block); 1195 } 1196 return 0; 1197 } 1198 1199 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable) 1200 { 1201 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1202 char pplr_pl[MLXSW_REG_PPLR_LEN]; 1203 int err; 1204 1205 if (netif_running(dev)) 1206 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1207 1208 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable); 1209 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr), 1210 pplr_pl); 1211 1212 if (netif_running(dev)) 1213 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1214 1215 return err; 1216 } 1217 1218 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1219 1220 static int mlxsw_sp_handle_feature(struct net_device *dev, 1221 netdev_features_t wanted_features, 1222 netdev_features_t feature, 1223 mlxsw_sp_feature_handler feature_handler) 1224 { 1225 netdev_features_t changes = wanted_features ^ dev->features; 1226 bool enable = !!(wanted_features & feature); 1227 int err; 1228 1229 if (!(changes & feature)) 1230 return 0; 1231 1232 err = feature_handler(dev, enable); 1233 if (err) { 1234 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1235 enable ? "Enable" : "Disable", &feature, err); 1236 return err; 1237 } 1238 1239 if (enable) 1240 dev->features |= feature; 1241 else 1242 dev->features &= ~feature; 1243 1244 return 0; 1245 } 1246 static int mlxsw_sp_set_features(struct net_device *dev, 1247 netdev_features_t features) 1248 { 1249 netdev_features_t oper_features = dev->features; 1250 int err = 0; 1251 1252 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1253 mlxsw_sp_feature_hw_tc); 1254 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK, 1255 mlxsw_sp_feature_loopback); 1256 1257 if (err) { 1258 dev->features = oper_features; 1259 return -EINVAL; 1260 } 1261 1262 return 0; 1263 } 1264 1265 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, 1266 struct ifreq *ifr) 1267 { 1268 struct hwtstamp_config config; 1269 int err; 1270 1271 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1272 return -EFAULT; 1273 1274 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, 1275 &config); 1276 if (err) 1277 return err; 1278 1279 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1280 return -EFAULT; 1281 1282 return 0; 1283 } 1284 1285 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, 1286 struct ifreq *ifr) 1287 { 1288 struct hwtstamp_config config; 1289 int err; 1290 1291 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, 1292 &config); 1293 if (err) 1294 return err; 1295 1296 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1297 return -EFAULT; 1298 1299 return 0; 1300 } 1301 1302 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) 1303 { 1304 struct hwtstamp_config config = {0}; 1305 1306 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1307 } 1308 1309 static int 1310 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1311 { 1312 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1313 1314 switch (cmd) { 1315 case SIOCSHWTSTAMP: 1316 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1317 case SIOCGHWTSTAMP: 1318 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1319 default: 1320 return -EOPNOTSUPP; 1321 } 1322 } 1323 1324 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1325 .ndo_open = mlxsw_sp_port_open, 1326 .ndo_stop = mlxsw_sp_port_stop, 1327 .ndo_start_xmit = mlxsw_sp_port_xmit, 1328 .ndo_setup_tc = mlxsw_sp_setup_tc, 1329 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1330 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1331 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1332 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1333 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1334 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1335 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1336 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1337 .ndo_set_features = mlxsw_sp_set_features, 1338 .ndo_eth_ioctl = mlxsw_sp_port_ioctl, 1339 }; 1340 1341 static int 1342 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) 1343 { 1344 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1345 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 1346 const struct mlxsw_sp_port_type_speed_ops *ops; 1347 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1348 u32 eth_proto_cap_masked; 1349 int err; 1350 1351 ops = mlxsw_sp->port_type_speed_ops; 1352 1353 /* Set advertised speeds to speeds supported by both the driver 1354 * and the device. 1355 */ 1356 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 1357 0, false); 1358 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1359 if (err) 1360 return err; 1361 1362 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 1363 ð_proto_admin, ð_proto_oper); 1364 eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap); 1365 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 1366 eth_proto_cap_masked, 1367 mlxsw_sp_port->link.autoneg); 1368 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1369 } 1370 1371 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed) 1372 { 1373 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; 1374 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1375 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1376 u32 eth_proto_oper; 1377 int err; 1378 1379 port_type_speed_ops = mlxsw_sp->port_type_speed_ops; 1380 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, 1381 mlxsw_sp_port->local_port, 0, 1382 false); 1383 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1384 if (err) 1385 return err; 1386 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL, 1387 ð_proto_oper); 1388 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper); 1389 return 0; 1390 } 1391 1392 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 1393 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 1394 bool dwrr, u8 dwrr_weight) 1395 { 1396 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1397 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1398 1399 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1400 next_index); 1401 mlxsw_reg_qeec_de_set(qeec_pl, true); 1402 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 1403 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 1404 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1405 } 1406 1407 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 1408 enum mlxsw_reg_qeec_hr hr, u8 index, 1409 u8 next_index, u32 maxrate, u8 burst_size) 1410 { 1411 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1412 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1413 1414 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1415 next_index); 1416 mlxsw_reg_qeec_mase_set(qeec_pl, true); 1417 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 1418 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size); 1419 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1420 } 1421 1422 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 1423 enum mlxsw_reg_qeec_hr hr, u8 index, 1424 u8 next_index, u32 minrate) 1425 { 1426 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1427 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1428 1429 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1430 next_index); 1431 mlxsw_reg_qeec_mise_set(qeec_pl, true); 1432 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 1433 1434 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1435 } 1436 1437 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 1438 u8 switch_prio, u8 tclass) 1439 { 1440 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1441 char qtct_pl[MLXSW_REG_QTCT_LEN]; 1442 1443 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 1444 tclass); 1445 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 1446 } 1447 1448 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 1449 { 1450 int err, i; 1451 1452 /* Setup the elements hierarcy, so that each TC is linked to 1453 * one subgroup, which are all member in the same group. 1454 */ 1455 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1456 MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0); 1457 if (err) 1458 return err; 1459 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1460 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1461 MLXSW_REG_QEEC_HR_SUBGROUP, i, 1462 0, false, 0); 1463 if (err) 1464 return err; 1465 } 1466 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1467 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1468 MLXSW_REG_QEEC_HR_TC, i, i, 1469 false, 0); 1470 if (err) 1471 return err; 1472 1473 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1474 MLXSW_REG_QEEC_HR_TC, 1475 i + 8, i, 1476 true, 100); 1477 if (err) 1478 return err; 1479 } 1480 1481 /* Make sure the max shaper is disabled in all hierarchies that support 1482 * it. Note that this disables ptps (PTP shaper), but that is intended 1483 * for the initial configuration. 1484 */ 1485 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1486 MLXSW_REG_QEEC_HR_PORT, 0, 0, 1487 MLXSW_REG_QEEC_MAS_DIS, 0); 1488 if (err) 1489 return err; 1490 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1491 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1492 MLXSW_REG_QEEC_HR_SUBGROUP, 1493 i, 0, 1494 MLXSW_REG_QEEC_MAS_DIS, 0); 1495 if (err) 1496 return err; 1497 } 1498 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1499 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1500 MLXSW_REG_QEEC_HR_TC, 1501 i, i, 1502 MLXSW_REG_QEEC_MAS_DIS, 0); 1503 if (err) 1504 return err; 1505 1506 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1507 MLXSW_REG_QEEC_HR_TC, 1508 i + 8, i, 1509 MLXSW_REG_QEEC_MAS_DIS, 0); 1510 if (err) 1511 return err; 1512 } 1513 1514 /* Configure the min shaper for multicast TCs. */ 1515 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1516 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 1517 MLXSW_REG_QEEC_HR_TC, 1518 i + 8, i, 1519 MLXSW_REG_QEEC_MIS_MIN); 1520 if (err) 1521 return err; 1522 } 1523 1524 /* Map all priorities to traffic class 0. */ 1525 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1526 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 1527 if (err) 1528 return err; 1529 } 1530 1531 return 0; 1532 } 1533 1534 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 1535 bool enable) 1536 { 1537 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1538 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 1539 1540 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 1541 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 1542 } 1543 1544 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port) 1545 { 1546 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1547 u8 slot_index = mlxsw_sp_port->mapping.slot_index; 1548 u8 module = mlxsw_sp_port->mapping.module; 1549 u64 overheat_counter; 1550 int err; 1551 1552 err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index, 1553 module, &overheat_counter); 1554 if (err) 1555 return err; 1556 1557 mlxsw_sp_port->module_overheat_initial_val = overheat_counter; 1558 return 0; 1559 } 1560 1561 int 1562 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, 1563 bool is_8021ad_tagged, 1564 bool is_8021q_tagged) 1565 { 1566 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1567 char spvc_pl[MLXSW_REG_SPVC_LEN]; 1568 1569 mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port, 1570 is_8021ad_tagged, is_8021q_tagged); 1571 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl); 1572 } 1573 1574 static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp, 1575 u16 local_port, u8 *port_number, 1576 u8 *split_port_subnumber, 1577 u8 *slot_index) 1578 { 1579 char pllp_pl[MLXSW_REG_PLLP_LEN]; 1580 int err; 1581 1582 mlxsw_reg_pllp_pack(pllp_pl, local_port); 1583 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl); 1584 if (err) 1585 return err; 1586 mlxsw_reg_pllp_unpack(pllp_pl, port_number, 1587 split_port_subnumber, slot_index); 1588 return 0; 1589 } 1590 1591 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, 1592 bool split, 1593 struct mlxsw_sp_port_mapping *port_mapping) 1594 { 1595 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1596 struct mlxsw_sp_port *mlxsw_sp_port; 1597 u32 lanes = port_mapping->width; 1598 u8 split_port_subnumber; 1599 struct net_device *dev; 1600 u8 port_number; 1601 u8 slot_index; 1602 bool splittable; 1603 int err; 1604 1605 err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping); 1606 if (err) { 1607 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 1608 local_port); 1609 return err; 1610 } 1611 1612 err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0); 1613 if (err) { 1614 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 1615 local_port); 1616 goto err_port_swid_set; 1617 } 1618 1619 err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number, 1620 &split_port_subnumber, &slot_index); 1621 if (err) { 1622 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n", 1623 local_port); 1624 goto err_port_label_info_get; 1625 } 1626 1627 splittable = lanes > 1 && !split; 1628 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index, 1629 port_number, split, split_port_subnumber, 1630 splittable, lanes, mlxsw_sp->base_mac, 1631 sizeof(mlxsw_sp->base_mac)); 1632 if (err) { 1633 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 1634 local_port); 1635 goto err_core_port_init; 1636 } 1637 1638 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 1639 if (!dev) { 1640 err = -ENOMEM; 1641 goto err_alloc_etherdev; 1642 } 1643 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 1644 dev_net_set(dev, mlxsw_sp_net(mlxsw_sp)); 1645 mlxsw_sp_port = netdev_priv(dev); 1646 mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port, 1647 mlxsw_sp_port, dev); 1648 mlxsw_sp_port->dev = dev; 1649 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1650 mlxsw_sp_port->local_port = local_port; 1651 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 1652 mlxsw_sp_port->split = split; 1653 mlxsw_sp_port->mapping = *port_mapping; 1654 mlxsw_sp_port->link.autoneg = 1; 1655 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 1656 1657 mlxsw_sp_port->pcpu_stats = 1658 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 1659 if (!mlxsw_sp_port->pcpu_stats) { 1660 err = -ENOMEM; 1661 goto err_alloc_stats; 1662 } 1663 1664 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1665 &update_stats_cache); 1666 1667 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 1668 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 1669 1670 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 1671 if (err) { 1672 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 1673 mlxsw_sp_port->local_port); 1674 goto err_dev_addr_init; 1675 } 1676 1677 netif_carrier_off(dev); 1678 1679 dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER | 1680 NETIF_F_HW_TC; 1681 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 1682 dev->lltx = true; 1683 dev->netns_local = true; 1684 1685 dev->min_mtu = ETH_MIN_MTU; 1686 dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR; 1687 1688 /* Each packet needs to have a Tx header (metadata) on top all other 1689 * headers. 1690 */ 1691 dev->needed_headroom = MLXSW_TXHDR_LEN; 1692 1693 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 1694 if (err) { 1695 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 1696 mlxsw_sp_port->local_port); 1697 goto err_port_system_port_mapping_set; 1698 } 1699 1700 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port); 1701 if (err) { 1702 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 1703 mlxsw_sp_port->local_port); 1704 goto err_port_speed_by_width_set; 1705 } 1706 1707 err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port, 1708 &mlxsw_sp_port->max_speed); 1709 if (err) { 1710 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n", 1711 mlxsw_sp_port->local_port); 1712 goto err_max_speed_get; 1713 } 1714 1715 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 1716 if (err) { 1717 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 1718 mlxsw_sp_port->local_port); 1719 goto err_port_mtu_set; 1720 } 1721 1722 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1723 if (err) 1724 goto err_port_admin_status_set; 1725 1726 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 1727 if (err) { 1728 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 1729 mlxsw_sp_port->local_port); 1730 goto err_port_buffers_init; 1731 } 1732 1733 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 1734 if (err) { 1735 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 1736 mlxsw_sp_port->local_port); 1737 goto err_port_ets_init; 1738 } 1739 1740 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 1741 if (err) { 1742 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 1743 mlxsw_sp_port->local_port); 1744 goto err_port_tc_mc_mode; 1745 } 1746 1747 /* ETS and buffers must be initialized before DCB. */ 1748 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 1749 if (err) { 1750 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 1751 mlxsw_sp_port->local_port); 1752 goto err_port_dcb_init; 1753 } 1754 1755 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 1756 if (err) { 1757 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 1758 mlxsw_sp_port->local_port); 1759 goto err_port_fids_init; 1760 } 1761 1762 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 1763 if (err) { 1764 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 1765 mlxsw_sp_port->local_port); 1766 goto err_port_qdiscs_init; 1767 } 1768 1769 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false, 1770 false); 1771 if (err) { 1772 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n", 1773 mlxsw_sp_port->local_port); 1774 goto err_port_vlan_clear; 1775 } 1776 1777 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 1778 if (err) { 1779 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 1780 mlxsw_sp_port->local_port); 1781 goto err_port_nve_init; 1782 } 1783 1784 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, 1785 ETH_P_8021Q); 1786 if (err) { 1787 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 1788 mlxsw_sp_port->local_port); 1789 goto err_port_pvid_set; 1790 } 1791 1792 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 1793 MLXSW_SP_DEFAULT_VID); 1794 if (IS_ERR(mlxsw_sp_port_vlan)) { 1795 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 1796 mlxsw_sp_port->local_port); 1797 err = PTR_ERR(mlxsw_sp_port_vlan); 1798 goto err_port_vlan_create; 1799 } 1800 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 1801 1802 /* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat 1803 * only packets with 802.1q header as tagged packets. 1804 */ 1805 err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true); 1806 if (err) { 1807 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n", 1808 local_port); 1809 goto err_port_vlan_classification_set; 1810 } 1811 1812 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 1813 mlxsw_sp->ptp_ops->shaper_work); 1814 1815 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 1816 1817 err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port); 1818 if (err) { 1819 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n", 1820 mlxsw_sp_port->local_port); 1821 goto err_port_overheat_init_val_set; 1822 } 1823 1824 err = register_netdev(dev); 1825 if (err) { 1826 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 1827 mlxsw_sp_port->local_port); 1828 goto err_register_netdev; 1829 } 1830 1831 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 1832 return 0; 1833 1834 err_register_netdev: 1835 err_port_overheat_init_val_set: 1836 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); 1837 err_port_vlan_classification_set: 1838 mlxsw_sp->ports[local_port] = NULL; 1839 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1840 err_port_vlan_create: 1841 err_port_pvid_set: 1842 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1843 err_port_nve_init: 1844 err_port_vlan_clear: 1845 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1846 err_port_qdiscs_init: 1847 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1848 err_port_fids_init: 1849 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1850 err_port_dcb_init: 1851 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1852 err_port_tc_mc_mode: 1853 err_port_ets_init: 1854 mlxsw_sp_port_buffers_fini(mlxsw_sp_port); 1855 err_port_buffers_init: 1856 err_port_admin_status_set: 1857 err_port_mtu_set: 1858 err_max_speed_get: 1859 err_port_speed_by_width_set: 1860 err_port_system_port_mapping_set: 1861 err_dev_addr_init: 1862 free_percpu(mlxsw_sp_port->pcpu_stats); 1863 err_alloc_stats: 1864 free_netdev(dev); 1865 err_alloc_etherdev: 1866 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1867 err_core_port_init: 1868 err_port_label_info_get: 1869 mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 1870 MLXSW_PORT_SWID_DISABLED_PORT); 1871 err_port_swid_set: 1872 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, 1873 port_mapping->slot_index, 1874 port_mapping->module); 1875 return err; 1876 } 1877 1878 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) 1879 { 1880 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1881 u8 slot_index = mlxsw_sp_port->mapping.slot_index; 1882 u8 module = mlxsw_sp_port->mapping.module; 1883 1884 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 1885 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 1886 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1887 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 1888 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); 1889 mlxsw_sp->ports[local_port] = NULL; 1890 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 1891 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1892 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1893 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1894 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1895 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1896 mlxsw_sp_port_buffers_fini(mlxsw_sp_port); 1897 free_percpu(mlxsw_sp_port->pcpu_stats); 1898 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 1899 free_netdev(mlxsw_sp_port->dev); 1900 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1901 mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 1902 MLXSW_PORT_SWID_DISABLED_PORT); 1903 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module); 1904 } 1905 1906 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 1907 { 1908 struct mlxsw_sp_port *mlxsw_sp_port; 1909 int err; 1910 1911 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 1912 if (!mlxsw_sp_port) 1913 return -ENOMEM; 1914 1915 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1916 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 1917 1918 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 1919 mlxsw_sp_port, 1920 mlxsw_sp->base_mac, 1921 sizeof(mlxsw_sp->base_mac)); 1922 if (err) { 1923 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 1924 goto err_core_cpu_port_init; 1925 } 1926 1927 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 1928 return 0; 1929 1930 err_core_cpu_port_init: 1931 kfree(mlxsw_sp_port); 1932 return err; 1933 } 1934 1935 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 1936 { 1937 struct mlxsw_sp_port *mlxsw_sp_port = 1938 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 1939 1940 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 1941 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 1942 kfree(mlxsw_sp_port); 1943 } 1944 1945 static bool mlxsw_sp_local_port_valid(u16 local_port) 1946 { 1947 return local_port != MLXSW_PORT_CPU_PORT; 1948 } 1949 1950 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port) 1951 { 1952 if (!mlxsw_sp_local_port_valid(local_port)) 1953 return false; 1954 return mlxsw_sp->ports[local_port] != NULL; 1955 } 1956 1957 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp, 1958 u16 local_port, bool enable) 1959 { 1960 char pmecr_pl[MLXSW_REG_PMECR_LEN]; 1961 1962 mlxsw_reg_pmecr_pack(pmecr_pl, local_port, 1963 enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT : 1964 MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT); 1965 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl); 1966 } 1967 1968 struct mlxsw_sp_port_mapping_event { 1969 struct list_head list; 1970 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 1971 }; 1972 1973 static void mlxsw_sp_port_mapping_events_work(struct work_struct *work) 1974 { 1975 struct mlxsw_sp_port_mapping_event *event, *next_event; 1976 struct mlxsw_sp_port_mapping_events *events; 1977 struct mlxsw_sp_port_mapping port_mapping; 1978 struct mlxsw_sp *mlxsw_sp; 1979 struct devlink *devlink; 1980 LIST_HEAD(event_queue); 1981 u16 local_port; 1982 int err; 1983 1984 events = container_of(work, struct mlxsw_sp_port_mapping_events, work); 1985 mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events); 1986 devlink = priv_to_devlink(mlxsw_sp->core); 1987 1988 spin_lock_bh(&events->queue_lock); 1989 list_splice_init(&events->queue, &event_queue); 1990 spin_unlock_bh(&events->queue_lock); 1991 1992 list_for_each_entry_safe(event, next_event, &event_queue, list) { 1993 local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl); 1994 err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, 1995 event->pmlp_pl, &port_mapping); 1996 if (err) 1997 goto out; 1998 1999 if (WARN_ON_ONCE(!port_mapping.width)) 2000 goto out; 2001 2002 devl_lock(devlink); 2003 2004 if (!mlxsw_sp_port_created(mlxsw_sp, local_port)) 2005 mlxsw_sp_port_create(mlxsw_sp, local_port, 2006 false, &port_mapping); 2007 else 2008 WARN_ON_ONCE(1); 2009 2010 devl_unlock(devlink); 2011 2012 mlxsw_sp->port_mapping[local_port] = port_mapping; 2013 2014 out: 2015 kfree(event); 2016 } 2017 } 2018 2019 static void 2020 mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg, 2021 char *pmlp_pl, void *priv) 2022 { 2023 struct mlxsw_sp_port_mapping_events *events; 2024 struct mlxsw_sp_port_mapping_event *event; 2025 struct mlxsw_sp *mlxsw_sp = priv; 2026 u16 local_port; 2027 2028 local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl); 2029 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) 2030 return; 2031 2032 events = &mlxsw_sp->port_mapping_events; 2033 event = kmalloc(sizeof(*event), GFP_ATOMIC); 2034 if (!event) 2035 return; 2036 memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl)); 2037 spin_lock(&events->queue_lock); 2038 list_add_tail(&event->list, &events->queue); 2039 spin_unlock(&events->queue_lock); 2040 mlxsw_core_schedule_work(&events->work); 2041 } 2042 2043 static void 2044 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp) 2045 { 2046 struct mlxsw_sp_port_mapping_event *event, *next_event; 2047 struct mlxsw_sp_port_mapping_events *events; 2048 2049 events = &mlxsw_sp->port_mapping_events; 2050 2051 /* Caller needs to make sure that no new event is going to appear. */ 2052 cancel_work_sync(&events->work); 2053 list_for_each_entry_safe(event, next_event, &events->queue, list) { 2054 list_del(&event->list); 2055 kfree(event); 2056 } 2057 } 2058 2059 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 2060 { 2061 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2062 int i; 2063 2064 for (i = 1; i < max_ports; i++) 2065 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); 2066 /* Make sure all scheduled events are processed */ 2067 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); 2068 2069 for (i = 1; i < max_ports; i++) 2070 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2071 mlxsw_sp_port_remove(mlxsw_sp, i); 2072 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2073 kfree(mlxsw_sp->ports); 2074 mlxsw_sp->ports = NULL; 2075 } 2076 2077 static void 2078 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core, 2079 bool (*selector)(void *priv, u16 local_port), 2080 void *priv) 2081 { 2082 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2083 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core); 2084 int i; 2085 2086 for (i = 1; i < max_ports; i++) 2087 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i)) 2088 mlxsw_sp_port_remove(mlxsw_sp, i); 2089 } 2090 2091 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 2092 { 2093 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2094 struct mlxsw_sp_port_mapping_events *events; 2095 struct mlxsw_sp_port_mapping *port_mapping; 2096 size_t alloc_size; 2097 int i; 2098 int err; 2099 2100 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 2101 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 2102 if (!mlxsw_sp->ports) 2103 return -ENOMEM; 2104 2105 events = &mlxsw_sp->port_mapping_events; 2106 INIT_LIST_HEAD(&events->queue); 2107 spin_lock_init(&events->queue_lock); 2108 INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work); 2109 2110 for (i = 1; i < max_ports; i++) { 2111 err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true); 2112 if (err) 2113 goto err_event_enable; 2114 } 2115 2116 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 2117 if (err) 2118 goto err_cpu_port_create; 2119 2120 for (i = 1; i < max_ports; i++) { 2121 port_mapping = &mlxsw_sp->port_mapping[i]; 2122 if (!port_mapping->width) 2123 continue; 2124 err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping); 2125 if (err) 2126 goto err_port_create; 2127 } 2128 return 0; 2129 2130 err_port_create: 2131 for (i--; i >= 1; i--) 2132 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2133 mlxsw_sp_port_remove(mlxsw_sp, i); 2134 i = max_ports; 2135 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2136 err_cpu_port_create: 2137 err_event_enable: 2138 for (i--; i >= 1; i--) 2139 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); 2140 /* Make sure all scheduled events are processed */ 2141 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); 2142 kfree(mlxsw_sp->ports); 2143 mlxsw_sp->ports = NULL; 2144 return err; 2145 } 2146 2147 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 2148 { 2149 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2150 struct mlxsw_sp_port_mapping *port_mapping; 2151 int i; 2152 int err; 2153 2154 mlxsw_sp->port_mapping = kcalloc(max_ports, 2155 sizeof(struct mlxsw_sp_port_mapping), 2156 GFP_KERNEL); 2157 if (!mlxsw_sp->port_mapping) 2158 return -ENOMEM; 2159 2160 for (i = 1; i < max_ports; i++) { 2161 port_mapping = &mlxsw_sp->port_mapping[i]; 2162 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping); 2163 if (err) 2164 goto err_port_module_info_get; 2165 } 2166 return 0; 2167 2168 err_port_module_info_get: 2169 kfree(mlxsw_sp->port_mapping); 2170 return err; 2171 } 2172 2173 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 2174 { 2175 kfree(mlxsw_sp->port_mapping); 2176 } 2177 2178 static int 2179 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, 2180 struct mlxsw_sp_port_mapping *port_mapping, 2181 unsigned int count, const char *pmtdb_pl) 2182 { 2183 struct mlxsw_sp_port_mapping split_port_mapping; 2184 int err, i; 2185 2186 split_port_mapping = *port_mapping; 2187 split_port_mapping.width /= count; 2188 for (i = 0; i < count; i++) { 2189 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2190 2191 if (!mlxsw_sp_local_port_valid(s_local_port)) 2192 continue; 2193 2194 err = mlxsw_sp_port_create(mlxsw_sp, s_local_port, 2195 true, &split_port_mapping); 2196 if (err) 2197 goto err_port_create; 2198 split_port_mapping.lane += split_port_mapping.width; 2199 } 2200 2201 return 0; 2202 2203 err_port_create: 2204 for (i--; i >= 0; i--) { 2205 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2206 2207 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2208 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2209 } 2210 return err; 2211 } 2212 2213 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 2214 unsigned int count, 2215 const char *pmtdb_pl) 2216 { 2217 struct mlxsw_sp_port_mapping *port_mapping; 2218 int i; 2219 2220 /* Go over original unsplit ports in the gap and recreate them. */ 2221 for (i = 0; i < count; i++) { 2222 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2223 2224 port_mapping = &mlxsw_sp->port_mapping[local_port]; 2225 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port)) 2226 continue; 2227 mlxsw_sp_port_create(mlxsw_sp, local_port, 2228 false, port_mapping); 2229 } 2230 } 2231 2232 static struct mlxsw_sp_port * 2233 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port) 2234 { 2235 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 2236 return mlxsw_sp->ports[local_port]; 2237 return NULL; 2238 } 2239 2240 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, 2241 unsigned int count, 2242 struct netlink_ext_ack *extack) 2243 { 2244 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2245 struct mlxsw_sp_port_mapping port_mapping; 2246 struct mlxsw_sp_port *mlxsw_sp_port; 2247 enum mlxsw_reg_pmtdb_status status; 2248 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2249 int i; 2250 int err; 2251 2252 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2253 if (!mlxsw_sp_port) { 2254 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2255 local_port); 2256 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2257 return -EINVAL; 2258 } 2259 2260 if (mlxsw_sp_port->split) { 2261 NL_SET_ERR_MSG_MOD(extack, "Port is already split"); 2262 return -EINVAL; 2263 } 2264 2265 mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, 2266 mlxsw_sp_port->mapping.module, 2267 mlxsw_sp_port->mapping.module_width / count, 2268 count); 2269 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2270 if (err) { 2271 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2272 return err; 2273 } 2274 2275 status = mlxsw_reg_pmtdb_status_get(pmtdb_pl); 2276 if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) { 2277 NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration"); 2278 return -EINVAL; 2279 } 2280 2281 port_mapping = mlxsw_sp_port->mapping; 2282 2283 for (i = 0; i < count; i++) { 2284 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2285 2286 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2287 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2288 } 2289 2290 err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping, 2291 count, pmtdb_pl); 2292 if (err) { 2293 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2294 goto err_port_split_create; 2295 } 2296 2297 return 0; 2298 2299 err_port_split_create: 2300 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2301 2302 return err; 2303 } 2304 2305 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port, 2306 struct netlink_ext_ack *extack) 2307 { 2308 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2309 struct mlxsw_sp_port *mlxsw_sp_port; 2310 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2311 unsigned int count; 2312 int i; 2313 int err; 2314 2315 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2316 if (!mlxsw_sp_port) { 2317 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2318 local_port); 2319 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2320 return -EINVAL; 2321 } 2322 2323 if (!mlxsw_sp_port->split) { 2324 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2325 return -EINVAL; 2326 } 2327 2328 count = mlxsw_sp_port->mapping.module_width / 2329 mlxsw_sp_port->mapping.width; 2330 2331 mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, 2332 mlxsw_sp_port->mapping.module, 2333 mlxsw_sp_port->mapping.module_width / count, 2334 count); 2335 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2336 if (err) { 2337 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2338 return err; 2339 } 2340 2341 for (i = 0; i < count; i++) { 2342 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2343 2344 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2345 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2346 } 2347 2348 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2349 2350 return 0; 2351 } 2352 2353 static void 2354 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 2355 { 2356 int i; 2357 2358 for (i = 0; i < TC_MAX_QUEUE; i++) 2359 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 2360 } 2361 2362 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2363 char *pude_pl, void *priv) 2364 { 2365 struct mlxsw_sp *mlxsw_sp = priv; 2366 struct mlxsw_sp_port *mlxsw_sp_port; 2367 enum mlxsw_reg_pude_oper_status status; 2368 u16 local_port; 2369 2370 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2371 2372 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) 2373 return; 2374 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2375 if (!mlxsw_sp_port) 2376 return; 2377 2378 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2379 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2380 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2381 netif_carrier_on(mlxsw_sp_port->dev); 2382 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 2383 } else { 2384 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2385 netif_carrier_off(mlxsw_sp_port->dev); 2386 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 2387 } 2388 } 2389 2390 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 2391 char *mtpptr_pl, bool ingress) 2392 { 2393 u16 local_port; 2394 u8 num_rec; 2395 int i; 2396 2397 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 2398 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 2399 for (i = 0; i < num_rec; i++) { 2400 u8 domain_number; 2401 u8 message_type; 2402 u16 sequence_id; 2403 u64 timestamp; 2404 2405 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 2406 &domain_number, &sequence_id, 2407 ×tamp); 2408 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 2409 message_type, domain_number, 2410 sequence_id, timestamp); 2411 } 2412 } 2413 2414 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 2415 char *mtpptr_pl, void *priv) 2416 { 2417 struct mlxsw_sp *mlxsw_sp = priv; 2418 2419 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 2420 } 2421 2422 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 2423 char *mtpptr_pl, void *priv) 2424 { 2425 struct mlxsw_sp *mlxsw_sp = priv; 2426 2427 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 2428 } 2429 2430 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2431 u16 local_port, void *priv) 2432 { 2433 struct mlxsw_sp *mlxsw_sp = priv; 2434 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2435 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2436 2437 if (unlikely(!mlxsw_sp_port)) { 2438 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2439 local_port); 2440 return; 2441 } 2442 2443 skb->dev = mlxsw_sp_port->dev; 2444 2445 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2446 u64_stats_update_begin(&pcpu_stats->syncp); 2447 pcpu_stats->rx_packets++; 2448 pcpu_stats->rx_bytes += skb->len; 2449 u64_stats_update_end(&pcpu_stats->syncp); 2450 2451 skb->protocol = eth_type_trans(skb, skb->dev); 2452 netif_receive_skb(skb); 2453 } 2454 2455 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port, 2456 void *priv) 2457 { 2458 skb->offload_fwd_mark = 1; 2459 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2460 } 2461 2462 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2463 u16 local_port, void *priv) 2464 { 2465 skb->offload_l3_fwd_mark = 1; 2466 skb->offload_fwd_mark = 1; 2467 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2468 } 2469 2470 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2471 u16 local_port) 2472 { 2473 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2474 } 2475 2476 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2477 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2478 _is_ctrl, SP_##_trap_group, DISCARD) 2479 2480 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2481 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2482 _is_ctrl, SP_##_trap_group, DISCARD) 2483 2484 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2485 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2486 _is_ctrl, SP_##_trap_group, DISCARD) 2487 2488 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2489 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2490 2491 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2492 /* Events */ 2493 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2494 /* L2 traps */ 2495 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2496 /* L3 traps */ 2497 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2498 false), 2499 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2500 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2501 false), 2502 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2503 ROUTER_EXP, false), 2504 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2505 ROUTER_EXP, false), 2506 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2507 ROUTER_EXP, false), 2508 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2509 ROUTER_EXP, false), 2510 /* Multicast Router Traps */ 2511 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2512 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2513 /* NVE traps */ 2514 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), 2515 }; 2516 2517 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2518 /* Events */ 2519 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2520 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2521 }; 2522 2523 static const struct mlxsw_listener mlxsw_sp2_listener[] = { 2524 /* Events */ 2525 MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE), 2526 }; 2527 2528 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2529 { 2530 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2531 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2532 enum mlxsw_reg_qpcr_ir_units ir_units; 2533 int max_cpu_policers; 2534 bool is_bytes; 2535 u8 burst_size; 2536 u32 rate; 2537 int i, err; 2538 2539 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2540 return -EIO; 2541 2542 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2543 2544 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2545 for (i = 0; i < max_cpu_policers; i++) { 2546 is_bytes = false; 2547 switch (i) { 2548 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2549 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2550 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2551 rate = 1024; 2552 burst_size = 7; 2553 break; 2554 default: 2555 continue; 2556 } 2557 2558 __set_bit(i, mlxsw_sp->trap->policers_usage); 2559 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2560 burst_size); 2561 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2562 if (err) 2563 return err; 2564 } 2565 2566 return 0; 2567 } 2568 2569 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2570 { 2571 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2572 enum mlxsw_reg_htgt_trap_group i; 2573 int max_cpu_policers; 2574 int max_trap_groups; 2575 u8 priority, tc; 2576 u16 policer_id; 2577 int err; 2578 2579 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2580 return -EIO; 2581 2582 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2583 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2584 2585 for (i = 0; i < max_trap_groups; i++) { 2586 policer_id = i; 2587 switch (i) { 2588 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2589 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2590 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2591 priority = 1; 2592 tc = 1; 2593 break; 2594 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2595 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2596 tc = MLXSW_REG_HTGT_DEFAULT_TC; 2597 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2598 break; 2599 default: 2600 continue; 2601 } 2602 2603 if (max_cpu_policers <= policer_id && 2604 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2605 return -EIO; 2606 2607 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2608 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2609 if (err) 2610 return err; 2611 } 2612 2613 return 0; 2614 } 2615 2616 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2617 { 2618 struct mlxsw_sp_trap *trap; 2619 u64 max_policers; 2620 int err; 2621 2622 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2623 return -EIO; 2624 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2625 trap = kzalloc(struct_size(trap, policers_usage, 2626 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2627 if (!trap) 2628 return -ENOMEM; 2629 trap->max_policers = max_policers; 2630 mlxsw_sp->trap = trap; 2631 2632 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2633 if (err) 2634 goto err_cpu_policers_set; 2635 2636 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2637 if (err) 2638 goto err_trap_groups_set; 2639 2640 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener, 2641 ARRAY_SIZE(mlxsw_sp_listener), 2642 mlxsw_sp); 2643 if (err) 2644 goto err_traps_register; 2645 2646 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners, 2647 mlxsw_sp->listeners_count, mlxsw_sp); 2648 if (err) 2649 goto err_extra_traps_init; 2650 2651 return 0; 2652 2653 err_extra_traps_init: 2654 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2655 ARRAY_SIZE(mlxsw_sp_listener), 2656 mlxsw_sp); 2657 err_traps_register: 2658 err_trap_groups_set: 2659 err_cpu_policers_set: 2660 kfree(trap); 2661 return err; 2662 } 2663 2664 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2665 { 2666 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners, 2667 mlxsw_sp->listeners_count, 2668 mlxsw_sp); 2669 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2670 ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp); 2671 kfree(mlxsw_sp->trap); 2672 } 2673 2674 static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp) 2675 { 2676 char sgcr_pl[MLXSW_REG_SGCR_LEN]; 2677 int err; 2678 2679 if (mlxsw_core_lag_mode(mlxsw_sp->core) != 2680 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) 2681 return 0; 2682 2683 /* In DDD mode, which we by default use, each LAG entry is 8 PGT 2684 * entries. The LAG table address needs to be 8-aligned, but that ought 2685 * to be the case, since the LAG table is allocated first. 2686 */ 2687 err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base, 2688 mlxsw_sp->max_lag * 8); 2689 if (err) 2690 return err; 2691 if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) { 2692 err = -EINVAL; 2693 goto err_mid_alloc_range; 2694 } 2695 2696 mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base); 2697 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl); 2698 if (err) 2699 goto err_mid_alloc_range; 2700 2701 return 0; 2702 2703 err_mid_alloc_range: 2704 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, 2705 mlxsw_sp->max_lag * 8); 2706 return err; 2707 } 2708 2709 static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp) 2710 { 2711 if (mlxsw_core_lag_mode(mlxsw_sp->core) != 2712 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) 2713 return; 2714 2715 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, 2716 mlxsw_sp->max_lag * 8); 2717 } 2718 2719 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2720 2721 struct mlxsw_sp_lag { 2722 struct net_device *dev; 2723 refcount_t ref_count; 2724 u16 lag_id; 2725 }; 2726 2727 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2728 { 2729 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2730 u32 seed; 2731 int err; 2732 2733 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2734 MLXSW_SP_LAG_SEED_INIT); 2735 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2736 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2737 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2738 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2739 MLXSW_REG_SLCR_LAG_HASH_SIP | 2740 MLXSW_REG_SLCR_LAG_HASH_DIP | 2741 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2742 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2743 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2744 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2745 if (err) 2746 return err; 2747 2748 err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag); 2749 if (err) 2750 return err; 2751 2752 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2753 return -EIO; 2754 2755 err = mlxsw_sp_lag_pgt_init(mlxsw_sp); 2756 if (err) 2757 return err; 2758 2759 mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag), 2760 GFP_KERNEL); 2761 if (!mlxsw_sp->lags) { 2762 err = -ENOMEM; 2763 goto err_kcalloc; 2764 } 2765 2766 return 0; 2767 2768 err_kcalloc: 2769 mlxsw_sp_lag_pgt_fini(mlxsw_sp); 2770 return err; 2771 } 2772 2773 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2774 { 2775 mlxsw_sp_lag_pgt_fini(mlxsw_sp); 2776 kfree(mlxsw_sp->lags); 2777 } 2778 2779 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2780 .clock_init = mlxsw_sp1_ptp_clock_init, 2781 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2782 .init = mlxsw_sp1_ptp_init, 2783 .fini = mlxsw_sp1_ptp_fini, 2784 .receive = mlxsw_sp1_ptp_receive, 2785 .transmitted = mlxsw_sp1_ptp_transmitted, 2786 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2787 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 2788 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2789 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2790 .get_stats_count = mlxsw_sp1_get_stats_count, 2791 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2792 .get_stats = mlxsw_sp1_get_stats, 2793 .txhdr_construct = mlxsw_sp_ptp_txhdr_construct, 2794 }; 2795 2796 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2797 .clock_init = mlxsw_sp2_ptp_clock_init, 2798 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2799 .init = mlxsw_sp2_ptp_init, 2800 .fini = mlxsw_sp2_ptp_fini, 2801 .receive = mlxsw_sp2_ptp_receive, 2802 .transmitted = mlxsw_sp2_ptp_transmitted, 2803 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2804 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2805 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2806 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2807 .get_stats_count = mlxsw_sp2_get_stats_count, 2808 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2809 .get_stats = mlxsw_sp2_get_stats, 2810 .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct, 2811 }; 2812 2813 static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = { 2814 .clock_init = mlxsw_sp2_ptp_clock_init, 2815 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2816 .init = mlxsw_sp2_ptp_init, 2817 .fini = mlxsw_sp2_ptp_fini, 2818 .receive = mlxsw_sp2_ptp_receive, 2819 .transmitted = mlxsw_sp2_ptp_transmitted, 2820 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2821 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2822 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2823 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2824 .get_stats_count = mlxsw_sp2_get_stats_count, 2825 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2826 .get_stats = mlxsw_sp2_get_stats, 2827 .txhdr_construct = mlxsw_sp_ptp_txhdr_construct, 2828 }; 2829 2830 struct mlxsw_sp_sample_trigger_node { 2831 struct mlxsw_sp_sample_trigger trigger; 2832 struct mlxsw_sp_sample_params params; 2833 struct rhash_head ht_node; 2834 struct rcu_head rcu; 2835 refcount_t refcount; 2836 }; 2837 2838 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { 2839 .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), 2840 .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), 2841 .key_len = sizeof(struct mlxsw_sp_sample_trigger), 2842 .automatic_shrinking = true, 2843 }; 2844 2845 static void 2846 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, 2847 const struct mlxsw_sp_sample_trigger *trigger) 2848 { 2849 memset(key, 0, sizeof(*key)); 2850 key->type = trigger->type; 2851 key->local_port = trigger->local_port; 2852 } 2853 2854 /* RCU read lock must be held */ 2855 struct mlxsw_sp_sample_params * 2856 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, 2857 const struct mlxsw_sp_sample_trigger *trigger) 2858 { 2859 struct mlxsw_sp_sample_trigger_node *trigger_node; 2860 struct mlxsw_sp_sample_trigger key; 2861 2862 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2863 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, 2864 mlxsw_sp_sample_trigger_ht_params); 2865 if (!trigger_node) 2866 return NULL; 2867 2868 return &trigger_node->params; 2869 } 2870 2871 static int 2872 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, 2873 const struct mlxsw_sp_sample_trigger *trigger, 2874 const struct mlxsw_sp_sample_params *params) 2875 { 2876 struct mlxsw_sp_sample_trigger_node *trigger_node; 2877 int err; 2878 2879 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); 2880 if (!trigger_node) 2881 return -ENOMEM; 2882 2883 trigger_node->trigger = *trigger; 2884 trigger_node->params = *params; 2885 refcount_set(&trigger_node->refcount, 1); 2886 2887 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, 2888 &trigger_node->ht_node, 2889 mlxsw_sp_sample_trigger_ht_params); 2890 if (err) 2891 goto err_rhashtable_insert; 2892 2893 return 0; 2894 2895 err_rhashtable_insert: 2896 kfree(trigger_node); 2897 return err; 2898 } 2899 2900 static void 2901 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, 2902 struct mlxsw_sp_sample_trigger_node *trigger_node) 2903 { 2904 rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, 2905 &trigger_node->ht_node, 2906 mlxsw_sp_sample_trigger_ht_params); 2907 kfree_rcu(trigger_node, rcu); 2908 } 2909 2910 int 2911 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, 2912 const struct mlxsw_sp_sample_trigger *trigger, 2913 const struct mlxsw_sp_sample_params *params, 2914 struct netlink_ext_ack *extack) 2915 { 2916 struct mlxsw_sp_sample_trigger_node *trigger_node; 2917 struct mlxsw_sp_sample_trigger key; 2918 2919 ASSERT_RTNL(); 2920 2921 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2922 2923 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2924 &key, 2925 mlxsw_sp_sample_trigger_ht_params); 2926 if (!trigger_node) 2927 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, 2928 params); 2929 2930 if (trigger_node->trigger.local_port) { 2931 NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port"); 2932 return -EINVAL; 2933 } 2934 2935 if (trigger_node->params.psample_group != params->psample_group || 2936 trigger_node->params.truncate != params->truncate || 2937 trigger_node->params.rate != params->rate || 2938 trigger_node->params.trunc_size != params->trunc_size) { 2939 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); 2940 return -EINVAL; 2941 } 2942 2943 refcount_inc(&trigger_node->refcount); 2944 2945 return 0; 2946 } 2947 2948 void 2949 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, 2950 const struct mlxsw_sp_sample_trigger *trigger) 2951 { 2952 struct mlxsw_sp_sample_trigger_node *trigger_node; 2953 struct mlxsw_sp_sample_trigger key; 2954 2955 ASSERT_RTNL(); 2956 2957 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2958 2959 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2960 &key, 2961 mlxsw_sp_sample_trigger_ht_params); 2962 if (!trigger_node) 2963 return; 2964 2965 if (!refcount_dec_and_test(&trigger_node->refcount)) 2966 return; 2967 2968 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); 2969 } 2970 2971 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2972 unsigned long event, void *ptr); 2973 2974 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96 2975 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128 2976 #define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789 2977 2978 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) 2979 { 2980 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0); 2981 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 2982 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; 2983 mutex_init(&mlxsw_sp->parsing.lock); 2984 } 2985 2986 static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) 2987 { 2988 mutex_destroy(&mlxsw_sp->parsing.lock); 2989 WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref)); 2990 } 2991 2992 struct mlxsw_sp_ipv6_addr_node { 2993 struct in6_addr key; 2994 struct rhash_head ht_node; 2995 u32 kvdl_index; 2996 refcount_t refcount; 2997 }; 2998 2999 static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = { 3000 .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key), 3001 .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node), 3002 .key_len = sizeof(struct in6_addr), 3003 .automatic_shrinking = true, 3004 }; 3005 3006 static int 3007 mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6, 3008 u32 *p_kvdl_index) 3009 { 3010 struct mlxsw_sp_ipv6_addr_node *node; 3011 char rips_pl[MLXSW_REG_RIPS_LEN]; 3012 int err; 3013 3014 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 3015 MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 3016 p_kvdl_index); 3017 if (err) 3018 return err; 3019 3020 mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6); 3021 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); 3022 if (err) 3023 goto err_rips_write; 3024 3025 node = kzalloc(sizeof(*node), GFP_KERNEL); 3026 if (!node) { 3027 err = -ENOMEM; 3028 goto err_node_alloc; 3029 } 3030 3031 node->key = *addr6; 3032 node->kvdl_index = *p_kvdl_index; 3033 refcount_set(&node->refcount, 1); 3034 3035 err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht, 3036 &node->ht_node, 3037 mlxsw_sp_ipv6_addr_ht_params); 3038 if (err) 3039 goto err_rhashtable_insert; 3040 3041 return 0; 3042 3043 err_rhashtable_insert: 3044 kfree(node); 3045 err_node_alloc: 3046 err_rips_write: 3047 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 3048 *p_kvdl_index); 3049 return err; 3050 } 3051 3052 static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp, 3053 struct mlxsw_sp_ipv6_addr_node *node) 3054 { 3055 u32 kvdl_index = node->kvdl_index; 3056 3057 rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node, 3058 mlxsw_sp_ipv6_addr_ht_params); 3059 kfree(node); 3060 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 3061 kvdl_index); 3062 } 3063 3064 int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp, 3065 const struct in6_addr *addr6, 3066 u32 *p_kvdl_index) 3067 { 3068 struct mlxsw_sp_ipv6_addr_node *node; 3069 int err = 0; 3070 3071 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 3072 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 3073 mlxsw_sp_ipv6_addr_ht_params); 3074 if (node) { 3075 refcount_inc(&node->refcount); 3076 *p_kvdl_index = node->kvdl_index; 3077 goto out_unlock; 3078 } 3079 3080 err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index); 3081 3082 out_unlock: 3083 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 3084 return err; 3085 } 3086 3087 void 3088 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6) 3089 { 3090 struct mlxsw_sp_ipv6_addr_node *node; 3091 3092 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 3093 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 3094 mlxsw_sp_ipv6_addr_ht_params); 3095 if (WARN_ON(!node)) 3096 goto out_unlock; 3097 3098 if (!refcount_dec_and_test(&node->refcount)) 3099 goto out_unlock; 3100 3101 mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node); 3102 3103 out_unlock: 3104 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 3105 } 3106 3107 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp) 3108 { 3109 int err; 3110 3111 err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht, 3112 &mlxsw_sp_ipv6_addr_ht_params); 3113 if (err) 3114 return err; 3115 3116 mutex_init(&mlxsw_sp->ipv6_addr_ht_lock); 3117 return 0; 3118 } 3119 3120 static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp) 3121 { 3122 mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock); 3123 rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht); 3124 } 3125 3126 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3127 const struct mlxsw_bus_info *mlxsw_bus_info, 3128 struct netlink_ext_ack *extack) 3129 { 3130 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3131 int err; 3132 3133 mlxsw_sp->core = mlxsw_core; 3134 mlxsw_sp->bus_info = mlxsw_bus_info; 3135 3136 mlxsw_sp_parsing_init(mlxsw_sp); 3137 3138 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3139 if (err) { 3140 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 3141 return err; 3142 } 3143 3144 err = mlxsw_sp_kvdl_init(mlxsw_sp); 3145 if (err) { 3146 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 3147 return err; 3148 } 3149 3150 err = mlxsw_sp_pgt_init(mlxsw_sp); 3151 if (err) { 3152 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n"); 3153 goto err_pgt_init; 3154 } 3155 3156 /* Initialize before FIDs so that the LAG table is at the start of PGT 3157 * and 8-aligned without overallocation. 3158 */ 3159 err = mlxsw_sp_lag_init(mlxsw_sp); 3160 if (err) { 3161 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 3162 goto err_lag_init; 3163 } 3164 3165 err = mlxsw_sp->fid_core_ops->init(mlxsw_sp); 3166 if (err) { 3167 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 3168 goto err_fid_core_init; 3169 } 3170 3171 err = mlxsw_sp_policers_init(mlxsw_sp); 3172 if (err) { 3173 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); 3174 goto err_policers_init; 3175 } 3176 3177 err = mlxsw_sp_traps_init(mlxsw_sp); 3178 if (err) { 3179 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 3180 goto err_traps_init; 3181 } 3182 3183 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 3184 if (err) { 3185 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 3186 goto err_devlink_traps_init; 3187 } 3188 3189 err = mlxsw_sp_buffers_init(mlxsw_sp); 3190 if (err) { 3191 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 3192 goto err_buffers_init; 3193 } 3194 3195 /* Initialize SPAN before router and switchdev, so that those components 3196 * can call mlxsw_sp_span_respin(). 3197 */ 3198 err = mlxsw_sp_span_init(mlxsw_sp); 3199 if (err) { 3200 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3201 goto err_span_init; 3202 } 3203 3204 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3205 if (err) { 3206 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3207 goto err_switchdev_init; 3208 } 3209 3210 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 3211 if (err) { 3212 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 3213 goto err_counter_pool_init; 3214 } 3215 3216 err = mlxsw_sp_afa_init(mlxsw_sp); 3217 if (err) { 3218 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 3219 goto err_afa_init; 3220 } 3221 3222 err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp); 3223 if (err) { 3224 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n"); 3225 goto err_ipv6_addr_ht_init; 3226 } 3227 3228 err = mlxsw_sp_nve_init(mlxsw_sp); 3229 if (err) { 3230 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 3231 goto err_nve_init; 3232 } 3233 3234 err = mlxsw_sp_port_range_init(mlxsw_sp); 3235 if (err) { 3236 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n"); 3237 goto err_port_range_init; 3238 } 3239 3240 err = mlxsw_sp_acl_init(mlxsw_sp); 3241 if (err) { 3242 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3243 goto err_acl_init; 3244 } 3245 3246 err = mlxsw_sp_router_init(mlxsw_sp, extack); 3247 if (err) { 3248 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 3249 goto err_router_init; 3250 } 3251 3252 if (mlxsw_sp->bus_info->read_clock_capable) { 3253 /* NULL is a valid return value from clock_init */ 3254 mlxsw_sp->clock = 3255 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 3256 mlxsw_sp->bus_info->dev); 3257 if (IS_ERR(mlxsw_sp->clock)) { 3258 err = PTR_ERR(mlxsw_sp->clock); 3259 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 3260 goto err_ptp_clock_init; 3261 } 3262 } 3263 3264 if (mlxsw_sp->clock) { 3265 /* NULL is a valid return value from ptp_ops->init */ 3266 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 3267 if (IS_ERR(mlxsw_sp->ptp_state)) { 3268 err = PTR_ERR(mlxsw_sp->ptp_state); 3269 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 3270 goto err_ptp_init; 3271 } 3272 } 3273 3274 /* Initialize netdevice notifier after SPAN is initialized, so that the 3275 * event handler can call SPAN respin. 3276 */ 3277 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 3278 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3279 &mlxsw_sp->netdevice_nb); 3280 if (err) { 3281 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 3282 goto err_netdev_notifier; 3283 } 3284 3285 err = mlxsw_sp_dpipe_init(mlxsw_sp); 3286 if (err) { 3287 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 3288 goto err_dpipe_init; 3289 } 3290 3291 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 3292 if (err) { 3293 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 3294 goto err_port_module_info_init; 3295 } 3296 3297 err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, 3298 &mlxsw_sp_sample_trigger_ht_params); 3299 if (err) { 3300 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); 3301 goto err_sample_trigger_init; 3302 } 3303 3304 err = mlxsw_sp_ports_create(mlxsw_sp); 3305 if (err) { 3306 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3307 goto err_ports_create; 3308 } 3309 3310 return 0; 3311 3312 err_ports_create: 3313 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3314 err_sample_trigger_init: 3315 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3316 err_port_module_info_init: 3317 mlxsw_sp_dpipe_fini(mlxsw_sp); 3318 err_dpipe_init: 3319 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3320 &mlxsw_sp->netdevice_nb); 3321 err_netdev_notifier: 3322 if (mlxsw_sp->clock) 3323 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3324 err_ptp_init: 3325 if (mlxsw_sp->clock) 3326 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3327 err_ptp_clock_init: 3328 mlxsw_sp_router_fini(mlxsw_sp); 3329 err_router_init: 3330 mlxsw_sp_acl_fini(mlxsw_sp); 3331 err_acl_init: 3332 mlxsw_sp_port_range_fini(mlxsw_sp); 3333 err_port_range_init: 3334 mlxsw_sp_nve_fini(mlxsw_sp); 3335 err_nve_init: 3336 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3337 err_ipv6_addr_ht_init: 3338 mlxsw_sp_afa_fini(mlxsw_sp); 3339 err_afa_init: 3340 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3341 err_counter_pool_init: 3342 mlxsw_sp_switchdev_fini(mlxsw_sp); 3343 err_switchdev_init: 3344 mlxsw_sp_span_fini(mlxsw_sp); 3345 err_span_init: 3346 mlxsw_sp_buffers_fini(mlxsw_sp); 3347 err_buffers_init: 3348 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3349 err_devlink_traps_init: 3350 mlxsw_sp_traps_fini(mlxsw_sp); 3351 err_traps_init: 3352 mlxsw_sp_policers_fini(mlxsw_sp); 3353 err_policers_init: 3354 mlxsw_sp->fid_core_ops->fini(mlxsw_sp); 3355 err_fid_core_init: 3356 mlxsw_sp_lag_fini(mlxsw_sp); 3357 err_lag_init: 3358 mlxsw_sp_pgt_fini(mlxsw_sp); 3359 err_pgt_init: 3360 mlxsw_sp_kvdl_fini(mlxsw_sp); 3361 mlxsw_sp_parsing_fini(mlxsw_sp); 3362 return err; 3363 } 3364 3365 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 3366 const struct mlxsw_bus_info *mlxsw_bus_info, 3367 struct netlink_ext_ack *extack) 3368 { 3369 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3370 3371 mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; 3372 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 3373 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 3374 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 3375 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 3376 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 3377 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 3378 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 3379 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 3380 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 3381 mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; 3382 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 3383 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 3384 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 3385 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; 3386 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; 3387 mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; 3388 mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; 3389 mlxsw_sp->listeners = mlxsw_sp1_listener; 3390 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 3391 mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops; 3392 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 3393 mlxsw_sp->pgt_smpe_index_valid = true; 3394 3395 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3396 } 3397 3398 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 3399 const struct mlxsw_bus_info *mlxsw_bus_info, 3400 struct netlink_ext_ack *extack) 3401 { 3402 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3403 3404 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3405 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3406 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3407 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3408 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3409 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3410 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3411 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3412 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3413 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3414 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3415 mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; 3416 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3417 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3418 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 3419 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3420 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3421 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3422 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3423 mlxsw_sp->listeners = mlxsw_sp2_listener; 3424 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3425 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops; 3426 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 3427 mlxsw_sp->pgt_smpe_index_valid = false; 3428 3429 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3430 } 3431 3432 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 3433 const struct mlxsw_bus_info *mlxsw_bus_info, 3434 struct netlink_ext_ack *extack) 3435 { 3436 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3437 3438 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3439 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3440 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3441 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3442 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3443 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3444 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3445 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3446 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3447 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3448 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3449 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3450 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3451 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3452 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3453 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3454 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3455 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3456 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3457 mlxsw_sp->listeners = mlxsw_sp2_listener; 3458 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3459 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops; 3460 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3461 mlxsw_sp->pgt_smpe_index_valid = false; 3462 3463 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3464 } 3465 3466 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, 3467 const struct mlxsw_bus_info *mlxsw_bus_info, 3468 struct netlink_ext_ack *extack) 3469 { 3470 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3471 3472 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3473 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3474 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3475 mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops; 3476 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3477 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3478 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3479 mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops; 3480 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3481 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3482 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3483 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3484 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3485 mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops; 3486 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3487 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3488 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3489 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3490 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3491 mlxsw_sp->listeners = mlxsw_sp2_listener; 3492 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); 3493 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops; 3494 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; 3495 mlxsw_sp->pgt_smpe_index_valid = false; 3496 3497 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3498 } 3499 3500 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3501 { 3502 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3503 3504 mlxsw_sp_ports_remove(mlxsw_sp); 3505 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3506 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3507 mlxsw_sp_dpipe_fini(mlxsw_sp); 3508 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3509 &mlxsw_sp->netdevice_nb); 3510 if (mlxsw_sp->clock) { 3511 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3512 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3513 } 3514 mlxsw_sp_router_fini(mlxsw_sp); 3515 mlxsw_sp_acl_fini(mlxsw_sp); 3516 mlxsw_sp_port_range_fini(mlxsw_sp); 3517 mlxsw_sp_nve_fini(mlxsw_sp); 3518 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3519 mlxsw_sp_afa_fini(mlxsw_sp); 3520 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3521 mlxsw_sp_switchdev_fini(mlxsw_sp); 3522 mlxsw_sp_span_fini(mlxsw_sp); 3523 mlxsw_sp_buffers_fini(mlxsw_sp); 3524 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3525 mlxsw_sp_traps_fini(mlxsw_sp); 3526 mlxsw_sp_policers_fini(mlxsw_sp); 3527 mlxsw_sp->fid_core_ops->fini(mlxsw_sp); 3528 mlxsw_sp_lag_fini(mlxsw_sp); 3529 mlxsw_sp_pgt_fini(mlxsw_sp); 3530 mlxsw_sp_kvdl_fini(mlxsw_sp); 3531 mlxsw_sp_parsing_fini(mlxsw_sp); 3532 } 3533 3534 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3535 .used_flood_mode = 1, 3536 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED, 3537 .used_max_ib_mc = 1, 3538 .max_ib_mc = 0, 3539 .used_max_pkey = 1, 3540 .max_pkey = 0, 3541 .used_ubridge = 1, 3542 .ubridge = 1, 3543 .used_kvd_sizes = 1, 3544 .kvd_hash_single_parts = 59, 3545 .kvd_hash_double_parts = 41, 3546 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3547 .swid_config = { 3548 { 3549 .used_type = 1, 3550 .type = MLXSW_PORT_SWID_TYPE_ETH, 3551 } 3552 }, 3553 }; 3554 3555 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3556 .used_flood_mode = 1, 3557 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED, 3558 .used_max_ib_mc = 1, 3559 .max_ib_mc = 0, 3560 .used_max_pkey = 1, 3561 .max_pkey = 0, 3562 .used_ubridge = 1, 3563 .ubridge = 1, 3564 .swid_config = { 3565 { 3566 .used_type = 1, 3567 .type = MLXSW_PORT_SWID_TYPE_ETH, 3568 } 3569 }, 3570 .used_cqe_time_stamp_type = 1, 3571 .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC, 3572 .lag_mode_prefer_sw = true, 3573 .flood_mode_prefer_cff = true, 3574 }; 3575 3576 /* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs 3577 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT 3578 * table. 3579 */ 3580 #define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128 3581 3582 static const struct mlxsw_config_profile mlxsw_sp4_config_profile = { 3583 .used_max_lag = 1, 3584 .max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG, 3585 .used_flood_mode = 1, 3586 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED, 3587 .used_max_ib_mc = 1, 3588 .max_ib_mc = 0, 3589 .used_max_pkey = 1, 3590 .max_pkey = 0, 3591 .used_ubridge = 1, 3592 .ubridge = 1, 3593 .swid_config = { 3594 { 3595 .used_type = 1, 3596 .type = MLXSW_PORT_SWID_TYPE_ETH, 3597 } 3598 }, 3599 .used_cqe_time_stamp_type = 1, 3600 .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC, 3601 .lag_mode_prefer_sw = true, 3602 .flood_mode_prefer_cff = true, 3603 }; 3604 3605 static void 3606 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3607 struct devlink_resource_size_params *kvd_size_params, 3608 struct devlink_resource_size_params *linear_size_params, 3609 struct devlink_resource_size_params *hash_double_size_params, 3610 struct devlink_resource_size_params *hash_single_size_params) 3611 { 3612 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3613 KVD_SINGLE_MIN_SIZE); 3614 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3615 KVD_DOUBLE_MIN_SIZE); 3616 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3617 u32 linear_size_min = 0; 3618 3619 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3620 MLXSW_SP_KVD_GRANULARITY, 3621 DEVLINK_RESOURCE_UNIT_ENTRY); 3622 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3623 kvd_size - single_size_min - 3624 double_size_min, 3625 MLXSW_SP_KVD_GRANULARITY, 3626 DEVLINK_RESOURCE_UNIT_ENTRY); 3627 devlink_resource_size_params_init(hash_double_size_params, 3628 double_size_min, 3629 kvd_size - single_size_min - 3630 linear_size_min, 3631 MLXSW_SP_KVD_GRANULARITY, 3632 DEVLINK_RESOURCE_UNIT_ENTRY); 3633 devlink_resource_size_params_init(hash_single_size_params, 3634 single_size_min, 3635 kvd_size - double_size_min - 3636 linear_size_min, 3637 MLXSW_SP_KVD_GRANULARITY, 3638 DEVLINK_RESOURCE_UNIT_ENTRY); 3639 } 3640 3641 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3642 { 3643 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3644 struct devlink_resource_size_params hash_single_size_params; 3645 struct devlink_resource_size_params hash_double_size_params; 3646 struct devlink_resource_size_params linear_size_params; 3647 struct devlink_resource_size_params kvd_size_params; 3648 u32 kvd_size, single_size, double_size, linear_size; 3649 const struct mlxsw_config_profile *profile; 3650 int err; 3651 3652 profile = &mlxsw_sp1_config_profile; 3653 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3654 return -EIO; 3655 3656 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3657 &linear_size_params, 3658 &hash_double_size_params, 3659 &hash_single_size_params); 3660 3661 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3662 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3663 kvd_size, MLXSW_SP_RESOURCE_KVD, 3664 DEVLINK_RESOURCE_ID_PARENT_TOP, 3665 &kvd_size_params); 3666 if (err) 3667 return err; 3668 3669 linear_size = profile->kvd_linear_size; 3670 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 3671 linear_size, 3672 MLXSW_SP_RESOURCE_KVD_LINEAR, 3673 MLXSW_SP_RESOURCE_KVD, 3674 &linear_size_params); 3675 if (err) 3676 return err; 3677 3678 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 3679 if (err) 3680 return err; 3681 3682 double_size = kvd_size - linear_size; 3683 double_size *= profile->kvd_hash_double_parts; 3684 double_size /= profile->kvd_hash_double_parts + 3685 profile->kvd_hash_single_parts; 3686 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 3687 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 3688 double_size, 3689 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3690 MLXSW_SP_RESOURCE_KVD, 3691 &hash_double_size_params); 3692 if (err) 3693 return err; 3694 3695 single_size = kvd_size - double_size - linear_size; 3696 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 3697 single_size, 3698 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3699 MLXSW_SP_RESOURCE_KVD, 3700 &hash_single_size_params); 3701 if (err) 3702 return err; 3703 3704 return 0; 3705 } 3706 3707 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3708 { 3709 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3710 struct devlink_resource_size_params kvd_size_params; 3711 u32 kvd_size; 3712 3713 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3714 return -EIO; 3715 3716 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3717 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 3718 MLXSW_SP_KVD_GRANULARITY, 3719 DEVLINK_RESOURCE_UNIT_ENTRY); 3720 3721 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3722 kvd_size, MLXSW_SP_RESOURCE_KVD, 3723 DEVLINK_RESOURCE_ID_PARENT_TOP, 3724 &kvd_size_params); 3725 } 3726 3727 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 3728 { 3729 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3730 struct devlink_resource_size_params span_size_params; 3731 u32 max_span; 3732 3733 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 3734 return -EIO; 3735 3736 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 3737 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 3738 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3739 3740 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 3741 max_span, MLXSW_SP_RESOURCE_SPAN, 3742 DEVLINK_RESOURCE_ID_PARENT_TOP, 3743 &span_size_params); 3744 } 3745 3746 static int 3747 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core) 3748 { 3749 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3750 struct devlink_resource_size_params size_params; 3751 u8 max_rif_mac_profiles; 3752 3753 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) 3754 max_rif_mac_profiles = 1; 3755 else 3756 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, 3757 MAX_RIF_MAC_PROFILES); 3758 devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, 3759 max_rif_mac_profiles, 1, 3760 DEVLINK_RESOURCE_UNIT_ENTRY); 3761 3762 return devl_resource_register(devlink, 3763 "rif_mac_profiles", 3764 max_rif_mac_profiles, 3765 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, 3766 DEVLINK_RESOURCE_ID_PARENT_TOP, 3767 &size_params); 3768 } 3769 3770 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core) 3771 { 3772 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3773 struct devlink_resource_size_params size_params; 3774 u64 max_rifs; 3775 3776 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS)) 3777 return -EIO; 3778 3779 max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS); 3780 devlink_resource_size_params_init(&size_params, max_rifs, max_rifs, 3781 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3782 3783 return devl_resource_register(devlink, "rifs", max_rifs, 3784 MLXSW_SP_RESOURCE_RIFS, 3785 DEVLINK_RESOURCE_ID_PARENT_TOP, 3786 &size_params); 3787 } 3788 3789 static int 3790 mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core) 3791 { 3792 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3793 struct devlink_resource_size_params size_params; 3794 u64 max; 3795 3796 if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE)) 3797 return -EIO; 3798 3799 max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE); 3800 devlink_resource_size_params_init(&size_params, max, max, 1, 3801 DEVLINK_RESOURCE_UNIT_ENTRY); 3802 3803 return devl_resource_register(devlink, "port_range_registers", max, 3804 MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS, 3805 DEVLINK_RESOURCE_ID_PARENT_TOP, 3806 &size_params); 3807 } 3808 3809 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 3810 { 3811 int err; 3812 3813 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 3814 if (err) 3815 return err; 3816 3817 err = mlxsw_sp_resources_span_register(mlxsw_core); 3818 if (err) 3819 goto err_resources_span_register; 3820 3821 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3822 if (err) 3823 goto err_resources_counter_register; 3824 3825 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3826 if (err) 3827 goto err_policer_resources_register; 3828 3829 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3830 if (err) 3831 goto err_resources_rif_mac_profile_register; 3832 3833 err = mlxsw_sp_resources_rifs_register(mlxsw_core); 3834 if (err) 3835 goto err_resources_rifs_register; 3836 3837 err = mlxsw_sp_resources_port_range_register(mlxsw_core); 3838 if (err) 3839 goto err_resources_port_range_register; 3840 3841 return 0; 3842 3843 err_resources_port_range_register: 3844 err_resources_rifs_register: 3845 err_resources_rif_mac_profile_register: 3846 err_policer_resources_register: 3847 err_resources_counter_register: 3848 err_resources_span_register: 3849 devl_resources_unregister(priv_to_devlink(mlxsw_core)); 3850 return err; 3851 } 3852 3853 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 3854 { 3855 int err; 3856 3857 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 3858 if (err) 3859 return err; 3860 3861 err = mlxsw_sp_resources_span_register(mlxsw_core); 3862 if (err) 3863 goto err_resources_span_register; 3864 3865 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3866 if (err) 3867 goto err_resources_counter_register; 3868 3869 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3870 if (err) 3871 goto err_policer_resources_register; 3872 3873 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3874 if (err) 3875 goto err_resources_rif_mac_profile_register; 3876 3877 err = mlxsw_sp_resources_rifs_register(mlxsw_core); 3878 if (err) 3879 goto err_resources_rifs_register; 3880 3881 err = mlxsw_sp_resources_port_range_register(mlxsw_core); 3882 if (err) 3883 goto err_resources_port_range_register; 3884 3885 return 0; 3886 3887 err_resources_port_range_register: 3888 err_resources_rifs_register: 3889 err_resources_rif_mac_profile_register: 3890 err_policer_resources_register: 3891 err_resources_counter_register: 3892 err_resources_span_register: 3893 devl_resources_unregister(priv_to_devlink(mlxsw_core)); 3894 return err; 3895 } 3896 3897 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3898 const struct mlxsw_config_profile *profile, 3899 u64 *p_single_size, u64 *p_double_size, 3900 u64 *p_linear_size) 3901 { 3902 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3903 u32 double_size; 3904 int err; 3905 3906 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3907 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 3908 return -EIO; 3909 3910 /* The hash part is what left of the kvd without the 3911 * linear part. It is split to the single size and 3912 * double size by the parts ratio from the profile. 3913 * Both sizes must be a multiplications of the 3914 * granularity from the profile. In case the user 3915 * provided the sizes they are obtained via devlink. 3916 */ 3917 err = devl_resource_size_get(devlink, 3918 MLXSW_SP_RESOURCE_KVD_LINEAR, 3919 p_linear_size); 3920 if (err) 3921 *p_linear_size = profile->kvd_linear_size; 3922 3923 err = devl_resource_size_get(devlink, 3924 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3925 p_double_size); 3926 if (err) { 3927 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3928 *p_linear_size; 3929 double_size *= profile->kvd_hash_double_parts; 3930 double_size /= profile->kvd_hash_double_parts + 3931 profile->kvd_hash_single_parts; 3932 *p_double_size = rounddown(double_size, 3933 MLXSW_SP_KVD_GRANULARITY); 3934 } 3935 3936 err = devl_resource_size_get(devlink, 3937 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3938 p_single_size); 3939 if (err) 3940 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3941 *p_double_size - *p_linear_size; 3942 3943 /* Check results are legal. */ 3944 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3945 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3946 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3947 return -EIO; 3948 3949 return 0; 3950 } 3951 3952 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3953 struct sk_buff *skb, u16 local_port) 3954 { 3955 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3956 3957 skb_pull(skb, MLXSW_TXHDR_LEN); 3958 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3959 } 3960 3961 static struct mlxsw_driver mlxsw_sp1_driver = { 3962 .kind = mlxsw_sp1_driver_name, 3963 .priv_size = sizeof(struct mlxsw_sp), 3964 .fw_req_rev = &mlxsw_sp1_fw_rev, 3965 .fw_filename = MLXSW_SP1_FW_FILENAME, 3966 .init = mlxsw_sp1_init, 3967 .fini = mlxsw_sp_fini, 3968 .port_split = mlxsw_sp_port_split, 3969 .port_unsplit = mlxsw_sp_port_unsplit, 3970 .sb_pool_get = mlxsw_sp_sb_pool_get, 3971 .sb_pool_set = mlxsw_sp_sb_pool_set, 3972 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3973 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3974 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3975 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3976 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3977 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3978 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3979 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3980 .trap_init = mlxsw_sp_trap_init, 3981 .trap_fini = mlxsw_sp_trap_fini, 3982 .trap_action_set = mlxsw_sp_trap_action_set, 3983 .trap_group_init = mlxsw_sp_trap_group_init, 3984 .trap_group_set = mlxsw_sp_trap_group_set, 3985 .trap_policer_init = mlxsw_sp_trap_policer_init, 3986 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3987 .trap_policer_set = mlxsw_sp_trap_policer_set, 3988 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3989 .txhdr_construct = mlxsw_sp_txhdr_construct, 3990 .resources_register = mlxsw_sp1_resources_register, 3991 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3992 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3993 .txhdr_len = MLXSW_TXHDR_LEN, 3994 .profile = &mlxsw_sp1_config_profile, 3995 .sdq_supports_cqe_v2 = false, 3996 }; 3997 3998 static struct mlxsw_driver mlxsw_sp2_driver = { 3999 .kind = mlxsw_sp2_driver_name, 4000 .priv_size = sizeof(struct mlxsw_sp), 4001 .fw_req_rev = &mlxsw_sp2_fw_rev, 4002 .fw_filename = MLXSW_SP2_FW_FILENAME, 4003 .init = mlxsw_sp2_init, 4004 .fini = mlxsw_sp_fini, 4005 .port_split = mlxsw_sp_port_split, 4006 .port_unsplit = mlxsw_sp_port_unsplit, 4007 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 4008 .sb_pool_get = mlxsw_sp_sb_pool_get, 4009 .sb_pool_set = mlxsw_sp_sb_pool_set, 4010 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4011 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4012 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4013 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4014 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4015 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4016 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4017 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4018 .trap_init = mlxsw_sp_trap_init, 4019 .trap_fini = mlxsw_sp_trap_fini, 4020 .trap_action_set = mlxsw_sp_trap_action_set, 4021 .trap_group_init = mlxsw_sp_trap_group_init, 4022 .trap_group_set = mlxsw_sp_trap_group_set, 4023 .trap_policer_init = mlxsw_sp_trap_policer_init, 4024 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4025 .trap_policer_set = mlxsw_sp_trap_policer_set, 4026 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4027 .txhdr_construct = mlxsw_sp_txhdr_construct, 4028 .resources_register = mlxsw_sp2_resources_register, 4029 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4030 .txhdr_len = MLXSW_TXHDR_LEN, 4031 .profile = &mlxsw_sp2_config_profile, 4032 .sdq_supports_cqe_v2 = true, 4033 }; 4034 4035 static struct mlxsw_driver mlxsw_sp3_driver = { 4036 .kind = mlxsw_sp3_driver_name, 4037 .priv_size = sizeof(struct mlxsw_sp), 4038 .fw_req_rev = &mlxsw_sp3_fw_rev, 4039 .fw_filename = MLXSW_SP3_FW_FILENAME, 4040 .init = mlxsw_sp3_init, 4041 .fini = mlxsw_sp_fini, 4042 .port_split = mlxsw_sp_port_split, 4043 .port_unsplit = mlxsw_sp_port_unsplit, 4044 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 4045 .sb_pool_get = mlxsw_sp_sb_pool_get, 4046 .sb_pool_set = mlxsw_sp_sb_pool_set, 4047 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4048 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4049 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4050 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4051 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4052 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4053 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4054 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4055 .trap_init = mlxsw_sp_trap_init, 4056 .trap_fini = mlxsw_sp_trap_fini, 4057 .trap_action_set = mlxsw_sp_trap_action_set, 4058 .trap_group_init = mlxsw_sp_trap_group_init, 4059 .trap_group_set = mlxsw_sp_trap_group_set, 4060 .trap_policer_init = mlxsw_sp_trap_policer_init, 4061 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4062 .trap_policer_set = mlxsw_sp_trap_policer_set, 4063 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4064 .txhdr_construct = mlxsw_sp_txhdr_construct, 4065 .resources_register = mlxsw_sp2_resources_register, 4066 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4067 .txhdr_len = MLXSW_TXHDR_LEN, 4068 .profile = &mlxsw_sp2_config_profile, 4069 .sdq_supports_cqe_v2 = true, 4070 }; 4071 4072 static struct mlxsw_driver mlxsw_sp4_driver = { 4073 .kind = mlxsw_sp4_driver_name, 4074 .priv_size = sizeof(struct mlxsw_sp), 4075 .init = mlxsw_sp4_init, 4076 .fini = mlxsw_sp_fini, 4077 .port_split = mlxsw_sp_port_split, 4078 .port_unsplit = mlxsw_sp_port_unsplit, 4079 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 4080 .sb_pool_get = mlxsw_sp_sb_pool_get, 4081 .sb_pool_set = mlxsw_sp_sb_pool_set, 4082 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4083 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4084 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4085 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4086 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4087 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4088 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4089 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4090 .trap_init = mlxsw_sp_trap_init, 4091 .trap_fini = mlxsw_sp_trap_fini, 4092 .trap_action_set = mlxsw_sp_trap_action_set, 4093 .trap_group_init = mlxsw_sp_trap_group_init, 4094 .trap_group_set = mlxsw_sp_trap_group_set, 4095 .trap_policer_init = mlxsw_sp_trap_policer_init, 4096 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4097 .trap_policer_set = mlxsw_sp_trap_policer_set, 4098 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4099 .txhdr_construct = mlxsw_sp_txhdr_construct, 4100 .resources_register = mlxsw_sp2_resources_register, 4101 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4102 .txhdr_len = MLXSW_TXHDR_LEN, 4103 .profile = &mlxsw_sp4_config_profile, 4104 .sdq_supports_cqe_v2 = true, 4105 }; 4106 4107 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 4108 { 4109 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 4110 } 4111 4112 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, 4113 struct netdev_nested_priv *priv) 4114 { 4115 int ret = 0; 4116 4117 if (mlxsw_sp_port_dev_check(lower_dev)) { 4118 priv->data = (void *)netdev_priv(lower_dev); 4119 ret = 1; 4120 } 4121 4122 return ret; 4123 } 4124 4125 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 4126 { 4127 struct netdev_nested_priv priv = { 4128 .data = NULL, 4129 }; 4130 4131 if (mlxsw_sp_port_dev_check(dev)) 4132 return netdev_priv(dev); 4133 4134 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv); 4135 4136 return (struct mlxsw_sp_port *)priv.data; 4137 } 4138 4139 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 4140 { 4141 struct mlxsw_sp_port *mlxsw_sp_port; 4142 4143 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 4144 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 4145 } 4146 4147 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 4148 { 4149 struct netdev_nested_priv priv = { 4150 .data = NULL, 4151 }; 4152 4153 if (mlxsw_sp_port_dev_check(dev)) 4154 return netdev_priv(dev); 4155 4156 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 4157 &priv); 4158 4159 return (struct mlxsw_sp_port *)priv.data; 4160 } 4161 4162 int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) 4163 { 4164 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4165 int err = 0; 4166 4167 mutex_lock(&mlxsw_sp->parsing.lock); 4168 4169 if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref)) 4170 goto out_unlock; 4171 4172 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH, 4173 mlxsw_sp->parsing.vxlan_udp_dport); 4174 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4175 if (err) 4176 goto out_unlock; 4177 4178 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH; 4179 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1); 4180 4181 out_unlock: 4182 mutex_unlock(&mlxsw_sp->parsing.lock); 4183 return err; 4184 } 4185 4186 void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp) 4187 { 4188 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4189 4190 mutex_lock(&mlxsw_sp->parsing.lock); 4191 4192 if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref)) 4193 goto out_unlock; 4194 4195 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH, 4196 mlxsw_sp->parsing.vxlan_udp_dport); 4197 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4198 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 4199 4200 out_unlock: 4201 mutex_unlock(&mlxsw_sp->parsing.lock); 4202 } 4203 4204 int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, 4205 __be16 udp_dport) 4206 { 4207 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4208 int err; 4209 4210 mutex_lock(&mlxsw_sp->parsing.lock); 4211 4212 mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth, 4213 be16_to_cpu(udp_dport)); 4214 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4215 if (err) 4216 goto out_unlock; 4217 4218 mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport); 4219 4220 out_unlock: 4221 mutex_unlock(&mlxsw_sp->parsing.lock); 4222 return err; 4223 } 4224 4225 static void 4226 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 4227 struct net_device *lag_dev) 4228 { 4229 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 4230 struct net_device *upper_dev; 4231 struct list_head *iter; 4232 4233 if (netif_is_bridge_port(lag_dev)) 4234 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 4235 4236 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4237 if (!netif_is_bridge_port(upper_dev)) 4238 continue; 4239 br_dev = netdev_master_upper_dev_get(upper_dev); 4240 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 4241 } 4242 } 4243 4244 static struct mlxsw_sp_lag * 4245 mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, 4246 struct netlink_ext_ack *extack) 4247 { 4248 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4249 struct mlxsw_sp_lag *lag; 4250 u16 lag_id; 4251 int i, err; 4252 4253 for (i = 0; i < mlxsw_sp->max_lag; i++) { 4254 if (!mlxsw_sp->lags[i].dev) 4255 break; 4256 } 4257 4258 if (i == mlxsw_sp->max_lag) { 4259 NL_SET_ERR_MSG_MOD(extack, 4260 "Exceeded number of supported LAG devices"); 4261 return ERR_PTR(-EBUSY); 4262 } 4263 4264 lag_id = i; 4265 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4266 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4267 if (err) 4268 return ERR_PTR(err); 4269 4270 lag = &mlxsw_sp->lags[lag_id]; 4271 lag->lag_id = lag_id; 4272 lag->dev = lag_dev; 4273 refcount_set(&lag->ref_count, 1); 4274 4275 return lag; 4276 } 4277 4278 static int 4279 mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag) 4280 { 4281 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4282 4283 lag->dev = NULL; 4284 4285 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id); 4286 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4287 } 4288 4289 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4290 u16 lag_id, u8 port_index) 4291 { 4292 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4293 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4294 4295 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 4296 lag_id, port_index); 4297 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4298 } 4299 4300 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4301 u16 lag_id) 4302 { 4303 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4304 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4305 4306 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 4307 lag_id); 4308 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4309 } 4310 4311 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 4312 u16 lag_id) 4313 { 4314 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4315 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4316 4317 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 4318 lag_id); 4319 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4320 } 4321 4322 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 4323 u16 lag_id) 4324 { 4325 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4326 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4327 4328 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 4329 lag_id); 4330 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4331 } 4332 4333 static struct mlxsw_sp_lag * 4334 mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev) 4335 { 4336 int i; 4337 4338 for (i = 0; i < mlxsw_sp->max_lag; i++) { 4339 if (!mlxsw_sp->lags[i].dev) 4340 continue; 4341 4342 if (mlxsw_sp->lags[i].dev == lag_dev) 4343 return &mlxsw_sp->lags[i]; 4344 } 4345 4346 return NULL; 4347 } 4348 4349 static struct mlxsw_sp_lag * 4350 mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, 4351 struct netlink_ext_ack *extack) 4352 { 4353 struct mlxsw_sp_lag *lag; 4354 4355 lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev); 4356 if (lag) { 4357 refcount_inc(&lag->ref_count); 4358 return lag; 4359 } 4360 4361 return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack); 4362 } 4363 4364 static void 4365 mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag) 4366 { 4367 if (!refcount_dec_and_test(&lag->ref_count)) 4368 return; 4369 4370 mlxsw_sp_lag_destroy(mlxsw_sp, lag); 4371 } 4372 4373 static bool 4374 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4375 struct net_device *lag_dev, 4376 struct netdev_lag_upper_info *lag_upper_info, 4377 struct netlink_ext_ack *extack) 4378 { 4379 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4380 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4381 return false; 4382 } 4383 return true; 4384 } 4385 4386 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4387 u16 lag_id, u8 *p_port_index) 4388 { 4389 u64 max_lag_members; 4390 int i; 4391 4392 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4393 MAX_LAG_MEMBERS); 4394 for (i = 0; i < max_lag_members; i++) { 4395 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4396 *p_port_index = i; 4397 return 0; 4398 } 4399 } 4400 return -EBUSY; 4401 } 4402 4403 static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, 4404 struct net_device *lag_dev, 4405 struct netlink_ext_ack *extack) 4406 { 4407 struct net_device *upper_dev; 4408 struct net_device *master; 4409 struct list_head *iter; 4410 int done = 0; 4411 int err; 4412 4413 master = netdev_master_upper_dev_get(lag_dev); 4414 if (master && netif_is_bridge_master(master)) { 4415 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master, 4416 extack); 4417 if (err) 4418 return err; 4419 } 4420 4421 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4422 if (!is_vlan_dev(upper_dev)) 4423 continue; 4424 4425 master = netdev_master_upper_dev_get(upper_dev); 4426 if (master && netif_is_bridge_master(master)) { 4427 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4428 upper_dev, master, 4429 extack); 4430 if (err) 4431 goto err_port_bridge_join; 4432 } 4433 4434 ++done; 4435 } 4436 4437 return 0; 4438 4439 err_port_bridge_join: 4440 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4441 if (!is_vlan_dev(upper_dev)) 4442 continue; 4443 4444 master = netdev_master_upper_dev_get(upper_dev); 4445 if (!master || !netif_is_bridge_master(master)) 4446 continue; 4447 4448 if (!done--) 4449 break; 4450 4451 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master); 4452 } 4453 4454 master = netdev_master_upper_dev_get(lag_dev); 4455 if (master && netif_is_bridge_master(master)) 4456 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master); 4457 4458 return err; 4459 } 4460 4461 static void 4462 mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4463 struct net_device *lag_dev) 4464 { 4465 struct net_device *upper_dev; 4466 struct net_device *master; 4467 struct list_head *iter; 4468 4469 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4470 if (!is_vlan_dev(upper_dev)) 4471 continue; 4472 4473 master = netdev_master_upper_dev_get(upper_dev); 4474 if (!master) 4475 continue; 4476 4477 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master); 4478 } 4479 4480 master = netdev_master_upper_dev_get(lag_dev); 4481 if (master) 4482 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master); 4483 } 4484 4485 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 4486 struct net_device *lag_dev, 4487 struct netlink_ext_ack *extack) 4488 { 4489 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4490 struct mlxsw_sp_lag *lag; 4491 u16 lag_id; 4492 u8 port_index; 4493 int err; 4494 4495 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack); 4496 if (IS_ERR(lag)) 4497 return PTR_ERR(lag); 4498 4499 lag_id = lag->lag_id; 4500 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4501 if (err) 4502 return err; 4503 4504 err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev, 4505 extack); 4506 if (err) 4507 goto err_lag_uppers_bridge_join; 4508 4509 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 4510 if (err) 4511 goto err_col_port_add; 4512 4513 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 4514 mlxsw_sp_port->local_port); 4515 mlxsw_sp_port->lag_id = lag_id; 4516 mlxsw_sp_port->lagged = 1; 4517 4518 err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port); 4519 if (err) 4520 goto err_fid_port_join_lag; 4521 4522 /* Port is no longer usable as a router interface */ 4523 if (mlxsw_sp_port->default_vlan->fid) 4524 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 4525 4526 /* Join a router interface configured on the LAG, if exists */ 4527 err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, 4528 extack); 4529 if (err) 4530 goto err_router_join; 4531 4532 err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack); 4533 if (err) 4534 goto err_replay; 4535 4536 return 0; 4537 4538 err_replay: 4539 mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev); 4540 err_router_join: 4541 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port); 4542 err_fid_port_join_lag: 4543 mlxsw_sp_port->lagged = 0; 4544 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4545 mlxsw_sp_port->local_port); 4546 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4547 err_col_port_add: 4548 mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev); 4549 err_lag_uppers_bridge_join: 4550 mlxsw_sp_lag_put(mlxsw_sp, lag); 4551 return err; 4552 } 4553 4554 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4555 struct net_device *lag_dev) 4556 { 4557 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4558 u16 lag_id = mlxsw_sp_port->lag_id; 4559 struct mlxsw_sp_lag *lag; 4560 4561 if (!mlxsw_sp_port->lagged) 4562 return; 4563 lag = &mlxsw_sp->lags[lag_id]; 4564 4565 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4566 4567 /* Any VLANs configured on the port are no longer valid */ 4568 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 4569 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 4570 /* Make the LAG and its directly linked uppers leave bridges they 4571 * are memeber in 4572 */ 4573 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 4574 4575 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port); 4576 4577 mlxsw_sp_lag_put(mlxsw_sp, lag); 4578 4579 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4580 mlxsw_sp_port->local_port); 4581 mlxsw_sp_port->lagged = 0; 4582 4583 /* Make sure untagged frames are allowed to ingress */ 4584 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, 4585 ETH_P_8021Q); 4586 } 4587 4588 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4589 u16 lag_id) 4590 { 4591 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4592 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4593 4594 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4595 mlxsw_sp_port->local_port); 4596 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4597 } 4598 4599 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4600 u16 lag_id) 4601 { 4602 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4603 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4604 4605 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4606 mlxsw_sp_port->local_port); 4607 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4608 } 4609 4610 static int 4611 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 4612 { 4613 int err; 4614 4615 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 4616 mlxsw_sp_port->lag_id); 4617 if (err) 4618 return err; 4619 4620 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4621 if (err) 4622 goto err_dist_port_add; 4623 4624 return 0; 4625 4626 err_dist_port_add: 4627 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4628 return err; 4629 } 4630 4631 static int 4632 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 4633 { 4634 int err; 4635 4636 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4637 mlxsw_sp_port->lag_id); 4638 if (err) 4639 return err; 4640 4641 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 4642 mlxsw_sp_port->lag_id); 4643 if (err) 4644 goto err_col_port_disable; 4645 4646 return 0; 4647 4648 err_col_port_disable: 4649 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4650 return err; 4651 } 4652 4653 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4654 struct netdev_lag_lower_state_info *info) 4655 { 4656 if (info->tx_enabled) 4657 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 4658 else 4659 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4660 } 4661 4662 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4663 bool enable) 4664 { 4665 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4666 enum mlxsw_reg_spms_state spms_state; 4667 char *spms_pl; 4668 u16 vid; 4669 int err; 4670 4671 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 4672 MLXSW_REG_SPMS_STATE_DISCARDING; 4673 4674 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4675 if (!spms_pl) 4676 return -ENOMEM; 4677 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4678 4679 for (vid = 0; vid < VLAN_N_VID; vid++) 4680 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4681 4682 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4683 kfree(spms_pl); 4684 return err; 4685 } 4686 4687 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4688 { 4689 u16 vid = 1; 4690 int err; 4691 4692 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4693 if (err) 4694 return err; 4695 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4696 if (err) 4697 goto err_port_stp_set; 4698 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4699 true, false); 4700 if (err) 4701 goto err_port_vlan_set; 4702 4703 for (; vid <= VLAN_N_VID - 1; vid++) { 4704 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4705 vid, false); 4706 if (err) 4707 goto err_vid_learning_set; 4708 } 4709 4710 return 0; 4711 4712 err_vid_learning_set: 4713 for (vid--; vid >= 1; vid--) 4714 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4715 err_port_vlan_set: 4716 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4717 err_port_stp_set: 4718 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4719 return err; 4720 } 4721 4722 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4723 { 4724 u16 vid; 4725 4726 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4727 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4728 vid, true); 4729 4730 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4731 false, false); 4732 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4733 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4734 } 4735 4736 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 4737 { 4738 unsigned int num_vxlans = 0; 4739 struct net_device *dev; 4740 struct list_head *iter; 4741 4742 netdev_for_each_lower_dev(br_dev, dev, iter) { 4743 if (netif_is_vxlan(dev)) 4744 num_vxlans++; 4745 } 4746 4747 return num_vxlans > 1; 4748 } 4749 4750 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4751 { 4752 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4753 struct net_device *dev; 4754 struct list_head *iter; 4755 4756 netdev_for_each_lower_dev(br_dev, dev, iter) { 4757 u16 pvid; 4758 int err; 4759 4760 if (!netif_is_vxlan(dev)) 4761 continue; 4762 4763 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4764 if (err || !pvid) 4765 continue; 4766 4767 if (test_and_set_bit(pvid, vlans)) 4768 return false; 4769 } 4770 4771 return true; 4772 } 4773 4774 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4775 struct netlink_ext_ack *extack) 4776 { 4777 if (br_multicast_enabled(br_dev)) { 4778 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4779 return false; 4780 } 4781 4782 if (!br_vlan_enabled(br_dev) && 4783 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4784 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4785 return false; 4786 } 4787 4788 if (br_vlan_enabled(br_dev) && 4789 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4790 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4791 return false; 4792 } 4793 4794 return true; 4795 } 4796 4797 static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev, 4798 struct net_device *dev) 4799 { 4800 return upper_dev == netdev_master_upper_dev_get(dev); 4801 } 4802 4803 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp, 4804 unsigned long event, void *ptr, 4805 bool process_foreign); 4806 4807 static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp, 4808 struct net_device *dev, 4809 struct netlink_ext_ack *extack) 4810 { 4811 struct net_device *upper_dev; 4812 struct list_head *iter; 4813 int err; 4814 4815 netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) { 4816 struct netdev_notifier_changeupper_info info = { 4817 .info = { 4818 .dev = dev, 4819 .extack = extack, 4820 }, 4821 .master = mlxsw_sp_netdev_is_master(upper_dev, dev), 4822 .upper_dev = upper_dev, 4823 .linking = true, 4824 4825 /* upper_info is relevant for LAG devices. But we would 4826 * only need this if LAG were a valid upper above 4827 * another upper (e.g. a bridge that is a member of a 4828 * LAG), and that is never a valid configuration. So we 4829 * can keep this as NULL. 4830 */ 4831 .upper_info = NULL, 4832 }; 4833 4834 err = __mlxsw_sp_netdevice_event(mlxsw_sp, 4835 NETDEV_PRECHANGEUPPER, 4836 &info, true); 4837 if (err) 4838 return err; 4839 4840 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev, 4841 extack); 4842 if (err) 4843 return err; 4844 } 4845 4846 return 0; 4847 } 4848 4849 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 4850 struct net_device *dev, 4851 unsigned long event, void *ptr, 4852 bool replay_deslavement) 4853 { 4854 struct netdev_notifier_changeupper_info *info; 4855 struct mlxsw_sp_port *mlxsw_sp_port; 4856 struct netlink_ext_ack *extack; 4857 struct net_device *upper_dev; 4858 struct mlxsw_sp *mlxsw_sp; 4859 int err = 0; 4860 u16 proto; 4861 4862 mlxsw_sp_port = netdev_priv(dev); 4863 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4864 info = ptr; 4865 extack = netdev_notifier_info_to_extack(&info->info); 4866 4867 switch (event) { 4868 case NETDEV_PRECHANGEUPPER: 4869 upper_dev = info->upper_dev; 4870 if (!is_vlan_dev(upper_dev) && 4871 !netif_is_lag_master(upper_dev) && 4872 !netif_is_bridge_master(upper_dev) && 4873 !netif_is_ovs_master(upper_dev) && 4874 !netif_is_macvlan(upper_dev) && 4875 !netif_is_l3_master(upper_dev)) { 4876 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4877 return -EINVAL; 4878 } 4879 if (!info->linking) 4880 break; 4881 if (netif_is_bridge_master(upper_dev) && 4882 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4883 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4884 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4885 return -EOPNOTSUPP; 4886 if (netdev_has_any_upper_dev(upper_dev) && 4887 (!netif_is_bridge_master(upper_dev) || 4888 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4889 upper_dev))) { 4890 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, 4891 upper_dev, 4892 extack); 4893 if (err) 4894 return err; 4895 } 4896 if (netif_is_lag_master(upper_dev) && 4897 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4898 info->upper_info, extack)) 4899 return -EINVAL; 4900 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4901 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4902 return -EINVAL; 4903 } 4904 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4905 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4906 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4907 return -EINVAL; 4908 } 4909 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4910 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 4911 return -EINVAL; 4912 } 4913 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4914 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4915 return -EINVAL; 4916 } 4917 if (netif_is_bridge_master(upper_dev)) { 4918 br_vlan_get_proto(upper_dev, &proto); 4919 if (br_vlan_enabled(upper_dev) && 4920 proto != ETH_P_8021Q && proto != ETH_P_8021AD) { 4921 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported"); 4922 return -EOPNOTSUPP; 4923 } 4924 if (vlan_uses_dev(lower_dev) && 4925 br_vlan_enabled(upper_dev) && 4926 proto == ETH_P_8021AD) { 4927 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported"); 4928 return -EOPNOTSUPP; 4929 } 4930 } 4931 if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) { 4932 struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev); 4933 4934 if (br_vlan_enabled(br_dev)) { 4935 br_vlan_get_proto(br_dev, &proto); 4936 if (proto == ETH_P_8021AD) { 4937 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge"); 4938 return -EOPNOTSUPP; 4939 } 4940 } 4941 } 4942 if (is_vlan_dev(upper_dev) && 4943 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4944 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4945 return -EOPNOTSUPP; 4946 } 4947 if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) { 4948 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port"); 4949 return -EOPNOTSUPP; 4950 } 4951 break; 4952 case NETDEV_CHANGEUPPER: 4953 upper_dev = info->upper_dev; 4954 if (netif_is_bridge_master(upper_dev)) { 4955 if (info->linking) { 4956 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4957 lower_dev, 4958 upper_dev, 4959 extack); 4960 } else { 4961 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4962 lower_dev, 4963 upper_dev); 4964 if (!replay_deslavement) 4965 break; 4966 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, 4967 lower_dev); 4968 } 4969 } else if (netif_is_lag_master(upper_dev)) { 4970 if (info->linking) { 4971 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4972 upper_dev, extack); 4973 } else { 4974 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4975 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4976 upper_dev); 4977 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, 4978 dev); 4979 } 4980 } else if (netif_is_ovs_master(upper_dev)) { 4981 if (info->linking) 4982 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4983 else 4984 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4985 } else if (netif_is_macvlan(upper_dev)) { 4986 if (!info->linking) 4987 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4988 } else if (is_vlan_dev(upper_dev)) { 4989 struct net_device *br_dev; 4990 4991 if (!netif_is_bridge_port(upper_dev)) 4992 break; 4993 if (info->linking) 4994 break; 4995 br_dev = netdev_master_upper_dev_get(upper_dev); 4996 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 4997 br_dev); 4998 } 4999 break; 5000 } 5001 5002 return err; 5003 } 5004 5005 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 5006 unsigned long event, void *ptr) 5007 { 5008 struct netdev_notifier_changelowerstate_info *info; 5009 struct mlxsw_sp_port *mlxsw_sp_port; 5010 int err; 5011 5012 mlxsw_sp_port = netdev_priv(dev); 5013 info = ptr; 5014 5015 switch (event) { 5016 case NETDEV_CHANGELOWERSTATE: 5017 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 5018 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 5019 info->lower_state_info); 5020 if (err) 5021 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 5022 } 5023 break; 5024 } 5025 5026 return 0; 5027 } 5028 5029 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 5030 struct net_device *port_dev, 5031 unsigned long event, void *ptr, 5032 bool replay_deslavement) 5033 { 5034 switch (event) { 5035 case NETDEV_PRECHANGEUPPER: 5036 case NETDEV_CHANGEUPPER: 5037 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 5038 event, ptr, 5039 replay_deslavement); 5040 case NETDEV_CHANGELOWERSTATE: 5041 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 5042 ptr); 5043 } 5044 5045 return 0; 5046 } 5047 5048 /* Called for LAG or its upper VLAN after the per-LAG-lower processing was done, 5049 * to do any per-LAG / per-LAG-upper processing. 5050 */ 5051 static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev, 5052 unsigned long event, 5053 void *ptr) 5054 { 5055 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev); 5056 struct netdev_notifier_changeupper_info *info = ptr; 5057 5058 if (!mlxsw_sp) 5059 return 0; 5060 5061 switch (event) { 5062 case NETDEV_CHANGEUPPER: 5063 if (info->linking) 5064 break; 5065 if (netif_is_bridge_master(info->upper_dev)) 5066 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev); 5067 break; 5068 } 5069 return 0; 5070 } 5071 5072 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 5073 unsigned long event, void *ptr) 5074 { 5075 struct net_device *dev; 5076 struct list_head *iter; 5077 int ret; 5078 5079 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5080 if (mlxsw_sp_port_dev_check(dev)) { 5081 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 5082 ptr, false); 5083 if (ret) 5084 return ret; 5085 } 5086 } 5087 5088 return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr); 5089 } 5090 5091 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 5092 struct net_device *dev, 5093 unsigned long event, void *ptr, 5094 u16 vid, bool replay_deslavement) 5095 { 5096 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 5097 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5098 struct netdev_notifier_changeupper_info *info = ptr; 5099 struct netlink_ext_ack *extack; 5100 struct net_device *upper_dev; 5101 int err = 0; 5102 5103 extack = netdev_notifier_info_to_extack(&info->info); 5104 5105 switch (event) { 5106 case NETDEV_PRECHANGEUPPER: 5107 upper_dev = info->upper_dev; 5108 if (!netif_is_bridge_master(upper_dev) && 5109 !netif_is_macvlan(upper_dev) && 5110 !netif_is_l3_master(upper_dev)) { 5111 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5112 return -EINVAL; 5113 } 5114 if (!info->linking) 5115 break; 5116 if (netif_is_bridge_master(upper_dev) && 5117 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5118 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5119 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5120 return -EOPNOTSUPP; 5121 if (netdev_has_any_upper_dev(upper_dev) && 5122 (!netif_is_bridge_master(upper_dev) || 5123 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5124 upper_dev))) { 5125 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, 5126 upper_dev, 5127 extack); 5128 if (err) 5129 return err; 5130 } 5131 break; 5132 case NETDEV_CHANGEUPPER: 5133 upper_dev = info->upper_dev; 5134 if (netif_is_bridge_master(upper_dev)) { 5135 if (info->linking) { 5136 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5137 vlan_dev, 5138 upper_dev, 5139 extack); 5140 } else { 5141 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5142 vlan_dev, 5143 upper_dev); 5144 if (!replay_deslavement) 5145 break; 5146 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, 5147 vlan_dev); 5148 } 5149 } else if (netif_is_macvlan(upper_dev)) { 5150 if (!info->linking) 5151 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5152 } 5153 break; 5154 } 5155 5156 return err; 5157 } 5158 5159 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 5160 struct net_device *lag_dev, 5161 unsigned long event, 5162 void *ptr, u16 vid) 5163 { 5164 struct net_device *dev; 5165 struct list_head *iter; 5166 int ret; 5167 5168 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5169 if (mlxsw_sp_port_dev_check(dev)) { 5170 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 5171 event, ptr, 5172 vid, false); 5173 if (ret) 5174 return ret; 5175 } 5176 } 5177 5178 return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr); 5179 } 5180 5181 static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp, 5182 struct net_device *vlan_dev, 5183 struct net_device *br_dev, 5184 unsigned long event, void *ptr, 5185 u16 vid, bool process_foreign) 5186 { 5187 struct netdev_notifier_changeupper_info *info = ptr; 5188 struct netlink_ext_ack *extack; 5189 struct net_device *upper_dev; 5190 5191 if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev)) 5192 return 0; 5193 5194 extack = netdev_notifier_info_to_extack(&info->info); 5195 5196 switch (event) { 5197 case NETDEV_PRECHANGEUPPER: 5198 upper_dev = info->upper_dev; 5199 if (!netif_is_macvlan(upper_dev) && 5200 !netif_is_l3_master(upper_dev)) { 5201 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5202 return -EOPNOTSUPP; 5203 } 5204 break; 5205 case NETDEV_CHANGEUPPER: 5206 upper_dev = info->upper_dev; 5207 if (info->linking) 5208 break; 5209 if (netif_is_macvlan(upper_dev)) 5210 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5211 break; 5212 } 5213 5214 return 0; 5215 } 5216 5217 static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp, 5218 struct net_device *vlan_dev, 5219 unsigned long event, void *ptr, 5220 bool process_foreign) 5221 { 5222 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 5223 u16 vid = vlan_dev_vlan_id(vlan_dev); 5224 5225 if (mlxsw_sp_port_dev_check(real_dev)) 5226 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 5227 event, ptr, vid, 5228 true); 5229 else if (netif_is_lag_master(real_dev)) 5230 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 5231 real_dev, event, 5232 ptr, vid); 5233 else if (netif_is_bridge_master(real_dev)) 5234 return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev, 5235 real_dev, event, 5236 ptr, vid, 5237 process_foreign); 5238 5239 return 0; 5240 } 5241 5242 static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp, 5243 struct net_device *br_dev, 5244 unsigned long event, void *ptr, 5245 bool process_foreign) 5246 { 5247 struct netdev_notifier_changeupper_info *info = ptr; 5248 struct netlink_ext_ack *extack; 5249 struct net_device *upper_dev; 5250 u16 proto; 5251 5252 if (!process_foreign && !mlxsw_sp_lower_get(br_dev)) 5253 return 0; 5254 5255 extack = netdev_notifier_info_to_extack(&info->info); 5256 5257 switch (event) { 5258 case NETDEV_PRECHANGEUPPER: 5259 upper_dev = info->upper_dev; 5260 if (!is_vlan_dev(upper_dev) && 5261 !netif_is_macvlan(upper_dev) && 5262 !netif_is_l3_master(upper_dev)) { 5263 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5264 return -EOPNOTSUPP; 5265 } 5266 if (!info->linking) 5267 break; 5268 if (br_vlan_enabled(br_dev)) { 5269 br_vlan_get_proto(br_dev, &proto); 5270 if (proto == ETH_P_8021AD) { 5271 NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge"); 5272 return -EOPNOTSUPP; 5273 } 5274 } 5275 if (is_vlan_dev(upper_dev) && 5276 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 5277 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 5278 return -EOPNOTSUPP; 5279 } 5280 break; 5281 case NETDEV_CHANGEUPPER: 5282 upper_dev = info->upper_dev; 5283 if (info->linking) 5284 break; 5285 if (is_vlan_dev(upper_dev)) 5286 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 5287 if (netif_is_macvlan(upper_dev)) 5288 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5289 break; 5290 } 5291 5292 return 0; 5293 } 5294 5295 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 5296 unsigned long event, void *ptr) 5297 { 5298 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 5299 struct netdev_notifier_changeupper_info *info = ptr; 5300 struct netlink_ext_ack *extack; 5301 struct net_device *upper_dev; 5302 5303 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 5304 return 0; 5305 5306 extack = netdev_notifier_info_to_extack(&info->info); 5307 upper_dev = info->upper_dev; 5308 5309 if (!netif_is_l3_master(upper_dev)) { 5310 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5311 return -EOPNOTSUPP; 5312 } 5313 5314 return 0; 5315 } 5316 5317 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 5318 struct net_device *dev, 5319 unsigned long event, void *ptr) 5320 { 5321 struct netdev_notifier_changeupper_info *cu_info; 5322 struct netdev_notifier_info *info = ptr; 5323 struct netlink_ext_ack *extack; 5324 struct net_device *upper_dev; 5325 5326 extack = netdev_notifier_info_to_extack(info); 5327 5328 switch (event) { 5329 case NETDEV_CHANGEUPPER: 5330 cu_info = container_of(info, 5331 struct netdev_notifier_changeupper_info, 5332 info); 5333 upper_dev = cu_info->upper_dev; 5334 if (!netif_is_bridge_master(upper_dev)) 5335 return 0; 5336 if (!mlxsw_sp_lower_get(upper_dev)) 5337 return 0; 5338 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5339 return -EOPNOTSUPP; 5340 if (cu_info->linking) { 5341 if (!netif_running(dev)) 5342 return 0; 5343 /* When the bridge is VLAN-aware, the VNI of the VxLAN 5344 * device needs to be mapped to a VLAN, but at this 5345 * point no VLANs are configured on the VxLAN device 5346 */ 5347 if (br_vlan_enabled(upper_dev)) 5348 return 0; 5349 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 5350 dev, 0, extack); 5351 } else { 5352 /* VLANs were already flushed, which triggered the 5353 * necessary cleanup 5354 */ 5355 if (br_vlan_enabled(upper_dev)) 5356 return 0; 5357 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5358 } 5359 break; 5360 case NETDEV_PRE_UP: 5361 upper_dev = netdev_master_upper_dev_get(dev); 5362 if (!upper_dev) 5363 return 0; 5364 if (!netif_is_bridge_master(upper_dev)) 5365 return 0; 5366 if (!mlxsw_sp_lower_get(upper_dev)) 5367 return 0; 5368 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 5369 extack); 5370 case NETDEV_DOWN: 5371 upper_dev = netdev_master_upper_dev_get(dev); 5372 if (!upper_dev) 5373 return 0; 5374 if (!netif_is_bridge_master(upper_dev)) 5375 return 0; 5376 if (!mlxsw_sp_lower_get(upper_dev)) 5377 return 0; 5378 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5379 break; 5380 } 5381 5382 return 0; 5383 } 5384 5385 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp, 5386 unsigned long event, void *ptr, 5387 bool process_foreign) 5388 { 5389 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5390 struct mlxsw_sp_span_entry *span_entry; 5391 int err = 0; 5392 5393 if (event == NETDEV_UNREGISTER) { 5394 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 5395 if (span_entry) 5396 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 5397 } 5398 5399 if (netif_is_vxlan(dev)) 5400 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 5401 else if (mlxsw_sp_port_dev_check(dev)) 5402 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true); 5403 else if (netif_is_lag_master(dev)) 5404 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 5405 else if (is_vlan_dev(dev)) 5406 err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr, 5407 process_foreign); 5408 else if (netif_is_bridge_master(dev)) 5409 err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr, 5410 process_foreign); 5411 else if (netif_is_macvlan(dev)) 5412 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 5413 5414 return err; 5415 } 5416 5417 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 5418 unsigned long event, void *ptr) 5419 { 5420 struct mlxsw_sp *mlxsw_sp; 5421 int err; 5422 5423 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 5424 mlxsw_sp_span_respin(mlxsw_sp); 5425 err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false); 5426 5427 return notifier_from_errno(err); 5428 } 5429 5430 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 5431 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 5432 {0, }, 5433 }; 5434 5435 static struct pci_driver mlxsw_sp1_pci_driver = { 5436 .name = mlxsw_sp1_driver_name, 5437 .id_table = mlxsw_sp1_pci_id_table, 5438 }; 5439 5440 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 5441 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 5442 {0, }, 5443 }; 5444 5445 static struct pci_driver mlxsw_sp2_pci_driver = { 5446 .name = mlxsw_sp2_driver_name, 5447 .id_table = mlxsw_sp2_pci_id_table, 5448 }; 5449 5450 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 5451 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 5452 {0, }, 5453 }; 5454 5455 static struct pci_driver mlxsw_sp3_pci_driver = { 5456 .name = mlxsw_sp3_driver_name, 5457 .id_table = mlxsw_sp3_pci_id_table, 5458 }; 5459 5460 static const struct pci_device_id mlxsw_sp4_pci_id_table[] = { 5461 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0}, 5462 {0, }, 5463 }; 5464 5465 static struct pci_driver mlxsw_sp4_pci_driver = { 5466 .name = mlxsw_sp4_driver_name, 5467 .id_table = mlxsw_sp4_pci_id_table, 5468 }; 5469 5470 static int __init mlxsw_sp_module_init(void) 5471 { 5472 int err; 5473 5474 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 5475 if (err) 5476 return err; 5477 5478 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 5479 if (err) 5480 goto err_sp2_core_driver_register; 5481 5482 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 5483 if (err) 5484 goto err_sp3_core_driver_register; 5485 5486 err = mlxsw_core_driver_register(&mlxsw_sp4_driver); 5487 if (err) 5488 goto err_sp4_core_driver_register; 5489 5490 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 5491 if (err) 5492 goto err_sp1_pci_driver_register; 5493 5494 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 5495 if (err) 5496 goto err_sp2_pci_driver_register; 5497 5498 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 5499 if (err) 5500 goto err_sp3_pci_driver_register; 5501 5502 err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver); 5503 if (err) 5504 goto err_sp4_pci_driver_register; 5505 5506 return 0; 5507 5508 err_sp4_pci_driver_register: 5509 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5510 err_sp3_pci_driver_register: 5511 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5512 err_sp2_pci_driver_register: 5513 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5514 err_sp1_pci_driver_register: 5515 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5516 err_sp4_core_driver_register: 5517 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5518 err_sp3_core_driver_register: 5519 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5520 err_sp2_core_driver_register: 5521 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5522 return err; 5523 } 5524 5525 static void __exit mlxsw_sp_module_exit(void) 5526 { 5527 mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver); 5528 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5529 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5530 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5531 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5532 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5533 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5534 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5535 } 5536 5537 module_init(mlxsw_sp_module_init); 5538 module_exit(mlxsw_sp_module_exit); 5539 5540 MODULE_LICENSE("Dual BSD/GPL"); 5541 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5542 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 5543 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5544 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5545 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 5546 MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); 5547 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5548 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 5549 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 5550 MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME); 5551