// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2010
#define MLXSW_SP1_FWREV_SUBMINOR 1006
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
        .major = MLXSW_SP1_FWREV_MAJOR,
        .minor = MLXSW_SP1_FWREV_MINOR,
        .subminor = MLXSW_SP1_FWREV_SUBMINOR,
        .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
        "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP1_FWREV_MINOR) \
        "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2010
#define MLXSW_SP2_FWREV_SUBMINOR 1006

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
        .major = MLXSW_SP2_FWREV_MAJOR,
        .minor = MLXSW_SP2_FWREV_MINOR,
        .subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
        "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP2_FWREV_MINOR) \
        "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2010
#define MLXSW_SP3_FWREV_SUBMINOR 1006

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
        .major = MLXSW_SP3_FWREV_MAJOR,
        .minor = MLXSW_SP3_FWREV_MINOR,
        .subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
        "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP3_FWREV_MINOR) \
        "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
110 */ 111 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 112 113 /* tx_hdr_ctl 114 * Packet control type. 115 * 0 - Ethernet control (e.g. EMADs, LACP) 116 * 1 - Ethernet data 117 */ 118 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 119 120 /* tx_hdr_proto 121 * Packet protocol type. Must be set to 1 (Ethernet). 122 */ 123 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 124 125 /* tx_hdr_rx_is_router 126 * Packet is sent from the router. Valid for data packets only. 127 */ 128 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 129 130 /* tx_hdr_fid_valid 131 * Indicates if the 'fid' field is valid and should be used for 132 * forwarding lookup. Valid for data packets only. 133 */ 134 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 135 136 /* tx_hdr_swid 137 * Switch partition ID. Must be set to 0. 138 */ 139 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 140 141 /* tx_hdr_control_tclass 142 * Indicates if the packet should use the control TClass and not one 143 * of the data TClasses. 144 */ 145 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 146 147 /* tx_hdr_etclass 148 * Egress TClass to be used on the egress device on the egress port. 149 */ 150 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 151 152 /* tx_hdr_port_mid 153 * Destination local port for unicast packets. 154 * Destination multicast ID for multicast packets. 155 * 156 * Control packets are directed to a specific egress port, while data 157 * packets are transmitted through the CPU port (0) into the switch partition, 158 * where forwarding rules are applied. 159 */ 160 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 161 162 /* tx_hdr_fid 163 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 164 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 165 * Valid for data packets only. 
166 */ 167 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 168 169 /* tx_hdr_type 170 * 0 - Data packets 171 * 6 - Control packets 172 */ 173 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 174 175 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 176 unsigned int counter_index, u64 *packets, 177 u64 *bytes) 178 { 179 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 180 int err; 181 182 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 183 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 184 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 185 if (err) 186 return err; 187 if (packets) 188 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 189 if (bytes) 190 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 191 return 0; 192 } 193 194 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 195 unsigned int counter_index) 196 { 197 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 198 199 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 200 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 201 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 202 } 203 204 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 205 unsigned int *p_counter_index) 206 { 207 int err; 208 209 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 210 p_counter_index); 211 if (err) 212 return err; 213 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 214 if (err) 215 goto err_counter_clear; 216 return 0; 217 218 err_counter_clear: 219 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 220 *p_counter_index); 221 return err; 222 } 223 224 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 225 unsigned int counter_index) 226 { 227 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 228 counter_index); 229 } 230 231 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 232 const struct mlxsw_tx_info *tx_info) 233 { 234 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 235 236 memset(txhdr, 0, MLXSW_TXHDR_LEN); 237 238 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 239 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 240 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 241 mlxsw_tx_hdr_swid_set(txhdr, 0); 242 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 243 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 244 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 245 } 246 247 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 248 { 249 switch (state) { 250 case BR_STATE_FORWARDING: 251 return MLXSW_REG_SPMS_STATE_FORWARDING; 252 case BR_STATE_LEARNING: 253 return MLXSW_REG_SPMS_STATE_LEARNING; 254 case BR_STATE_LISTENING: 255 case BR_STATE_DISABLED: 256 case BR_STATE_BLOCKING: 257 return MLXSW_REG_SPMS_STATE_DISCARDING; 258 default: 259 BUG(); 260 } 261 } 262 263 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 264 u8 state) 265 { 266 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 267 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 268 char *spms_pl; 269 int err; 270 271 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 272 if (!spms_pl) 273 return -ENOMEM; 274 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 275 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 276 277 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 278 kfree(spms_pl); 279 return err; 280 } 281 282 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 283 { 284 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 285 int err; 286 
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
                                     const struct mlxsw_tx_info *tx_info)
{
        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

        memset(txhdr, 0, MLXSW_TXHDR_LEN);

        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
        mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
        mlxsw_tx_hdr_swid_set(txhdr, 0);
        mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
        mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
        switch (state) {
        case BR_STATE_FORWARDING:
                return MLXSW_REG_SPMS_STATE_FORWARDING;
        case BR_STATE_LEARNING:
                return MLXSW_REG_SPMS_STATE_LEARNING;
        case BR_STATE_LISTENING:
        case BR_STATE_DISABLED:
        case BR_STATE_BLOCKING:
                return MLXSW_REG_SPMS_STATE_DISCARDING;
        default:
                BUG();
        }
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                              u8 state)
{
        enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spms_pl;
        int err;

        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
        if (!spms_pl)
                return -ENOMEM;
        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
        mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
        kfree(spms_pl);
        return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
        char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
        int err;

        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
        if (err)
                return err;
        mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
        return 0;
}

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                   bool is_up)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char paos_pl[MLXSW_REG_PAOS_LEN];

        mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
                            is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
                            MLXSW_PORT_ADMIN_STATUS_DOWN);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      const unsigned char *addr)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char ppad_pl[MLXSW_REG_PPAD_LEN];

        mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
        mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
                        mlxsw_sp_port->local_port);
        return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
                                          mlxsw_sp_port->dev->dev_addr);
}

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmtu_pl[MLXSW_REG_PMTU_LEN];
        int err;

        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
        if (err)
                return err;

        *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
        return 0;
}
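
/* The MTU programmed to the device (PMTU) includes overhead that is not part
 * of the netdev MTU: the Tx header and the Ethernet header. E.g., a netdev
 * MTU of 1500 is programmed as 1500 + MLXSW_TXHDR_LEN + ETH_HLEN, and the
 * result must not exceed the maximum MTU read from the device.
 */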
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmtu_pl[MLXSW_REG_PMTU_LEN];

        mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
        if (mtu > mlxsw_sp_port->max_mtu)
                return -EINVAL;

        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
                                  u16 local_port, u8 swid)
{
        char pspa_pl[MLXSW_REG_PSPA_LEN];

        mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char svpe_pl[MLXSW_REG_SVPE_LEN];

        mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                                   bool learn_enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spvmlr_pl;
        int err;

        spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
        if (!spvmlr_pl)
                return -ENOMEM;
        mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
                              learn_enable);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
        kfree(spvmlr_pl);
        return err;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
        switch (ethtype) {
        case ETH_P_8021Q:
                *p_sver_type = 0;
                break;
        case ETH_P_8021AD:
                *p_sver_type = 1;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 ethtype)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spevet_pl[MLXSW_REG_SPEVET_LEN];
        u8 sver_type;
        int err;

        err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
        if (err)
                return err;

        mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                    u16 vid, u16 ethtype)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spvid_pl[MLXSW_REG_SPVID_LEN];
        u8 sver_type;
        int err;

        err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
        if (err)
                return err;

        mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
                             sver_type);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                            bool allow)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spaft_pl[MLXSW_REG_SPAFT_LEN];

        mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                           u16 ethtype)
{
        int err;

        if (!vid) {
                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
                if (err)
                        return err;
        } else {
                err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
                if (err)
                        return err;
                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
                if (err)
                        goto err_port_allow_untagged_set;
        }

        mlxsw_sp_port->pvid = vid;
        return 0;

err_port_allow_untagged_set:
        __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
        return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sspr_pl[MLXSW_REG_SSPR_LEN];

        mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
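
/* Read the port-to-module mapping (PMLP) and reject configurations the
 * driver does not support: the lane width must be a power of 2, all lanes
 * must belong to the same module, and when Rx and Tx lanes are separate
 * they must match and be sequential.
 */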
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                              struct mlxsw_sp_port_mapping *port_mapping)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        bool separate_rxtx;
        u8 module;
        u8 width;
        int err;
        int i;

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                return err;
        module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
        width = mlxsw_reg_pmlp_width_get(pmlp_pl);
        separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

        if (width && !is_power_of_2(width)) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
                        local_port);
                return -EINVAL;
        }

        for (i = 0; i < width; i++) {
                if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
                                local_port);
                        return -EINVAL;
                }
                if (separate_rxtx &&
                    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
                    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
                                local_port);
                        return -EINVAL;
                }
                if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
                                local_port);
                        return -EINVAL;
                }
        }

        port_mapping->module = module;
        port_mapping->width = width;
        port_mapping->module_width = width;
        port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
        return 0;
}

static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                         const struct mlxsw_sp_port_mapping *port_mapping)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int i, err;

        mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
        for (i = 0; i < port_mapping->width; i++) {
                mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
                mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
        }

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                goto err_pmlp_write;
        return 0;

err_pmlp_write:
        mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
        return err;
}

static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                                       u8 module)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        int err;

        err = mlxsw_env_module_port_up(mlxsw_sp->core,
                                       mlxsw_sp_port->mapping.module);
        if (err)
                return err;
        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
        if (err)
                goto err_port_admin_status_set;
        netif_start_queue(dev);
        return 0;

err_port_admin_status_set:
        mlxsw_env_module_port_down(mlxsw_sp->core,
                                   mlxsw_sp_port->mapping.module);
        return err;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        netif_stop_queue(dev);
        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
        mlxsw_env_module_port_down(mlxsw_sp->core,
                                   mlxsw_sp_port->mapping.module);
        return 0;
}
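
/* Transmit path: ensure headroom for the Tx header, return NETDEV_TX_BUSY
 * (so the stack retries) if the core Tx queue is busy, pad short frames,
 * prepend the Tx header and hand the skb to the core. On success the skb
 * is owned by the core; on failure it is dropped and counted here.
 */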
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
        const struct mlxsw_tx_info tx_info = {
                .local_port = mlxsw_sp_port->local_port,
                .is_emad = false,
        };
        u64 len;
        int err;

        if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
                return NETDEV_TX_BUSY;

        if (eth_skb_pad(skb)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                return NETDEV_TX_OK;
        }

        mlxsw_sp_txhdr_construct(skb, &tx_info);
        /* TX header is consumed by HW on the way so we shouldn't count its
         * bytes as being sent.
         */
        len = skb->len - MLXSW_TXHDR_LEN;

        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
        err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

        if (!err) {
                pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct sockaddr *addr = p;
        int err;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
        if (err)
                return err;
        eth_hw_addr_set(dev, addr->sa_data);
        return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_hdroom orig_hdroom;
        struct mlxsw_sp_hdroom hdroom;
        int err;

        orig_hdroom = *mlxsw_sp_port->hdroom;

        hdroom = orig_hdroom;
        hdroom.mtu = mtu;
        mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

        err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
        if (err) {
                netdev_err(dev, "Failed to configure port's headroom\n");
                return err;
        }

        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
        if (err)
                goto err_port_mtu_set;
        dev->mtu = mtu;
        return 0;

err_port_mtu_set:
        mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
        return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port_pcpu_stats *p;
        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
        u32 tx_dropped = 0;
        unsigned int start;
        int i;

        for_each_possible_cpu(i) {
                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        rx_packets = p->rx_packets;
                        rx_bytes = p->rx_bytes;
                        tx_packets = p->tx_packets;
                        tx_bytes = p->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));

                stats->rx_packets += rx_packets;
                stats->rx_bytes += rx_bytes;
                stats->tx_packets += tx_packets;
                stats->tx_bytes += tx_bytes;
                /* tx_dropped is u32, updated without syncp protection. */
                tx_dropped += p->tx_dropped;
        }
        stats->tx_dropped = tx_dropped;
        return 0;
}
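
/* Offload stats: IFLA_OFFLOAD_XSTATS_CPU_HIT reports the software counters
 * gathered in mlxsw_sp_port_get_sw_stats64() above, i.e. only the traffic
 * that actually passed through the CPU. The regular ndo_get_stats64 counters
 * are taken from hardware and cover switched traffic as well.
 */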
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return true;
        }

        return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
                                           void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlxsw_sp_port_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
                                int prio, char *ppcnt_pl)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
        return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
                                      struct rtnl_link_stats64 *stats)
{
        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
        int err;

        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
                                          0, ppcnt_pl);
        if (err)
                goto out;

        stats->tx_packets =
                mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
        stats->rx_packets =
                mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
        stats->tx_bytes =
                mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
        stats->rx_bytes =
                mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
        stats->multicast =
                mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

        stats->rx_crc_errors =
                mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
        stats->rx_frame_errors =
                mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

        stats->rx_length_errors = (
                mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
                mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
                mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

        stats->rx_errors = (stats->rx_crc_errors +
                stats->rx_frame_errors + stats->rx_length_errors);

out:
        return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
                            struct mlxsw_sp_port_xstats *xstats)
{
        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
        int err, i;

        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
                                          ppcnt_pl);
        if (!err)
                xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

        for (i = 0; i < TC_MAX_QUEUE; i++) {
                err = mlxsw_sp_port_get_stats_raw(dev,
                                                  MLXSW_REG_PPCNT_TC_CONG_CNT,
                                                  i, ppcnt_pl);
                if (err)
                        goto tc_cnt;

                xstats->wred_drop[i] =
                        mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
                xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
                                                  i, ppcnt_pl);
                if (err)
                        continue;

                xstats->backlog[i] =
                        mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
                xstats->tail_drop[i] =
                        mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
        }

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
                                                  i, ppcnt_pl);
                if (err)
                        continue;

                xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
                xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
        }
}
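
/* Refresh the cached HW statistics once per MLXSW_HW_STATS_UPDATE_TIME.
 * The cache is needed because ndo_get_stats64 may be called in atomic
 * context, while the register queries above can sleep; see
 * mlxsw_sp_port_get_stats64() below.
 */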
static void update_stats_cache(struct work_struct *work)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                container_of(work, struct mlxsw_sp_port,
                             periodic_hw_stats.update_dw.work);

        if (!netif_carrier_ok(mlxsw_sp_port->dev))
                /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
                 * necessary when port goes down.
                 */
                goto out;

        mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
                                   &mlxsw_sp_port->periodic_hw_stats.stats);
        mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
                                    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
        mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
                               MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
                          struct rtnl_link_stats64 *stats)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                    u16 vid_begin, u16 vid_end,
                                    bool is_member, bool untagged)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spvm_pl;
        int err;

        spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
        if (!spvm_pl)
                return -ENOMEM;

        mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
                            vid_end, is_member, untagged);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
        kfree(spvm_pl);
        return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
                           u16 vid_end, bool is_member, bool untagged)
{
        u16 vid, vid_e;
        int err;

        for (vid = vid_begin; vid <= vid_end;
             vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
                vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
                            vid_end);

                err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
                                               is_member, untagged);
                if (err)
                        return err;
        }

        return 0;
}
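
/* Illustrative use of the range helper above: adding a port as a tagged
 * member of VIDs 100-199 in one call:
 *
 *      err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 100, 199, true, false);
 *
 * The helper splits the range into SPVM transactions of at most
 * MLXSW_REG_SPVM_REC_MAX_COUNT records each.
 */
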
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
                                     bool flush_default)
{
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

        list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
                                 &mlxsw_sp_port->vlans_list, list) {
                if (!flush_default &&
                    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
                        continue;
                mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
        }
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
        if (mlxsw_sp_port_vlan->bridge_port)
                mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
        else if (mlxsw_sp_port_vlan->fid)
                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
        bool untagged = vid == MLXSW_SP_DEFAULT_VID;
        int err;

        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
        if (mlxsw_sp_port_vlan)
                return ERR_PTR(-EEXIST);

        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
        if (err)
                return ERR_PTR(err);

        mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
        if (!mlxsw_sp_port_vlan) {
                err = -ENOMEM;
                goto err_port_vlan_alloc;
        }

        mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
        mlxsw_sp_port_vlan->vid = vid;
        list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

        return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
        return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
        u16 vid = mlxsw_sp_port_vlan->vid;

        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
        list_del(&mlxsw_sp_port_vlan->list);
        kfree(mlxsw_sp_port_vlan);
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
                                 __be16 __always_unused proto, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        /* VLAN 0 is added to HW filter when device goes up, but it is
         * reserved in our case, so simply return.
         */
        if (!vid)
                return 0;

        return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
                                  __be16 __always_unused proto, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

        /* VLAN 0 is removed from HW filter when device goes down, but
         * it is reserved in our case, so simply return.
         */
        if (!vid)
                return 0;

        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
        if (!mlxsw_sp_port_vlan)
                return 0;
        mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

        return 0;
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct flow_block_offload *f)
{
        switch (f->binder_type) {
        case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
                return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
        case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
                return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
        case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
                return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
        case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
                return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
                             void *type_data)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_BLOCK:
                return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_RED:
                return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_PRIO:
                return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_ETS:
                return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_TBF:
                return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_FIFO:
                return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        if (!enable) {
                if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
                    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
                        netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
                        return -EINVAL;
                }
                mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
                mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
        } else {
                mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
                mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
        }
        return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        char pplr_pl[MLXSW_REG_PPLR_LEN];
        int err;

        if (netif_running(dev))
                mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

        mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
        err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
                              pplr_pl);

        if (netif_running(dev))
                mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

        return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
                                   netdev_features_t wanted_features,
                                   netdev_features_t feature,
                                   mlxsw_sp_feature_handler feature_handler)
{
        netdev_features_t changes = wanted_features ^ dev->features;
        bool enable = !!(wanted_features & feature);
        int err;

        if (!(changes & feature))
                return 0;

        err = feature_handler(dev, enable);
        if (err) {
                netdev_err(dev, "%s feature %pNF failed, err %d\n",
                           enable ? "Enable" : "Disable", &feature, err);
                return err;
        }

        if (enable)
                dev->features |= feature;
        else
                dev->features &= ~feature;

        return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
                                 netdev_features_t features)
{
        netdev_features_t oper_features = dev->features;
        int err = 0;

        err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
                                       mlxsw_sp_feature_hw_tc);
        err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
                                       mlxsw_sp_feature_loopback);

        if (err) {
                dev->features = oper_features;
                return -EINVAL;
        }

        return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
                                                mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct ifreq *ifr)
{
        struct hwtstamp_config config;
        int err;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
                                                             &config);
        if (err)
                return err;

        if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
                return -EFAULT;

        return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct ifreq *ifr)
{
        struct hwtstamp_config config;
        int err;

        err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
                                                             &config);
        if (err)
                return err;

        if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
                return -EFAULT;

        return 0;
}
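
/* Reset the port's hardware timestamping configuration to the default
 * (a zeroed config, i.e. all timestamping off). Called on port removal
 * so a recreated port starts from a clean PTP state.
 */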
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct hwtstamp_config config = {0};

        mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        switch (cmd) {
        case SIOCSHWTSTAMP:
                return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
        case SIOCGHWTSTAMP:
                return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
        default:
                return -EOPNOTSUPP;
        }
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
        .ndo_open               = mlxsw_sp_port_open,
        .ndo_stop               = mlxsw_sp_port_stop,
        .ndo_start_xmit         = mlxsw_sp_port_xmit,
        .ndo_setup_tc           = mlxsw_sp_setup_tc,
        .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
        .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
        .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
        .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
        .ndo_has_offload_stats  = mlxsw_sp_port_has_offload_stats,
        .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
        .ndo_set_features       = mlxsw_sp_set_features,
        .ndo_get_devlink_port   = mlxsw_sp_port_get_devlink_port,
        .ndo_eth_ioctl          = mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
        const struct mlxsw_sp_port_type_speed_ops *ops;
        char ptys_pl[MLXSW_REG_PTYS_LEN];
        u32 eth_proto_cap_masked;
        int err;

        ops = mlxsw_sp->port_type_speed_ops;

        /* Set advertised speeds to speeds supported by both the driver
         * and the device.
         */
        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
                               0, false);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
        if (err)
                return err;

        ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
                                 &eth_proto_admin, &eth_proto_oper);
        eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
                               eth_proto_cap_masked,
                               mlxsw_sp_port->link.autoneg);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
        const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char ptys_pl[MLXSW_REG_PTYS_LEN];
        u32 eth_proto_oper;
        int err;

        port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
        port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
                                               mlxsw_sp_port->local_port, 0,
                                               false);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
        if (err)
                return err;
        port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
                                                 &eth_proto_oper);
        *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
        return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
                          enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
                          bool dwrr, u8 dwrr_weight)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char qeec_pl[MLXSW_REG_QEEC_LEN];

        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
                            next_index);
        mlxsw_reg_qeec_de_set(qeec_pl, true);
        mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
        mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                  enum mlxsw_reg_qeec_hr hr, u8 index,
                                  u8 next_index, u32 maxrate, u8 burst_size)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char qeec_pl[MLXSW_REG_QEEC_LEN];

        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
                            next_index);
        mlxsw_reg_qeec_mase_set(qeec_pl, true);
        mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
        mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                    enum mlxsw_reg_qeec_hr hr, u8 index,
                                    u8 next_index, u32 minrate)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char qeec_pl[MLXSW_REG_QEEC_LEN];

        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
                            next_index);
        mlxsw_reg_qeec_mise_set(qeec_pl, true);
        mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
                              u8 switch_prio, u8 tclass)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char qtct_pl[MLXSW_REG_QTCT_LEN];

        mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
                            tclass);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
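
/* Set up the port's ETS element hierarchy: each traffic class is linked to
 * one subgroup, and all subgroups are members of a single group, per the
 * comment below. TC i carries unicast traffic, while TC i + 8 is its
 * multicast counterpart, scheduled by DWRR with weight 100 and given a
 * minimum shaper (MLXSW_REG_QEEC_MIS_MIN), presumably so that multicast
 * traffic always gets some bandwidth.
 */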
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        int err, i;

        /* Setup the elements hierarchy, so that each TC is linked to
         * one subgroup, which are all members of the same group.
         */
        err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
        if (err)
                return err;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_SUBGROUP, i,
                                            0, false, 0);
                if (err)
                        return err;
        }
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_TC, i, i,
                                            false, 0);
                if (err)
                        return err;

                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_TC,
                                            i + 8, i,
                                            true, 100);
                if (err)
                        return err;
        }

        /* Make sure the max shaper is disabled in all hierarchies that support
         * it. Note that this disables ptps (PTP shaper), but that is intended
         * for the initial configuration.
         */
        err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_PORT, 0, 0,
                                            MLXSW_REG_QEEC_MAS_DIS, 0);
        if (err)
                return err;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                                    MLXSW_REG_QEEC_HR_SUBGROUP,
                                                    i, 0,
                                                    MLXSW_REG_QEEC_MAS_DIS, 0);
                if (err)
                        return err;
        }
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                                    MLXSW_REG_QEEC_HR_TC,
                                                    i, i,
                                                    MLXSW_REG_QEEC_MAS_DIS, 0);
                if (err)
                        return err;

                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                                    MLXSW_REG_QEEC_HR_TC,
                                                    i + 8, i,
                                                    MLXSW_REG_QEEC_MAS_DIS, 0);
                if (err)
                        return err;
        }

        /* Configure the min shaper for multicast TCs. */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
                                               MLXSW_REG_QEEC_HR_TC,
                                               i + 8, i,
                                               MLXSW_REG_QEEC_MIS_MIN);
                if (err)
                        return err;
        }

        /* Map all priorities to traffic class 0. */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
                if (err)
                        return err;
        }

        return 0;
}

static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                        bool enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char qtctm_pl[MLXSW_REG_QTCTM_LEN];

        mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 module = mlxsw_sp_port->mapping.module;
        u64 overheat_counter;
        int err;

        err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
                                                    &overheat_counter);
        if (err)
                return err;

        mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
        return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      bool is_8021ad_tagged,
                                      bool is_8021q_tagged)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spvc_pl[MLXSW_REG_SPVC_LEN];

        mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
                            is_8021ad_tagged, is_8021q_tagged);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}

static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
                                        u16 local_port, u8 *port_number,
                                        u8 *split_port_subnumber,
                                        u8 *slot_index)
{
        char pllp_pl[MLXSW_REG_PLLP_LEN];
        int err;

        mlxsw_reg_pllp_pack(pllp_pl, local_port);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
        if (err)
                return err;
        mlxsw_reg_pllp_unpack(pllp_pl, port_number,
                              split_port_subnumber, slot_index);
        return 0;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                                bool split,
                                struct mlxsw_sp_port_mapping *port_mapping)
{
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
        struct mlxsw_sp_port *mlxsw_sp_port;
        u32 lanes = port_mapping->width;
        u8 split_port_subnumber;
        struct net_device *dev;
        u8 port_number;
        u8 slot_index;
        bool splittable;
        int err;

        err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
                        local_port);
                return err;
        }

        err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
                        local_port);
                goto err_port_swid_set;
        }

        err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
                                           &split_port_subnumber, &slot_index);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
                        local_port);
                goto err_port_label_info_get;
        }

        splittable = lanes > 1 && !split;
        err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
                                   port_number, split, split_port_subnumber,
                                   splittable, lanes, mlxsw_sp->base_mac,
                                   sizeof(mlxsw_sp->base_mac));
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
                        local_port);
                goto err_core_port_init;
        }

        dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
        if (!dev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }
        SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
        dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
        mlxsw_sp_port = netdev_priv(dev);
        mlxsw_sp_port->dev = dev;
        mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
        mlxsw_sp_port->local_port = local_port;
        mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
        mlxsw_sp_port->split = split;
        mlxsw_sp_port->mapping = *port_mapping;
        mlxsw_sp_port->link.autoneg = 1;
        INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

        mlxsw_sp_port->pcpu_stats =
                netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
        if (!mlxsw_sp_port->pcpu_stats) {
                err = -ENOMEM;
                goto err_alloc_stats;
        }

        INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
                          &update_stats_cache);

        dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
        dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

        err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
                        mlxsw_sp_port->local_port);
                goto err_dev_addr_init;
        }

        netif_carrier_off(dev);

        dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
                         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
        dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

        dev->min_mtu = 0;
        dev->max_mtu = ETH_MAX_MTU;

        /* Each packet needs to have a Tx header (metadata) on top of all
         * other headers.
         */
        dev->needed_headroom = MLXSW_TXHDR_LEN;

        err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
                        mlxsw_sp_port->local_port);
                goto err_port_system_port_mapping_set;
        }

        err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
                        mlxsw_sp_port->local_port);
                goto err_port_speed_by_width_set;
        }

        err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
                                                            &mlxsw_sp_port->max_speed);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
                        mlxsw_sp_port->local_port);
                goto err_max_speed_get;
        }

        err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
                        mlxsw_sp_port->local_port);
                goto err_port_max_mtu_get;
        }

        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
                        mlxsw_sp_port->local_port);
                goto err_port_mtu_set;
        }

        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
        if (err)
                goto err_port_admin_status_set;

        err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
                        mlxsw_sp_port->local_port);
                goto err_port_buffers_init;
        }

        err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
                        mlxsw_sp_port->local_port);
                goto err_port_ets_init;
        }

        err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
                        mlxsw_sp_port->local_port);
                goto err_port_tc_mc_mode;
        }

        /* ETS and buffers must be initialized before DCB. */
        err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
                        mlxsw_sp_port->local_port);
                goto err_port_dcb_init;
        }

        err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
                        mlxsw_sp_port->local_port);
                goto err_port_fids_init;
        }

        err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
                        mlxsw_sp_port->local_port);
                goto err_port_qdiscs_init;
        }

        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
                                     false);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
                        mlxsw_sp_port->local_port);
                goto err_port_vlan_clear;
        }

        err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
                        mlxsw_sp_port->local_port);
                goto err_port_nve_init;
        }

        err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
                                     ETH_P_8021Q);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
                        mlxsw_sp_port->local_port);
                goto err_port_pvid_set;
        }

        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
                                                       MLXSW_SP_DEFAULT_VID);
        if (IS_ERR(mlxsw_sp_port_vlan)) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
                        mlxsw_sp_port->local_port);
                err = PTR_ERR(mlxsw_sp_port_vlan);
                goto err_port_vlan_create;
        }
        mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

        /* Set SPVC.et0=true and SPVC.et1=false so that the local port treats
         * only packets with an 802.1q header as tagged packets.
         */
        err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
                        local_port);
                goto err_port_vlan_classification_set;
        }

        INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
                          mlxsw_sp->ptp_ops->shaper_work);

        mlxsw_sp->ports[local_port] = mlxsw_sp_port;

        err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
                        mlxsw_sp_port->local_port);
                goto err_port_overheat_init_val_set;
        }

        err = register_netdev(dev);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
                        mlxsw_sp_port->local_port);
                goto err_register_netdev;
        }

        mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
                                mlxsw_sp_port, dev);
        mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
        return 0;

err_register_netdev:
err_port_overheat_init_val_set:
        mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
        mlxsw_sp->ports[local_port] = NULL;
        mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
        mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
        mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
        mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
        mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
        free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
        free_netdev(dev);
err_alloc_etherdev:
        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
        mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
                               MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
        mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module);
        return err;
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
        u8 module = mlxsw_sp_port->mapping.module;

        cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
        cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
        mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
        mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
        mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
        mlxsw_sp->ports[local_port] = NULL;
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
        mlxsw_sp_port_nve_fini(mlxsw_sp_port);
        mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
        mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
        mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
        free_percpu(mlxsw_sp_port->pcpu_stats);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 1806 free_netdev(mlxsw_sp_port->dev); 1807 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1808 mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 1809 MLXSW_PORT_SWID_DISABLED_PORT); 1810 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module); 1811 } 1812 1813 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 1814 { 1815 struct mlxsw_sp_port *mlxsw_sp_port; 1816 int err; 1817 1818 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 1819 if (!mlxsw_sp_port) 1820 return -ENOMEM; 1821 1822 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1823 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 1824 1825 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 1826 mlxsw_sp_port, 1827 mlxsw_sp->base_mac, 1828 sizeof(mlxsw_sp->base_mac)); 1829 if (err) { 1830 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 1831 goto err_core_cpu_port_init; 1832 } 1833 1834 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 1835 return 0; 1836 1837 err_core_cpu_port_init: 1838 kfree(mlxsw_sp_port); 1839 return err; 1840 } 1841 1842 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 1843 { 1844 struct mlxsw_sp_port *mlxsw_sp_port = 1845 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 1846 1847 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 1848 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 1849 kfree(mlxsw_sp_port); 1850 } 1851 1852 static bool mlxsw_sp_local_port_valid(u16 local_port) 1853 { 1854 return local_port != MLXSW_PORT_CPU_PORT; 1855 } 1856 1857 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port) 1858 { 1859 if (!mlxsw_sp_local_port_valid(local_port)) 1860 return false; 1861 return mlxsw_sp->ports[local_port] != NULL; 1862 } 1863 1864 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 1865 { 1866 int i; 1867 1868 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 1869 if (mlxsw_sp_port_created(mlxsw_sp, i)) 1870 mlxsw_sp_port_remove(mlxsw_sp, i); 1871 mlxsw_sp_cpu_port_remove(mlxsw_sp); 1872 kfree(mlxsw_sp->ports); 1873 mlxsw_sp->ports = NULL; 1874 } 1875 1876 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 1877 { 1878 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 1879 struct mlxsw_sp_port_mapping *port_mapping; 1880 size_t alloc_size; 1881 int i; 1882 int err; 1883 1884 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 1885 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 1886 if (!mlxsw_sp->ports) 1887 return -ENOMEM; 1888 1889 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 1890 if (err) 1891 goto err_cpu_port_create; 1892 1893 for (i = 1; i < max_ports; i++) { 1894 port_mapping = mlxsw_sp->port_mapping[i]; 1895 if (!port_mapping) 1896 continue; 1897 err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping); 1898 if (err) 1899 goto err_port_create; 1900 } 1901 return 0; 1902 1903 err_port_create: 1904 for (i--; i >= 1; i--) 1905 if (mlxsw_sp_port_created(mlxsw_sp, i)) 1906 mlxsw_sp_port_remove(mlxsw_sp, i); 1907 mlxsw_sp_cpu_port_remove(mlxsw_sp); 1908 err_cpu_port_create: 1909 kfree(mlxsw_sp->ports); 1910 mlxsw_sp->ports = NULL; 1911 return err; 1912 } 1913 1914 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 1915 { 1916 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 1917 struct mlxsw_sp_port_mapping port_mapping; 1918 int i; 1919 int err; 1920 1921 mlxsw_sp->port_mapping = kcalloc(max_ports, 1922 sizeof(struct mlxsw_sp_port_mapping *), 1923 GFP_KERNEL); 1924 if (!mlxsw_sp->port_mapping) 1925 return -ENOMEM; 
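/* Local port 0 is the CPU port and has no module mapping, hence the
 * scan below starts from local port 1.
 */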
1926 1927 for (i = 1; i < max_ports; i++) { 1928 if (mlxsw_core_port_is_xm(mlxsw_sp->core, i)) 1929 continue; 1930 1931 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 1932 if (err) 1933 goto err_port_module_info_get; 1934 if (!port_mapping.width) 1935 continue; 1936 1937 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, 1938 sizeof(port_mapping), 1939 GFP_KERNEL); 1940 if (!mlxsw_sp->port_mapping[i]) { 1941 err = -ENOMEM; 1942 goto err_port_module_info_dup; 1943 } 1944 } 1945 return 0; 1946 1947 err_port_module_info_get: 1948 err_port_module_info_dup: 1949 for (i--; i >= 1; i--) 1950 kfree(mlxsw_sp->port_mapping[i]); 1951 kfree(mlxsw_sp->port_mapping); 1952 return err; 1953 } 1954 1955 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 1956 { 1957 int i; 1958 1959 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 1960 kfree(mlxsw_sp->port_mapping[i]); 1961 kfree(mlxsw_sp->port_mapping); 1962 } 1963 1964 static int 1965 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, 1966 struct mlxsw_sp_port_mapping *port_mapping, 1967 unsigned int count, const char *pmtdb_pl) 1968 { 1969 struct mlxsw_sp_port_mapping split_port_mapping; 1970 int err, i; 1971 1972 split_port_mapping = *port_mapping; 1973 split_port_mapping.width /= count; 1974 for (i = 0; i < count; i++) { 1975 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 1976 1977 if (!mlxsw_sp_local_port_valid(s_local_port)) 1978 continue; 1979 1980 err = mlxsw_sp_port_create(mlxsw_sp, s_local_port, 1981 true, &split_port_mapping); 1982 if (err) 1983 goto err_port_create; 1984 split_port_mapping.lane += split_port_mapping.width; 1985 } 1986 1987 return 0; 1988 1989 err_port_create: 1990 for (i--; i >= 0; i--) { 1991 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 1992 1993 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 1994 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 1995 } 1996 return err; 1997 } 1998 1999 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 2000 unsigned int count, 2001 const char *pmtdb_pl) 2002 { 2003 struct mlxsw_sp_port_mapping *port_mapping; 2004 int i; 2005 2006 /* Go over original unsplit ports in the gap and recreate them. 
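 * Recreation is best-effort: this runs on the unsplit and split-error
 * paths, which cannot themselves fail, so the return value of
 * mlxsw_sp_port_create() is deliberately ignored.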
*/ 2007 for (i = 0; i < count; i++) { 2008 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2009 2010 port_mapping = mlxsw_sp->port_mapping[local_port]; 2011 if (!port_mapping || !mlxsw_sp_local_port_valid(local_port)) 2012 continue; 2013 mlxsw_sp_port_create(mlxsw_sp, local_port, 2014 false, port_mapping); 2015 } 2016 } 2017 2018 static struct mlxsw_sp_port * 2019 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port) 2020 { 2021 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 2022 return mlxsw_sp->ports[local_port]; 2023 return NULL; 2024 } 2025 2026 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, 2027 unsigned int count, 2028 struct netlink_ext_ack *extack) 2029 { 2030 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2031 struct mlxsw_sp_port_mapping port_mapping; 2032 struct mlxsw_sp_port *mlxsw_sp_port; 2033 enum mlxsw_reg_pmtdb_status status; 2034 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2035 int i; 2036 int err; 2037 2038 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2039 if (!mlxsw_sp_port) { 2040 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2041 local_port); 2042 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2043 return -EINVAL; 2044 } 2045 2046 if (mlxsw_sp_port->split) { 2047 NL_SET_ERR_MSG_MOD(extack, "Port is already split"); 2048 return -EINVAL; 2049 } 2050 2051 mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, 2052 mlxsw_sp_port->mapping.module_width / count, 2053 count); 2054 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2055 if (err) { 2056 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2057 return err; 2058 } 2059 2060 status = mlxsw_reg_pmtdb_status_get(pmtdb_pl); 2061 if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) { 2062 NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration"); 2063 return -EINVAL; 2064 } 2065 2066 port_mapping = mlxsw_sp_port->mapping; 2067 2068 for (i = 0; i < count; i++) { 2069 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2070 2071 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2072 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2073 } 2074 2075 err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping, 2076 count, pmtdb_pl); 2077 if (err) { 2078 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2079 goto err_port_split_create; 2080 } 2081 2082 return 0; 2083 2084 err_port_split_create: 2085 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2086 return err; 2087 } 2088 2089 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port, 2090 struct netlink_ext_ack *extack) 2091 { 2092 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2093 struct mlxsw_sp_port *mlxsw_sp_port; 2094 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2095 unsigned int count; 2096 int i; 2097 int err; 2098 2099 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2100 if (!mlxsw_sp_port) { 2101 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2102 local_port); 2103 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2104 return -EINVAL; 2105 } 2106 2107 if (!mlxsw_sp_port->split) { 2108 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2109 return -EINVAL; 2110 } 2111 2112 count = mlxsw_sp_port->mapping.module_width / 2113 mlxsw_sp_port->mapping.width; 2114 2115 mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, 2116 mlxsw_sp_port->mapping.module_width / count, 2117 
count);
2118 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
2119 if (err) {
2120 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
2121 return err;
2122 }
2123
2124 for (i = 0; i < count; i++) {
2125 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2126
2127 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
2128 mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
2129 }
2130
2131 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
2132
2133 return 0;
2134 }
2135
2136 static void
2137 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2138 {
2139 int i;
2140
2141 for (i = 0; i < TC_MAX_QUEUE; i++)
2142 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2143 }
2144
2145 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2146 char *pude_pl, void *priv)
2147 {
2148 struct mlxsw_sp *mlxsw_sp = priv;
2149 struct mlxsw_sp_port *mlxsw_sp_port;
2150 enum mlxsw_reg_pude_oper_status status;
2151 u16 local_port;
2152
2153 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2154
2155 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2156 return;
2157 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2158 if (!mlxsw_sp_port)
2159 return;
2160
2161 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2162 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2163 netdev_info(mlxsw_sp_port->dev, "link up\n");
2164 netif_carrier_on(mlxsw_sp_port->dev);
2165 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2166 } else {
2167 netdev_info(mlxsw_sp_port->dev, "link down\n");
2168 netif_carrier_off(mlxsw_sp_port->dev);
2169 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2170 }
2171 }
2172
2173 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2174 char *mtpptr_pl, bool ingress)
2175 {
2176 u16 local_port;
2177 u8 num_rec;
2178 int i;
2179
2180 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2181 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2182 for (i = 0; i < num_rec; i++) {
2183 u8 domain_number;
2184 u8 message_type;
2185 u16 sequence_id;
2186 u64 timestamp;
2187
2188 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2189 &domain_number, &sequence_id,
2190 &timestamp);
2191 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2192 message_type, domain_number,
2193 sequence_id, timestamp);
2194 }
2195 }
2196
2197 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2198 char *mtpptr_pl, void *priv)
2199 {
2200 struct mlxsw_sp *mlxsw_sp = priv;
2201
2202 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2203 }
2204
2205 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2206 char *mtpptr_pl, void *priv)
2207 {
2208 struct mlxsw_sp *mlxsw_sp = priv;
2209
2210 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2211 }
2212
2213 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2214 u16 local_port, void *priv)
2215 {
2216 struct mlxsw_sp *mlxsw_sp = priv;
2217 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2218 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2219
2220 if (unlikely(!mlxsw_sp_port)) {
2221 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2222 local_port);
2223 return;
2224 }
2225
2226 skb->dev = mlxsw_sp_port->dev;
2227
2228 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2229 u64_stats_update_begin(&pcpu_stats->syncp);
2230 pcpu_stats->rx_packets++;
2231 pcpu_stats->rx_bytes += skb->len;
2232
u64_stats_update_end(&pcpu_stats->syncp); 2233 2234 skb->protocol = eth_type_trans(skb, skb->dev); 2235 netif_receive_skb(skb); 2236 } 2237 2238 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port, 2239 void *priv) 2240 { 2241 skb->offload_fwd_mark = 1; 2242 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2243 } 2244 2245 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2246 u16 local_port, void *priv) 2247 { 2248 skb->offload_l3_fwd_mark = 1; 2249 skb->offload_fwd_mark = 1; 2250 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2251 } 2252 2253 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2254 u16 local_port) 2255 { 2256 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2257 } 2258 2259 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2260 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2261 _is_ctrl, SP_##_trap_group, DISCARD) 2262 2263 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2264 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2265 _is_ctrl, SP_##_trap_group, DISCARD) 2266 2267 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2268 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2269 _is_ctrl, SP_##_trap_group, DISCARD) 2270 2271 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2272 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2273 2274 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2275 /* Events */ 2276 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2277 /* L2 traps */ 2278 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2279 /* L3 traps */ 2280 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2281 false), 2282 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2283 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2284 false), 2285 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2286 ROUTER_EXP, false), 2287 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2288 ROUTER_EXP, false), 2289 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2290 ROUTER_EXP, false), 2291 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2292 ROUTER_EXP, false), 2293 /* Multicast Router Traps */ 2294 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2295 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2296 /* NVE traps */ 2297 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), 2298 }; 2299 2300 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2301 /* Events */ 2302 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2303 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2304 }; 2305 2306 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2307 { 2308 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2309 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2310 enum mlxsw_reg_qpcr_ir_units ir_units; 2311 int max_cpu_policers; 2312 bool is_bytes; 2313 u8 burst_size; 2314 u32 rate; 2315 int i, err; 2316 2317 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2318 return -EIO; 2319 2320 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2321 2322 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2323 for (i = 0; i < max_cpu_policers; i++) { 2324 is_bytes = false; 2325 switch (i) { 2326 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2327 case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2328 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2329 rate = 1024; 2330 burst_size = 7; 2331 break; 2332 default: 2333 continue; 2334 } 2335 2336 __set_bit(i, mlxsw_sp->trap->policers_usage); 2337 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2338 burst_size); 2339 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2340 if (err) 2341 return err; 2342 } 2343 2344 return 0; 2345 } 2346 2347 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2348 { 2349 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2350 enum mlxsw_reg_htgt_trap_group i; 2351 int max_cpu_policers; 2352 int max_trap_groups; 2353 u8 priority, tc; 2354 u16 policer_id; 2355 int err; 2356 2357 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2358 return -EIO; 2359 2360 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2361 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2362 2363 for (i = 0; i < max_trap_groups; i++) { 2364 policer_id = i; 2365 switch (i) { 2366 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2367 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2368 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2369 priority = 1; 2370 tc = 1; 2371 break; 2372 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2373 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2374 tc = MLXSW_REG_HTGT_DEFAULT_TC; 2375 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2376 break; 2377 default: 2378 continue; 2379 } 2380 2381 if (max_cpu_policers <= policer_id && 2382 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2383 return -EIO; 2384 2385 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2386 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2387 if (err) 2388 return err; 2389 } 2390 2391 return 0; 2392 } 2393 2394 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2395 { 2396 struct mlxsw_sp_trap *trap; 2397 u64 max_policers; 2398 int err; 2399 2400 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2401 return -EIO; 2402 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2403 trap = kzalloc(struct_size(trap, policers_usage, 2404 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2405 if (!trap) 2406 return -ENOMEM; 2407 trap->max_policers = max_policers; 2408 mlxsw_sp->trap = trap; 2409 2410 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2411 if (err) 2412 goto err_cpu_policers_set; 2413 2414 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2415 if (err) 2416 goto err_trap_groups_set; 2417 2418 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener, 2419 ARRAY_SIZE(mlxsw_sp_listener), 2420 mlxsw_sp); 2421 if (err) 2422 goto err_traps_register; 2423 2424 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners, 2425 mlxsw_sp->listeners_count, mlxsw_sp); 2426 if (err) 2427 goto err_extra_traps_init; 2428 2429 return 0; 2430 2431 err_extra_traps_init: 2432 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2433 ARRAY_SIZE(mlxsw_sp_listener), 2434 mlxsw_sp); 2435 err_traps_register: 2436 err_trap_groups_set: 2437 err_cpu_policers_set: 2438 kfree(trap); 2439 return err; 2440 } 2441 2442 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2443 { 2444 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners, 2445 mlxsw_sp->listeners_count, 2446 mlxsw_sp); 2447 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2448 ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp); 2449 kfree(mlxsw_sp->trap); 2450 } 2451 2452 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2453 2454 static int 
mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2455 { 2456 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2457 u32 seed; 2458 int err; 2459 2460 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2461 MLXSW_SP_LAG_SEED_INIT); 2462 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2463 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2464 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2465 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2466 MLXSW_REG_SLCR_LAG_HASH_SIP | 2467 MLXSW_REG_SLCR_LAG_HASH_DIP | 2468 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2469 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2470 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2471 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2472 if (err) 2473 return err; 2474 2475 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 2476 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2477 return -EIO; 2478 2479 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 2480 sizeof(struct mlxsw_sp_upper), 2481 GFP_KERNEL); 2482 if (!mlxsw_sp->lags) 2483 return -ENOMEM; 2484 2485 return 0; 2486 } 2487 2488 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2489 { 2490 kfree(mlxsw_sp->lags); 2491 } 2492 2493 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2494 .clock_init = mlxsw_sp1_ptp_clock_init, 2495 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2496 .init = mlxsw_sp1_ptp_init, 2497 .fini = mlxsw_sp1_ptp_fini, 2498 .receive = mlxsw_sp1_ptp_receive, 2499 .transmitted = mlxsw_sp1_ptp_transmitted, 2500 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2501 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 2502 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2503 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2504 .get_stats_count = mlxsw_sp1_get_stats_count, 2505 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2506 .get_stats = mlxsw_sp1_get_stats, 2507 }; 2508 2509 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2510 .clock_init = mlxsw_sp2_ptp_clock_init, 2511 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2512 .init = mlxsw_sp2_ptp_init, 2513 .fini = mlxsw_sp2_ptp_fini, 2514 .receive = mlxsw_sp2_ptp_receive, 2515 .transmitted = mlxsw_sp2_ptp_transmitted, 2516 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2517 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2518 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2519 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2520 .get_stats_count = mlxsw_sp2_get_stats_count, 2521 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2522 .get_stats = mlxsw_sp2_get_stats, 2523 }; 2524 2525 struct mlxsw_sp_sample_trigger_node { 2526 struct mlxsw_sp_sample_trigger trigger; 2527 struct mlxsw_sp_sample_params params; 2528 struct rhash_head ht_node; 2529 struct rcu_head rcu; 2530 refcount_t refcount; 2531 }; 2532 2533 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { 2534 .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), 2535 .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), 2536 .key_len = sizeof(struct mlxsw_sp_sample_trigger), 2537 .automatic_shrinking = true, 2538 }; 2539 2540 static void 2541 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, 2542 const struct mlxsw_sp_sample_trigger *trigger) 2543 { 2544 memset(key, 0, sizeof(*key)); 2545 key->type = trigger->type; 2546 key->local_port = trigger->local_port; 2547 } 2548 2549 /* RCU read lock must be held */ 2550 struct mlxsw_sp_sample_params * 2551 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, 2552 const struct mlxsw_sp_sample_trigger *trigger) 2553 { 2554 struct mlxsw_sp_sample_trigger_node 
*trigger_node; 2555 struct mlxsw_sp_sample_trigger key; 2556 2557 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2558 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, 2559 mlxsw_sp_sample_trigger_ht_params); 2560 if (!trigger_node) 2561 return NULL; 2562 2563 return &trigger_node->params; 2564 } 2565 2566 static int 2567 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, 2568 const struct mlxsw_sp_sample_trigger *trigger, 2569 const struct mlxsw_sp_sample_params *params) 2570 { 2571 struct mlxsw_sp_sample_trigger_node *trigger_node; 2572 int err; 2573 2574 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); 2575 if (!trigger_node) 2576 return -ENOMEM; 2577 2578 trigger_node->trigger = *trigger; 2579 trigger_node->params = *params; 2580 refcount_set(&trigger_node->refcount, 1); 2581 2582 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, 2583 &trigger_node->ht_node, 2584 mlxsw_sp_sample_trigger_ht_params); 2585 if (err) 2586 goto err_rhashtable_insert; 2587 2588 return 0; 2589 2590 err_rhashtable_insert: 2591 kfree(trigger_node); 2592 return err; 2593 } 2594 2595 static void 2596 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, 2597 struct mlxsw_sp_sample_trigger_node *trigger_node) 2598 { 2599 rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, 2600 &trigger_node->ht_node, 2601 mlxsw_sp_sample_trigger_ht_params); 2602 kfree_rcu(trigger_node, rcu); 2603 } 2604 2605 int 2606 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, 2607 const struct mlxsw_sp_sample_trigger *trigger, 2608 const struct mlxsw_sp_sample_params *params, 2609 struct netlink_ext_ack *extack) 2610 { 2611 struct mlxsw_sp_sample_trigger_node *trigger_node; 2612 struct mlxsw_sp_sample_trigger key; 2613 2614 ASSERT_RTNL(); 2615 2616 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2617 2618 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2619 &key, 2620 mlxsw_sp_sample_trigger_ht_params); 2621 if (!trigger_node) 2622 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, 2623 params); 2624 2625 if (trigger_node->trigger.local_port) { 2626 NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port"); 2627 return -EINVAL; 2628 } 2629 2630 if (trigger_node->params.psample_group != params->psample_group || 2631 trigger_node->params.truncate != params->truncate || 2632 trigger_node->params.rate != params->rate || 2633 trigger_node->params.trunc_size != params->trunc_size) { 2634 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); 2635 return -EINVAL; 2636 } 2637 2638 refcount_inc(&trigger_node->refcount); 2639 2640 return 0; 2641 } 2642 2643 void 2644 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, 2645 const struct mlxsw_sp_sample_trigger *trigger) 2646 { 2647 struct mlxsw_sp_sample_trigger_node *trigger_node; 2648 struct mlxsw_sp_sample_trigger key; 2649 2650 ASSERT_RTNL(); 2651 2652 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2653 2654 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2655 &key, 2656 mlxsw_sp_sample_trigger_ht_params); 2657 if (!trigger_node) 2658 return; 2659 2660 if (!refcount_dec_and_test(&trigger_node->refcount)) 2661 return; 2662 2663 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); 2664 } 2665 2666 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2667 unsigned long event, void *ptr); 2668 2669 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96 2670 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128 2671 #define 
MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789 2672 2673 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) 2674 { 2675 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 2676 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; 2677 mutex_init(&mlxsw_sp->parsing.lock); 2678 } 2679 2680 static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) 2681 { 2682 mutex_destroy(&mlxsw_sp->parsing.lock); 2683 } 2684 2685 struct mlxsw_sp_ipv6_addr_node { 2686 struct in6_addr key; 2687 struct rhash_head ht_node; 2688 u32 kvdl_index; 2689 refcount_t refcount; 2690 }; 2691 2692 static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = { 2693 .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key), 2694 .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node), 2695 .key_len = sizeof(struct in6_addr), 2696 .automatic_shrinking = true, 2697 }; 2698 2699 static int 2700 mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6, 2701 u32 *p_kvdl_index) 2702 { 2703 struct mlxsw_sp_ipv6_addr_node *node; 2704 char rips_pl[MLXSW_REG_RIPS_LEN]; 2705 int err; 2706 2707 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 2708 MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 2709 p_kvdl_index); 2710 if (err) 2711 return err; 2712 2713 mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6); 2714 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); 2715 if (err) 2716 goto err_rips_write; 2717 2718 node = kzalloc(sizeof(*node), GFP_KERNEL); 2719 if (!node) { 2720 err = -ENOMEM; 2721 goto err_node_alloc; 2722 } 2723 2724 node->key = *addr6; 2725 node->kvdl_index = *p_kvdl_index; 2726 refcount_set(&node->refcount, 1); 2727 2728 err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht, 2729 &node->ht_node, 2730 mlxsw_sp_ipv6_addr_ht_params); 2731 if (err) 2732 goto err_rhashtable_insert; 2733 2734 return 0; 2735 2736 err_rhashtable_insert: 2737 kfree(node); 2738 err_node_alloc: 2739 err_rips_write: 2740 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 2741 *p_kvdl_index); 2742 return err; 2743 } 2744 2745 static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp, 2746 struct mlxsw_sp_ipv6_addr_node *node) 2747 { 2748 u32 kvdl_index = node->kvdl_index; 2749 2750 rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node, 2751 mlxsw_sp_ipv6_addr_ht_params); 2752 kfree(node); 2753 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 2754 kvdl_index); 2755 } 2756 2757 int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp, 2758 const struct in6_addr *addr6, 2759 u32 *p_kvdl_index) 2760 { 2761 struct mlxsw_sp_ipv6_addr_node *node; 2762 int err = 0; 2763 2764 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 2765 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 2766 mlxsw_sp_ipv6_addr_ht_params); 2767 if (node) { 2768 refcount_inc(&node->refcount); 2769 *p_kvdl_index = node->kvdl_index; 2770 goto out_unlock; 2771 } 2772 2773 err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index); 2774 2775 out_unlock: 2776 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 2777 return err; 2778 } 2779 2780 void 2781 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6) 2782 { 2783 struct mlxsw_sp_ipv6_addr_node *node; 2784 2785 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 2786 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 2787 mlxsw_sp_ipv6_addr_ht_params); 2788 if (WARN_ON(!node)) 2789 goto out_unlock; 2790 2791 if (!refcount_dec_and_test(&node->refcount)) 2792 goto out_unlock; 2793 
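/* Last reference was dropped; remove the node from the hash table and
 * release its KVDL entry.
 */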
2794 mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node); 2795 2796 out_unlock: 2797 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 2798 } 2799 2800 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp) 2801 { 2802 int err; 2803 2804 err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht, 2805 &mlxsw_sp_ipv6_addr_ht_params); 2806 if (err) 2807 return err; 2808 2809 mutex_init(&mlxsw_sp->ipv6_addr_ht_lock); 2810 return 0; 2811 } 2812 2813 static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp) 2814 { 2815 mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock); 2816 rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht); 2817 } 2818 2819 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2820 const struct mlxsw_bus_info *mlxsw_bus_info, 2821 struct netlink_ext_ack *extack) 2822 { 2823 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2824 int err; 2825 2826 mlxsw_sp->core = mlxsw_core; 2827 mlxsw_sp->bus_info = mlxsw_bus_info; 2828 2829 mlxsw_sp_parsing_init(mlxsw_sp); 2830 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 2831 2832 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2833 if (err) { 2834 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 2835 return err; 2836 } 2837 2838 err = mlxsw_sp_kvdl_init(mlxsw_sp); 2839 if (err) { 2840 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 2841 return err; 2842 } 2843 2844 err = mlxsw_sp_fids_init(mlxsw_sp); 2845 if (err) { 2846 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 2847 goto err_fids_init; 2848 } 2849 2850 err = mlxsw_sp_policers_init(mlxsw_sp); 2851 if (err) { 2852 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); 2853 goto err_policers_init; 2854 } 2855 2856 err = mlxsw_sp_traps_init(mlxsw_sp); 2857 if (err) { 2858 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 2859 goto err_traps_init; 2860 } 2861 2862 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 2863 if (err) { 2864 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 2865 goto err_devlink_traps_init; 2866 } 2867 2868 err = mlxsw_sp_buffers_init(mlxsw_sp); 2869 if (err) { 2870 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 2871 goto err_buffers_init; 2872 } 2873 2874 err = mlxsw_sp_lag_init(mlxsw_sp); 2875 if (err) { 2876 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 2877 goto err_lag_init; 2878 } 2879 2880 /* Initialize SPAN before router and switchdev, so that those components 2881 * can call mlxsw_sp_span_respin(). 
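 * (Respin re-resolves the parameters of active mirroring sessions
 * after such configuration changes.)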
*/
2883 err = mlxsw_sp_span_init(mlxsw_sp);
2884 if (err) {
2885 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
2886 goto err_span_init;
2887 }
2888
2889 err = mlxsw_sp_switchdev_init(mlxsw_sp);
2890 if (err) {
2891 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2892 goto err_switchdev_init;
2893 }
2894
2895 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
2896 if (err) {
2897 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
2898 goto err_counter_pool_init;
2899 }
2900
2901 err = mlxsw_sp_afa_init(mlxsw_sp);
2902 if (err) {
2903 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
2904 goto err_afa_init;
2905 }
2906
2907 err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
2908 if (err) {
2909 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
2910 goto err_ipv6_addr_ht_init;
2911 }
2912
2913 err = mlxsw_sp_nve_init(mlxsw_sp);
2914 if (err) {
2915 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
2916 goto err_nve_init;
2917 }
2918
2919 err = mlxsw_sp_acl_init(mlxsw_sp);
2920 if (err) {
2921 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
2922 goto err_acl_init;
2923 }
2924
2925 err = mlxsw_sp_router_init(mlxsw_sp, extack);
2926 if (err) {
2927 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
2928 goto err_router_init;
2929 }
2930
2931 if (mlxsw_sp->bus_info->read_frc_capable) {
2932 /* NULL is a valid return value from clock_init */
2933 mlxsw_sp->clock =
2934 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
2935 mlxsw_sp->bus_info->dev);
2936 if (IS_ERR(mlxsw_sp->clock)) {
2937 err = PTR_ERR(mlxsw_sp->clock);
2938 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
2939 goto err_ptp_clock_init;
2940 }
2941 }
2942
2943 if (mlxsw_sp->clock) {
2944 /* NULL is a valid return value from ptp_ops->init */
2945 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
2946 if (IS_ERR(mlxsw_sp->ptp_state)) {
2947 err = PTR_ERR(mlxsw_sp->ptp_state);
2948 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
2949 goto err_ptp_init;
2950 }
2951 }
2952
2953 /* Initialize netdevice notifier after router and SPAN are initialized,
2954 * so that the event handler can use router structures and call SPAN
2955 * respin.
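 * The notifier is correspondingly unregistered in mlxsw_sp_fini()
 * before the router is torn down.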
2956 */ 2957 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 2958 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2959 &mlxsw_sp->netdevice_nb); 2960 if (err) { 2961 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 2962 goto err_netdev_notifier; 2963 } 2964 2965 err = mlxsw_sp_dpipe_init(mlxsw_sp); 2966 if (err) { 2967 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 2968 goto err_dpipe_init; 2969 } 2970 2971 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 2972 if (err) { 2973 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 2974 goto err_port_module_info_init; 2975 } 2976 2977 err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, 2978 &mlxsw_sp_sample_trigger_ht_params); 2979 if (err) { 2980 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); 2981 goto err_sample_trigger_init; 2982 } 2983 2984 err = mlxsw_sp_ports_create(mlxsw_sp); 2985 if (err) { 2986 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 2987 goto err_ports_create; 2988 } 2989 2990 return 0; 2991 2992 err_ports_create: 2993 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 2994 err_sample_trigger_init: 2995 mlxsw_sp_port_module_info_fini(mlxsw_sp); 2996 err_port_module_info_init: 2997 mlxsw_sp_dpipe_fini(mlxsw_sp); 2998 err_dpipe_init: 2999 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3000 &mlxsw_sp->netdevice_nb); 3001 err_netdev_notifier: 3002 if (mlxsw_sp->clock) 3003 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3004 err_ptp_init: 3005 if (mlxsw_sp->clock) 3006 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3007 err_ptp_clock_init: 3008 mlxsw_sp_router_fini(mlxsw_sp); 3009 err_router_init: 3010 mlxsw_sp_acl_fini(mlxsw_sp); 3011 err_acl_init: 3012 mlxsw_sp_nve_fini(mlxsw_sp); 3013 err_nve_init: 3014 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3015 err_ipv6_addr_ht_init: 3016 mlxsw_sp_afa_fini(mlxsw_sp); 3017 err_afa_init: 3018 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3019 err_counter_pool_init: 3020 mlxsw_sp_switchdev_fini(mlxsw_sp); 3021 err_switchdev_init: 3022 mlxsw_sp_span_fini(mlxsw_sp); 3023 err_span_init: 3024 mlxsw_sp_lag_fini(mlxsw_sp); 3025 err_lag_init: 3026 mlxsw_sp_buffers_fini(mlxsw_sp); 3027 err_buffers_init: 3028 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3029 err_devlink_traps_init: 3030 mlxsw_sp_traps_fini(mlxsw_sp); 3031 err_traps_init: 3032 mlxsw_sp_policers_fini(mlxsw_sp); 3033 err_policers_init: 3034 mlxsw_sp_fids_fini(mlxsw_sp); 3035 err_fids_init: 3036 mlxsw_sp_kvdl_fini(mlxsw_sp); 3037 mlxsw_sp_parsing_fini(mlxsw_sp); 3038 return err; 3039 } 3040 3041 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 3042 const struct mlxsw_bus_info *mlxsw_bus_info, 3043 struct netlink_ext_ack *extack) 3044 { 3045 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3046 3047 mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; 3048 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 3049 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 3050 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 3051 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 3052 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 3053 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 3054 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 3055 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 3056 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 3057 mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; 3058 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 3059 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 3060 mlxsw_sp->span_ops = 
&mlxsw_sp1_span_ops; 3061 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; 3062 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; 3063 mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; 3064 mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; 3065 mlxsw_sp->listeners = mlxsw_sp1_listener; 3066 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 3067 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 3068 3069 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3070 } 3071 3072 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 3073 const struct mlxsw_bus_info *mlxsw_bus_info, 3074 struct netlink_ext_ack *extack) 3075 { 3076 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3077 3078 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3079 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3080 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3081 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3082 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3083 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3084 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3085 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3086 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3087 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3088 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3089 mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; 3090 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3091 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3092 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 3093 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3094 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3095 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3096 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3097 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 3098 3099 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3100 } 3101 3102 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 3103 const struct mlxsw_bus_info *mlxsw_bus_info, 3104 struct netlink_ext_ack *extack) 3105 { 3106 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3107 3108 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3109 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3110 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3111 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3112 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3113 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3114 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3115 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3116 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3117 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3118 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3119 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3120 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3121 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3122 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3123 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3124 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3125 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3126 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3127 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3128 3129 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3130 } 3131 3132 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, 3133 const struct mlxsw_bus_info *mlxsw_bus_info, 3134 struct netlink_ext_ack *extack) 3135 { 3136 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3137 3138 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3139 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3140 mlxsw_sp->afa_ops = 
&mlxsw_sp2_act_afa_ops; 3141 mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops; 3142 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3143 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3144 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3145 mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops; 3146 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3147 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3148 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3149 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3150 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3151 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3152 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3153 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3154 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3155 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3156 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3157 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; 3158 3159 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3160 } 3161 3162 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3163 { 3164 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3165 3166 mlxsw_sp_ports_remove(mlxsw_sp); 3167 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3168 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3169 mlxsw_sp_dpipe_fini(mlxsw_sp); 3170 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3171 &mlxsw_sp->netdevice_nb); 3172 if (mlxsw_sp->clock) { 3173 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3174 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3175 } 3176 mlxsw_sp_router_fini(mlxsw_sp); 3177 mlxsw_sp_acl_fini(mlxsw_sp); 3178 mlxsw_sp_nve_fini(mlxsw_sp); 3179 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3180 mlxsw_sp_afa_fini(mlxsw_sp); 3181 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3182 mlxsw_sp_switchdev_fini(mlxsw_sp); 3183 mlxsw_sp_span_fini(mlxsw_sp); 3184 mlxsw_sp_lag_fini(mlxsw_sp); 3185 mlxsw_sp_buffers_fini(mlxsw_sp); 3186 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3187 mlxsw_sp_traps_fini(mlxsw_sp); 3188 mlxsw_sp_policers_fini(mlxsw_sp); 3189 mlxsw_sp_fids_fini(mlxsw_sp); 3190 mlxsw_sp_kvdl_fini(mlxsw_sp); 3191 mlxsw_sp_parsing_fini(mlxsw_sp); 3192 } 3193 3194 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 3195 * 802.1Q FIDs 3196 */ 3197 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 3198 VLAN_VID_MASK - 1) 3199 3200 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3201 .used_max_mid = 1, 3202 .max_mid = MLXSW_SP_MID_MAX, 3203 .used_flood_tables = 1, 3204 .used_flood_mode = 1, 3205 .flood_mode = 3, 3206 .max_fid_flood_tables = 3, 3207 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3208 .used_max_ib_mc = 1, 3209 .max_ib_mc = 0, 3210 .used_max_pkey = 1, 3211 .max_pkey = 0, 3212 .used_kvd_sizes = 1, 3213 .kvd_hash_single_parts = 59, 3214 .kvd_hash_double_parts = 41, 3215 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3216 .swid_config = { 3217 { 3218 .used_type = 1, 3219 .type = MLXSW_PORT_SWID_TYPE_ETH, 3220 } 3221 }, 3222 }; 3223 3224 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3225 .used_max_mid = 1, 3226 .max_mid = MLXSW_SP_MID_MAX, 3227 .used_flood_tables = 1, 3228 .used_flood_mode = 1, 3229 .flood_mode = 3, 3230 .max_fid_flood_tables = 3, 3231 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3232 .used_max_ib_mc = 1, 3233 .max_ib_mc = 0, 3234 .used_max_pkey = 1, 3235 .max_pkey = 0, 3236 .used_kvh_xlt_cache_mode = 1, 3237 .kvh_xlt_cache_mode = 1, 3238 .swid_config = { 3239 { 3240 .used_type = 1, 3241 .type = 
MLXSW_PORT_SWID_TYPE_ETH, 3242 } 3243 }, 3244 }; 3245 3246 static void 3247 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3248 struct devlink_resource_size_params *kvd_size_params, 3249 struct devlink_resource_size_params *linear_size_params, 3250 struct devlink_resource_size_params *hash_double_size_params, 3251 struct devlink_resource_size_params *hash_single_size_params) 3252 { 3253 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3254 KVD_SINGLE_MIN_SIZE); 3255 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3256 KVD_DOUBLE_MIN_SIZE); 3257 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3258 u32 linear_size_min = 0; 3259 3260 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3261 MLXSW_SP_KVD_GRANULARITY, 3262 DEVLINK_RESOURCE_UNIT_ENTRY); 3263 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3264 kvd_size - single_size_min - 3265 double_size_min, 3266 MLXSW_SP_KVD_GRANULARITY, 3267 DEVLINK_RESOURCE_UNIT_ENTRY); 3268 devlink_resource_size_params_init(hash_double_size_params, 3269 double_size_min, 3270 kvd_size - single_size_min - 3271 linear_size_min, 3272 MLXSW_SP_KVD_GRANULARITY, 3273 DEVLINK_RESOURCE_UNIT_ENTRY); 3274 devlink_resource_size_params_init(hash_single_size_params, 3275 single_size_min, 3276 kvd_size - double_size_min - 3277 linear_size_min, 3278 MLXSW_SP_KVD_GRANULARITY, 3279 DEVLINK_RESOURCE_UNIT_ENTRY); 3280 } 3281 3282 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3283 { 3284 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3285 struct devlink_resource_size_params hash_single_size_params; 3286 struct devlink_resource_size_params hash_double_size_params; 3287 struct devlink_resource_size_params linear_size_params; 3288 struct devlink_resource_size_params kvd_size_params; 3289 u32 kvd_size, single_size, double_size, linear_size; 3290 const struct mlxsw_config_profile *profile; 3291 int err; 3292 3293 profile = &mlxsw_sp1_config_profile; 3294 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3295 return -EIO; 3296 3297 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3298 &linear_size_params, 3299 &hash_double_size_params, 3300 &hash_single_size_params); 3301 3302 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3303 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3304 kvd_size, MLXSW_SP_RESOURCE_KVD, 3305 DEVLINK_RESOURCE_ID_PARENT_TOP, 3306 &kvd_size_params); 3307 if (err) 3308 return err; 3309 3310 linear_size = profile->kvd_linear_size; 3311 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 3312 linear_size, 3313 MLXSW_SP_RESOURCE_KVD_LINEAR, 3314 MLXSW_SP_RESOURCE_KVD, 3315 &linear_size_params); 3316 if (err) 3317 return err; 3318 3319 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 3320 if (err) 3321 return err; 3322 3323 double_size = kvd_size - linear_size; 3324 double_size *= profile->kvd_hash_double_parts; 3325 double_size /= profile->kvd_hash_double_parts + 3326 profile->kvd_hash_single_parts; 3327 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 3328 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 3329 double_size, 3330 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3331 MLXSW_SP_RESOURCE_KVD, 3332 &hash_double_size_params); 3333 if (err) 3334 return err; 3335 3336 single_size = kvd_size - double_size - linear_size; 3337 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 3338 single_size, 3339 
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3340 MLXSW_SP_RESOURCE_KVD, 3341 &hash_single_size_params); 3342 if (err) 3343 return err; 3344 3345 return 0; 3346 } 3347 3348 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3349 { 3350 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3351 struct devlink_resource_size_params kvd_size_params; 3352 u32 kvd_size; 3353 3354 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3355 return -EIO; 3356 3357 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3358 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 3359 MLXSW_SP_KVD_GRANULARITY, 3360 DEVLINK_RESOURCE_UNIT_ENTRY); 3361 3362 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3363 kvd_size, MLXSW_SP_RESOURCE_KVD, 3364 DEVLINK_RESOURCE_ID_PARENT_TOP, 3365 &kvd_size_params); 3366 } 3367 3368 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 3369 { 3370 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3371 struct devlink_resource_size_params span_size_params; 3372 u32 max_span; 3373 3374 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 3375 return -EIO; 3376 3377 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 3378 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 3379 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3380 3381 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 3382 max_span, MLXSW_SP_RESOURCE_SPAN, 3383 DEVLINK_RESOURCE_ID_PARENT_TOP, 3384 &span_size_params); 3385 } 3386 3387 static int 3388 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core) 3389 { 3390 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3391 struct devlink_resource_size_params size_params; 3392 u8 max_rif_mac_profiles; 3393 3394 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) 3395 max_rif_mac_profiles = 1; 3396 else 3397 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, 3398 MAX_RIF_MAC_PROFILES); 3399 devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, 3400 max_rif_mac_profiles, 1, 3401 DEVLINK_RESOURCE_UNIT_ENTRY); 3402 3403 return devlink_resource_register(devlink, 3404 "rif_mac_profiles", 3405 max_rif_mac_profiles, 3406 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, 3407 DEVLINK_RESOURCE_ID_PARENT_TOP, 3408 &size_params); 3409 } 3410 3411 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 3412 { 3413 int err; 3414 3415 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 3416 if (err) 3417 return err; 3418 3419 err = mlxsw_sp_resources_span_register(mlxsw_core); 3420 if (err) 3421 goto err_resources_span_register; 3422 3423 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3424 if (err) 3425 goto err_resources_counter_register; 3426 3427 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3428 if (err) 3429 goto err_policer_resources_register; 3430 3431 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3432 if (err) 3433 goto err_resources_rif_mac_profile_register; 3434 3435 return 0; 3436 3437 err_resources_rif_mac_profile_register: 3438 err_policer_resources_register: 3439 err_resources_counter_register: 3440 err_resources_span_register: 3441 devlink_resources_unregister(priv_to_devlink(mlxsw_core)); 3442 return err; 3443 } 3444 3445 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 3446 { 3447 int err; 3448 3449 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 3450 if (err) 3451 return err; 3452 3453 err = mlxsw_sp_resources_span_register(mlxsw_core); 3454 if (err) 
3455 goto err_resources_span_register;
3456
3457 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3458 if (err)
3459 goto err_resources_counter_register;
3460
3461 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3462 if (err)
3463 goto err_policer_resources_register;
3464
3465 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
3466 if (err)
3467 goto err_resources_rif_mac_profile_register;
3468
3469 return 0;
3470
3471 err_resources_rif_mac_profile_register:
3472 err_policer_resources_register:
3473 err_resources_counter_register:
3474 err_resources_span_register:
3475 devlink_resources_unregister(priv_to_devlink(mlxsw_core));
3476 return err;
3477 }
3478
3479 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3480 const struct mlxsw_config_profile *profile,
3481 u64 *p_single_size, u64 *p_double_size,
3482 u64 *p_linear_size)
3483 {
3484 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3485 u32 double_size;
3486 int err;
3487
3488 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3489 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
3490 return -EIO;
3491
3492 /* The hash part is what is left of the KVD after the
3493 * linear part. It is split into the single and double
3494 * sizes by the parts ratio from the profile.
3495 * Both sizes must be multiples of the
3496 * granularity from the profile. In case the user
3497 * provided the sizes, they are obtained via devlink.
3498 */
3499 err = devlink_resource_size_get(devlink,
3500 MLXSW_SP_RESOURCE_KVD_LINEAR,
3501 p_linear_size);
3502 if (err)
3503 *p_linear_size = profile->kvd_linear_size;
3504
3505 err = devlink_resource_size_get(devlink,
3506 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3507 p_double_size);
3508 if (err) {
3509 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3510 *p_linear_size;
3511 double_size *= profile->kvd_hash_double_parts;
3512 double_size /= profile->kvd_hash_double_parts +
3513 profile->kvd_hash_single_parts;
3514 *p_double_size = rounddown(double_size,
3515 MLXSW_SP_KVD_GRANULARITY);
3516 }
3517
3518 err = devlink_resource_size_get(devlink,
3519 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3520 p_single_size);
3521 if (err)
3522 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3523 *p_double_size - *p_linear_size;
3524
3525 /* Check that the results are legal.
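 * Legal means the single and double hash parts are each at least the
 * firmware-reported minimum, and the linear part fits within the total
 * KVD size. As a purely hypothetical example of the split above: with
 * a 200000-entry KVD, a 100000-entry linear part, a 59:41
 * single:double ratio and a granularity of 128,
 * double = rounddown(100000 * 41 / 100, 128) = 40960 and
 * single = 200000 - 40960 - 100000 = 59040.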
*/ 3526 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3527 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3528 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3529 return -EIO; 3530 3531 return 0; 3532 } 3533 3534 static int 3535 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 3536 struct devlink_param_gset_ctx *ctx) 3537 { 3538 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3539 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3540 3541 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 3542 return 0; 3543 } 3544 3545 static int 3546 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 3547 struct devlink_param_gset_ctx *ctx) 3548 { 3549 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3550 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3551 3552 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 3553 } 3554 3555 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 3556 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3557 "acl_region_rehash_interval", 3558 DEVLINK_PARAM_TYPE_U32, 3559 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3560 mlxsw_sp_params_acl_region_rehash_intrvl_get, 3561 mlxsw_sp_params_acl_region_rehash_intrvl_set, 3562 NULL), 3563 }; 3564 3565 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 3566 { 3567 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3568 union devlink_param_value value; 3569 int err; 3570 3571 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 3572 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3573 if (err) 3574 return err; 3575 3576 value.vu32 = 0; 3577 devlink_param_driverinit_value_set(devlink, 3578 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3579 value); 3580 return 0; 3581 } 3582 3583 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 3584 { 3585 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3586 mlxsw_sp2_devlink_params, 3587 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3588 } 3589 3590 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3591 struct sk_buff *skb, u16 local_port) 3592 { 3593 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3594 3595 skb_pull(skb, MLXSW_TXHDR_LEN); 3596 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3597 } 3598 3599 static struct mlxsw_driver mlxsw_sp1_driver = { 3600 .kind = mlxsw_sp1_driver_name, 3601 .priv_size = sizeof(struct mlxsw_sp), 3602 .fw_req_rev = &mlxsw_sp1_fw_rev, 3603 .fw_filename = MLXSW_SP1_FW_FILENAME, 3604 .init = mlxsw_sp1_init, 3605 .fini = mlxsw_sp_fini, 3606 .port_split = mlxsw_sp_port_split, 3607 .port_unsplit = mlxsw_sp_port_unsplit, 3608 .sb_pool_get = mlxsw_sp_sb_pool_get, 3609 .sb_pool_set = mlxsw_sp_sb_pool_set, 3610 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3611 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3612 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3613 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3614 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3615 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3616 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3617 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3618 .trap_init = mlxsw_sp_trap_init, 3619 .trap_fini = mlxsw_sp_trap_fini, 3620 .trap_action_set = mlxsw_sp_trap_action_set, 3621 .trap_group_init = mlxsw_sp_trap_group_init, 3622 .trap_group_set = 
mlxsw_sp_trap_group_set, 3623 .trap_policer_init = mlxsw_sp_trap_policer_init, 3624 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3625 .trap_policer_set = mlxsw_sp_trap_policer_set, 3626 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3627 .txhdr_construct = mlxsw_sp_txhdr_construct, 3628 .resources_register = mlxsw_sp1_resources_register, 3629 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3630 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3631 .txhdr_len = MLXSW_TXHDR_LEN, 3632 .profile = &mlxsw_sp1_config_profile, 3633 .res_query_enabled = true, 3634 .fw_fatal_enabled = true, 3635 .temp_warn_enabled = true, 3636 }; 3637 3638 static struct mlxsw_driver mlxsw_sp2_driver = { 3639 .kind = mlxsw_sp2_driver_name, 3640 .priv_size = sizeof(struct mlxsw_sp), 3641 .fw_req_rev = &mlxsw_sp2_fw_rev, 3642 .fw_filename = MLXSW_SP2_FW_FILENAME, 3643 .init = mlxsw_sp2_init, 3644 .fini = mlxsw_sp_fini, 3645 .port_split = mlxsw_sp_port_split, 3646 .port_unsplit = mlxsw_sp_port_unsplit, 3647 .sb_pool_get = mlxsw_sp_sb_pool_get, 3648 .sb_pool_set = mlxsw_sp_sb_pool_set, 3649 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3650 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3651 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3652 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3653 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3654 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3655 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3656 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3657 .trap_init = mlxsw_sp_trap_init, 3658 .trap_fini = mlxsw_sp_trap_fini, 3659 .trap_action_set = mlxsw_sp_trap_action_set, 3660 .trap_group_init = mlxsw_sp_trap_group_init, 3661 .trap_group_set = mlxsw_sp_trap_group_set, 3662 .trap_policer_init = mlxsw_sp_trap_policer_init, 3663 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3664 .trap_policer_set = mlxsw_sp_trap_policer_set, 3665 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3666 .txhdr_construct = mlxsw_sp_txhdr_construct, 3667 .resources_register = mlxsw_sp2_resources_register, 3668 .params_register = mlxsw_sp2_params_register, 3669 .params_unregister = mlxsw_sp2_params_unregister, 3670 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3671 .txhdr_len = MLXSW_TXHDR_LEN, 3672 .profile = &mlxsw_sp2_config_profile, 3673 .res_query_enabled = true, 3674 .fw_fatal_enabled = true, 3675 .temp_warn_enabled = true, 3676 }; 3677 3678 static struct mlxsw_driver mlxsw_sp3_driver = { 3679 .kind = mlxsw_sp3_driver_name, 3680 .priv_size = sizeof(struct mlxsw_sp), 3681 .fw_req_rev = &mlxsw_sp3_fw_rev, 3682 .fw_filename = MLXSW_SP3_FW_FILENAME, 3683 .init = mlxsw_sp3_init, 3684 .fini = mlxsw_sp_fini, 3685 .port_split = mlxsw_sp_port_split, 3686 .port_unsplit = mlxsw_sp_port_unsplit, 3687 .sb_pool_get = mlxsw_sp_sb_pool_get, 3688 .sb_pool_set = mlxsw_sp_sb_pool_set, 3689 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3690 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3691 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3692 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3693 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3694 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3695 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3696 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3697 .trap_init = mlxsw_sp_trap_init, 3698 .trap_fini = mlxsw_sp_trap_fini, 3699 .trap_action_set = mlxsw_sp_trap_action_set, 3700 .trap_group_init = mlxsw_sp_trap_group_init, 3701 .trap_group_set = mlxsw_sp_trap_group_set, 3702 
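/* Note that Spectrum-3 reuses the Spectrum-2 resource, devlink-params
 * and config-profile callbacks further down in this table.
 */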
.trap_policer_init = mlxsw_sp_trap_policer_init, 3703 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3704 .trap_policer_set = mlxsw_sp_trap_policer_set, 3705 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3706 .txhdr_construct = mlxsw_sp_txhdr_construct, 3707 .resources_register = mlxsw_sp2_resources_register, 3708 .params_register = mlxsw_sp2_params_register, 3709 .params_unregister = mlxsw_sp2_params_unregister, 3710 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3711 .txhdr_len = MLXSW_TXHDR_LEN, 3712 .profile = &mlxsw_sp2_config_profile, 3713 .res_query_enabled = true, 3714 .fw_fatal_enabled = true, 3715 .temp_warn_enabled = true, 3716 }; 3717 3718 static struct mlxsw_driver mlxsw_sp4_driver = { 3719 .kind = mlxsw_sp4_driver_name, 3720 .priv_size = sizeof(struct mlxsw_sp), 3721 .init = mlxsw_sp4_init, 3722 .fini = mlxsw_sp_fini, 3723 .port_split = mlxsw_sp_port_split, 3724 .port_unsplit = mlxsw_sp_port_unsplit, 3725 .sb_pool_get = mlxsw_sp_sb_pool_get, 3726 .sb_pool_set = mlxsw_sp_sb_pool_set, 3727 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3728 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3729 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3730 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3731 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3732 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3733 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3734 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3735 .trap_init = mlxsw_sp_trap_init, 3736 .trap_fini = mlxsw_sp_trap_fini, 3737 .trap_action_set = mlxsw_sp_trap_action_set, 3738 .trap_group_init = mlxsw_sp_trap_group_init, 3739 .trap_group_set = mlxsw_sp_trap_group_set, 3740 .trap_policer_init = mlxsw_sp_trap_policer_init, 3741 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3742 .trap_policer_set = mlxsw_sp_trap_policer_set, 3743 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3744 .txhdr_construct = mlxsw_sp_txhdr_construct, 3745 .resources_register = mlxsw_sp2_resources_register, 3746 .params_register = mlxsw_sp2_params_register, 3747 .params_unregister = mlxsw_sp2_params_unregister, 3748 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3749 .txhdr_len = MLXSW_TXHDR_LEN, 3750 .profile = &mlxsw_sp2_config_profile, 3751 .res_query_enabled = true, 3752 .fw_fatal_enabled = true, 3753 .temp_warn_enabled = true, 3754 }; 3755 3756 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3757 { 3758 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3759 } 3760 3761 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, 3762 struct netdev_nested_priv *priv) 3763 { 3764 int ret = 0; 3765 3766 if (mlxsw_sp_port_dev_check(lower_dev)) { 3767 priv->data = (void *)netdev_priv(lower_dev); 3768 ret = 1; 3769 } 3770 3771 return ret; 3772 } 3773 3774 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 3775 { 3776 struct netdev_nested_priv priv = { 3777 .data = NULL, 3778 }; 3779 3780 if (mlxsw_sp_port_dev_check(dev)) 3781 return netdev_priv(dev); 3782 3783 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv); 3784 3785 return (struct mlxsw_sp_port *)priv.data; 3786 } 3787 3788 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3789 { 3790 struct mlxsw_sp_port *mlxsw_sp_port; 3791 3792 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3793 return mlxsw_sp_port ? 
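/* a front-panel port was found at or below dev */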
mlxsw_sp_port->mlxsw_sp : NULL; 3794 } 3795 3796 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 3797 { 3798 struct netdev_nested_priv priv = { 3799 .data = NULL, 3800 }; 3801 3802 if (mlxsw_sp_port_dev_check(dev)) 3803 return netdev_priv(dev); 3804 3805 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 3806 &priv); 3807 3808 return (struct mlxsw_sp_port *)priv.data; 3809 } 3810 3811 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 3812 { 3813 struct mlxsw_sp_port *mlxsw_sp_port; 3814 3815 rcu_read_lock(); 3816 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 3817 if (mlxsw_sp_port) 3818 dev_hold(mlxsw_sp_port->dev); 3819 rcu_read_unlock(); 3820 return mlxsw_sp_port; 3821 } 3822 3823 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 3824 { 3825 dev_put(mlxsw_sp_port->dev); 3826 } 3827 3828 int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) 3829 { 3830 char mprs_pl[MLXSW_REG_MPRS_LEN]; 3831 int err = 0; 3832 3833 mutex_lock(&mlxsw_sp->parsing.lock); 3834 3835 if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref)) 3836 goto out_unlock; 3837 3838 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH, 3839 mlxsw_sp->parsing.vxlan_udp_dport); 3840 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 3841 if (err) 3842 goto out_unlock; 3843 3844 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH; 3845 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1); 3846 3847 out_unlock: 3848 mutex_unlock(&mlxsw_sp->parsing.lock); 3849 return err; 3850 } 3851 3852 void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp) 3853 { 3854 char mprs_pl[MLXSW_REG_MPRS_LEN]; 3855 3856 mutex_lock(&mlxsw_sp->parsing.lock); 3857 3858 if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref)) 3859 goto out_unlock; 3860 3861 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH, 3862 mlxsw_sp->parsing.vxlan_udp_dport); 3863 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 3864 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 3865 3866 out_unlock: 3867 mutex_unlock(&mlxsw_sp->parsing.lock); 3868 } 3869 3870 int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, 3871 __be16 udp_dport) 3872 { 3873 char mprs_pl[MLXSW_REG_MPRS_LEN]; 3874 int err; 3875 3876 mutex_lock(&mlxsw_sp->parsing.lock); 3877 3878 mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth, 3879 be16_to_cpu(udp_dport)); 3880 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 3881 if (err) 3882 goto out_unlock; 3883 3884 mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport); 3885 3886 out_unlock: 3887 mutex_unlock(&mlxsw_sp->parsing.lock); 3888 return err; 3889 } 3890 3891 static void 3892 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 3893 struct net_device *lag_dev) 3894 { 3895 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 3896 struct net_device *upper_dev; 3897 struct list_head *iter; 3898 3899 if (netif_is_bridge_port(lag_dev)) 3900 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 3901 3902 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 3903 if (!netif_is_bridge_port(upper_dev)) 3904 continue; 3905 br_dev = netdev_master_upper_dev_get(upper_dev); 3906 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 3907 } 3908 } 3909 3910 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 3911 { 3912 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3913 3914 
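/* SLDR is the Switch LAG Descriptor Register; it is used below to
 * create and destroy hardware LAGs and to add and remove their
 * member ports.
 */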
mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 3915 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 3916 } 3917 3918 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 3919 { 3920 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3921 3922 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 3923 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 3924 } 3925 3926 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 3927 u16 lag_id, u8 port_index) 3928 { 3929 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3930 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3931 3932 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 3933 lag_id, port_index); 3934 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3935 } 3936 3937 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 3938 u16 lag_id) 3939 { 3940 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3941 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3942 3943 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 3944 lag_id); 3945 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3946 } 3947 3948 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 3949 u16 lag_id) 3950 { 3951 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3952 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3953 3954 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 3955 lag_id); 3956 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3957 } 3958 3959 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 3960 u16 lag_id) 3961 { 3962 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3963 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3964 3965 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 3966 lag_id); 3967 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3968 } 3969 3970 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 3971 struct net_device *lag_dev, 3972 u16 *p_lag_id) 3973 { 3974 struct mlxsw_sp_upper *lag; 3975 int free_lag_id = -1; 3976 u64 max_lag; 3977 int i; 3978 3979 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 3980 for (i = 0; i < max_lag; i++) { 3981 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 3982 if (lag->ref_count) { 3983 if (lag->dev == lag_dev) { 3984 *p_lag_id = i; 3985 return 0; 3986 } 3987 } else if (free_lag_id < 0) { 3988 free_lag_id = i; 3989 } 3990 } 3991 if (free_lag_id < 0) 3992 return -EBUSY; 3993 *p_lag_id = free_lag_id; 3994 return 0; 3995 } 3996 3997 static bool 3998 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 3999 struct net_device *lag_dev, 4000 struct netdev_lag_upper_info *lag_upper_info, 4001 struct netlink_ext_ack *extack) 4002 { 4003 u16 lag_id; 4004 4005 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 4006 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 4007 return false; 4008 } 4009 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4010 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4011 return false; 4012 } 4013 return true; 4014 } 4015 4016 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4017 u16 lag_id, u8 *p_port_index) 4018 { 4019 u64 max_lag_members; 4020 int i; 4021 4022 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4023 MAX_LAG_MEMBERS); 4024 for (i = 0; i < max_lag_members; i++) { 4025 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4026 *p_port_index = i; 4027 return 
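/* a free member slot was found */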
0; 4028 } 4029 } 4030 return -EBUSY; 4031 } 4032 4033 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 4034 struct net_device *lag_dev, 4035 struct netlink_ext_ack *extack) 4036 { 4037 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4038 struct mlxsw_sp_upper *lag; 4039 u16 lag_id; 4040 u8 port_index; 4041 int err; 4042 4043 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 4044 if (err) 4045 return err; 4046 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4047 if (!lag->ref_count) { 4048 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 4049 if (err) 4050 return err; 4051 lag->dev = lag_dev; 4052 } 4053 4054 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4055 if (err) 4056 return err; 4057 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 4058 if (err) 4059 goto err_col_port_add; 4060 4061 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 4062 mlxsw_sp_port->local_port); 4063 mlxsw_sp_port->lag_id = lag_id; 4064 mlxsw_sp_port->lagged = 1; 4065 lag->ref_count++; 4066 4067 /* Port is no longer usable as a router interface */ 4068 if (mlxsw_sp_port->default_vlan->fid) 4069 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 4070 4071 /* Join a router interface configured on the LAG, if one exists */ 4072 err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan, 4073 lag_dev, extack); 4074 if (err) 4075 goto err_router_join; 4076 4077 return 0; 4078 4079 err_router_join: 4080 lag->ref_count--; 4081 mlxsw_sp_port->lagged = 0; 4082 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4083 mlxsw_sp_port->local_port); 4084 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4085 err_col_port_add: 4086 if (!lag->ref_count) 4087 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4088 return err; 4089 } 4090 4091 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4092 struct net_device *lag_dev) 4093 { 4094 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4095 u16 lag_id = mlxsw_sp_port->lag_id; 4096 struct mlxsw_sp_upper *lag; 4097 4098 if (!mlxsw_sp_port->lagged) 4099 return; 4100 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4101 WARN_ON(lag->ref_count == 0); 4102 4103 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4104 4105 /* Any VLANs configured on the port are no longer valid */ 4106 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 4107 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 4108 /* Make the LAG and its directly linked uppers leave the bridges 4109 * they are members of 4110 */ 4111 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 4112 4113 if (lag->ref_count == 1) 4114 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4115 4116 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4117 mlxsw_sp_port->local_port); 4118 mlxsw_sp_port->lagged = 0; 4119 lag->ref_count--; 4120 4121 /* Make sure untagged frames are allowed to ingress */ 4122 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, 4123 ETH_P_8021Q); 4124 } 4125 4126 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4127 u16 lag_id) 4128 { 4129 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4130 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4131 4132 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4133 mlxsw_sp_port->local_port); 4134 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4135 } 4136 4137 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4138 u16 lag_id) 4139 { 4140 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4141 char
sldr_pl[MLXSW_REG_SLDR_LEN]; 4142 4143 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4144 mlxsw_sp_port->local_port); 4145 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4146 } 4147 4148 static int 4149 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 4150 { 4151 int err; 4152 4153 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 4154 mlxsw_sp_port->lag_id); 4155 if (err) 4156 return err; 4157 4158 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4159 if (err) 4160 goto err_dist_port_add; 4161 4162 return 0; 4163 4164 err_dist_port_add: 4165 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4166 return err; 4167 } 4168 4169 static int 4170 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 4171 { 4172 int err; 4173 4174 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4175 mlxsw_sp_port->lag_id); 4176 if (err) 4177 return err; 4178 4179 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 4180 mlxsw_sp_port->lag_id); 4181 if (err) 4182 goto err_col_port_disable; 4183 4184 return 0; 4185 4186 err_col_port_disable: 4187 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4188 return err; 4189 } 4190 4191 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4192 struct netdev_lag_lower_state_info *info) 4193 { 4194 if (info->tx_enabled) 4195 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 4196 else 4197 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4198 } 4199 4200 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4201 bool enable) 4202 { 4203 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4204 enum mlxsw_reg_spms_state spms_state; 4205 char *spms_pl; 4206 u16 vid; 4207 int err; 4208 4209 spms_state = enable ? 
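/* the same STP state is applied to every VID below */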
MLXSW_REG_SPMS_STATE_FORWARDING : 4210 MLXSW_REG_SPMS_STATE_DISCARDING; 4211 4212 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4213 if (!spms_pl) 4214 return -ENOMEM; 4215 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4216 4217 for (vid = 0; vid < VLAN_N_VID; vid++) 4218 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4219 4220 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4221 kfree(spms_pl); 4222 return err; 4223 } 4224 4225 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4226 { 4227 u16 vid = 1; 4228 int err; 4229 4230 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4231 if (err) 4232 return err; 4233 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4234 if (err) 4235 goto err_port_stp_set; 4236 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4237 true, false); 4238 if (err) 4239 goto err_port_vlan_set; 4240 4241 for (; vid <= VLAN_N_VID - 1; vid++) { 4242 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4243 vid, false); 4244 if (err) 4245 goto err_vid_learning_set; 4246 } 4247 4248 return 0; 4249 4250 err_vid_learning_set: 4251 for (vid--; vid >= 1; vid--) 4252 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4253 err_port_vlan_set: 4254 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4255 err_port_stp_set: 4256 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4257 return err; 4258 } 4259 4260 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4261 { 4262 u16 vid; 4263 4264 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4265 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4266 vid, true); 4267 4268 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4269 false, false); 4270 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4271 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4272 } 4273 4274 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 4275 { 4276 unsigned int num_vxlans = 0; 4277 struct net_device *dev; 4278 struct list_head *iter; 4279 4280 netdev_for_each_lower_dev(br_dev, dev, iter) { 4281 if (netif_is_vxlan(dev)) 4282 num_vxlans++; 4283 } 4284 4285 return num_vxlans > 1; 4286 } 4287 4288 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4289 { 4290 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4291 struct net_device *dev; 4292 struct list_head *iter; 4293 4294 netdev_for_each_lower_dev(br_dev, dev, iter) { 4295 u16 pvid; 4296 int err; 4297 4298 if (!netif_is_vxlan(dev)) 4299 continue; 4300 4301 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4302 if (err || !pvid) 4303 continue; 4304 4305 if (test_and_set_bit(pvid, vlans)) 4306 return false; 4307 } 4308 4309 return true; 4310 } 4311 4312 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4313 struct netlink_ext_ack *extack) 4314 { 4315 if (br_multicast_enabled(br_dev)) { 4316 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4317 return false; 4318 } 4319 4320 if (!br_vlan_enabled(br_dev) && 4321 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4322 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4323 return false; 4324 } 4325 4326 if (br_vlan_enabled(br_dev) && 4327 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4328 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4329 return false; 4330 } 4331 4332 return true; 4333 } 4334 4335 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 
4336 struct net_device *dev, 4337 unsigned long event, void *ptr) 4338 { 4339 struct netdev_notifier_changeupper_info *info; 4340 struct mlxsw_sp_port *mlxsw_sp_port; 4341 struct netlink_ext_ack *extack; 4342 struct net_device *upper_dev; 4343 struct mlxsw_sp *mlxsw_sp; 4344 int err = 0; 4345 u16 proto; 4346 4347 mlxsw_sp_port = netdev_priv(dev); 4348 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4349 info = ptr; 4350 extack = netdev_notifier_info_to_extack(&info->info); 4351 4352 switch (event) { 4353 case NETDEV_PRECHANGEUPPER: 4354 upper_dev = info->upper_dev; 4355 if (!is_vlan_dev(upper_dev) && 4356 !netif_is_lag_master(upper_dev) && 4357 !netif_is_bridge_master(upper_dev) && 4358 !netif_is_ovs_master(upper_dev) && 4359 !netif_is_macvlan(upper_dev)) { 4360 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4361 return -EINVAL; 4362 } 4363 if (!info->linking) 4364 break; 4365 if (netif_is_bridge_master(upper_dev) && 4366 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4367 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4368 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4369 return -EOPNOTSUPP; 4370 if (netdev_has_any_upper_dev(upper_dev) && 4371 (!netif_is_bridge_master(upper_dev) || 4372 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4373 upper_dev))) { 4374 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4375 return -EINVAL; 4376 } 4377 if (netif_is_lag_master(upper_dev) && 4378 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4379 info->upper_info, extack)) 4380 return -EINVAL; 4381 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4382 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4383 return -EINVAL; 4384 } 4385 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4386 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4387 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4388 return -EINVAL; 4389 } 4390 if (netif_is_macvlan(upper_dev) && 4391 !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) { 4392 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4393 return -EOPNOTSUPP; 4394 } 4395 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4396 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 4397 return -EINVAL; 4398 } 4399 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4400 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4401 return -EINVAL; 4402 } 4403 if (netif_is_bridge_master(upper_dev)) { 4404 br_vlan_get_proto(upper_dev, &proto); 4405 if (br_vlan_enabled(upper_dev) && 4406 proto != ETH_P_8021Q && proto != ETH_P_8021AD) { 4407 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported"); 4408 return -EOPNOTSUPP; 4409 } 4410 if (vlan_uses_dev(lower_dev) && 4411 br_vlan_enabled(upper_dev) && 4412 proto == ETH_P_8021AD) { 4413 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported"); 4414 return -EOPNOTSUPP; 4415 } 4416 } 4417 if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) { 4418 struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev); 4419 4420 if (br_vlan_enabled(br_dev)) { 4421 br_vlan_get_proto(br_dev, &proto); 4422 if (proto == ETH_P_8021AD) { 4423 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge"); 4424 return -EOPNOTSUPP; 4425 } 4426 } 4427 } 4428 if 
(is_vlan_dev(upper_dev) && 4429 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4430 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4431 return -EOPNOTSUPP; 4432 } 4433 break; 4434 case NETDEV_CHANGEUPPER: 4435 upper_dev = info->upper_dev; 4436 if (netif_is_bridge_master(upper_dev)) { 4437 if (info->linking) 4438 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4439 lower_dev, 4440 upper_dev, 4441 extack); 4442 else 4443 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4444 lower_dev, 4445 upper_dev); 4446 } else if (netif_is_lag_master(upper_dev)) { 4447 if (info->linking) { 4448 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4449 upper_dev, extack); 4450 } else { 4451 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4452 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4453 upper_dev); 4454 } 4455 } else if (netif_is_ovs_master(upper_dev)) { 4456 if (info->linking) 4457 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4458 else 4459 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4460 } else if (netif_is_macvlan(upper_dev)) { 4461 if (!info->linking) 4462 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4463 } else if (is_vlan_dev(upper_dev)) { 4464 struct net_device *br_dev; 4465 4466 if (!netif_is_bridge_port(upper_dev)) 4467 break; 4468 if (info->linking) 4469 break; 4470 br_dev = netdev_master_upper_dev_get(upper_dev); 4471 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 4472 br_dev); 4473 } 4474 break; 4475 } 4476 4477 return err; 4478 } 4479 4480 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 4481 unsigned long event, void *ptr) 4482 { 4483 struct netdev_notifier_changelowerstate_info *info; 4484 struct mlxsw_sp_port *mlxsw_sp_port; 4485 int err; 4486 4487 mlxsw_sp_port = netdev_priv(dev); 4488 info = ptr; 4489 4490 switch (event) { 4491 case NETDEV_CHANGELOWERSTATE: 4492 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 4493 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 4494 info->lower_state_info); 4495 if (err) 4496 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 4497 } 4498 break; 4499 } 4500 4501 return 0; 4502 } 4503 4504 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 4505 struct net_device *port_dev, 4506 unsigned long event, void *ptr) 4507 { 4508 switch (event) { 4509 case NETDEV_PRECHANGEUPPER: 4510 case NETDEV_CHANGEUPPER: 4511 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 4512 event, ptr); 4513 case NETDEV_CHANGELOWERSTATE: 4514 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 4515 ptr); 4516 } 4517 4518 return 0; 4519 } 4520 4521 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 4522 unsigned long event, void *ptr) 4523 { 4524 struct net_device *dev; 4525 struct list_head *iter; 4526 int ret; 4527 4528 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4529 if (mlxsw_sp_port_dev_check(dev)) { 4530 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 4531 ptr); 4532 if (ret) 4533 return ret; 4534 } 4535 } 4536 4537 return 0; 4538 } 4539 4540 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 4541 struct net_device *dev, 4542 unsigned long event, void *ptr, 4543 u16 vid) 4544 { 4545 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4546 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4547 struct netdev_notifier_changeupper_info *info = ptr; 4548 struct netlink_ext_ack *extack; 4549 struct net_device *upper_dev; 4550 int err = 0; 4551 4552 extack = 
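/* extack for reporting rejections back to user space */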
netdev_notifier_info_to_extack(&info->info); 4553 4554 switch (event) { 4555 case NETDEV_PRECHANGEUPPER: 4556 upper_dev = info->upper_dev; 4557 if (!netif_is_bridge_master(upper_dev) && 4558 !netif_is_macvlan(upper_dev)) { 4559 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4560 return -EINVAL; 4561 } 4562 if (!info->linking) 4563 break; 4564 if (netif_is_bridge_master(upper_dev) && 4565 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4566 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4567 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4568 return -EOPNOTSUPP; 4569 if (netdev_has_any_upper_dev(upper_dev) && 4570 (!netif_is_bridge_master(upper_dev) || 4571 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4572 upper_dev))) { 4573 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4574 return -EINVAL; 4575 } 4576 if (netif_is_macvlan(upper_dev) && 4577 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 4578 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4579 return -EOPNOTSUPP; 4580 } 4581 break; 4582 case NETDEV_CHANGEUPPER: 4583 upper_dev = info->upper_dev; 4584 if (netif_is_bridge_master(upper_dev)) { 4585 if (info->linking) 4586 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4587 vlan_dev, 4588 upper_dev, 4589 extack); 4590 else 4591 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4592 vlan_dev, 4593 upper_dev); 4594 } else if (netif_is_macvlan(upper_dev)) { 4595 if (!info->linking) 4596 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4597 } else { 4598 err = -EINVAL; 4599 WARN_ON(1); 4600 } 4601 break; 4602 } 4603 4604 return err; 4605 } 4606 4607 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 4608 struct net_device *lag_dev, 4609 unsigned long event, 4610 void *ptr, u16 vid) 4611 { 4612 struct net_device *dev; 4613 struct list_head *iter; 4614 int ret; 4615 4616 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4617 if (mlxsw_sp_port_dev_check(dev)) { 4618 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 4619 event, ptr, 4620 vid); 4621 if (ret) 4622 return ret; 4623 } 4624 } 4625 4626 return 0; 4627 } 4628 4629 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 4630 struct net_device *br_dev, 4631 unsigned long event, void *ptr, 4632 u16 vid) 4633 { 4634 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 4635 struct netdev_notifier_changeupper_info *info = ptr; 4636 struct netlink_ext_ack *extack; 4637 struct net_device *upper_dev; 4638 4639 if (!mlxsw_sp) 4640 return 0; 4641 4642 extack = netdev_notifier_info_to_extack(&info->info); 4643 4644 switch (event) { 4645 case NETDEV_PRECHANGEUPPER: 4646 upper_dev = info->upper_dev; 4647 if (!netif_is_macvlan(upper_dev)) { 4648 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4649 return -EOPNOTSUPP; 4650 } 4651 if (!info->linking) 4652 break; 4653 if (netif_is_macvlan(upper_dev) && 4654 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 4655 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4656 return -EOPNOTSUPP; 4657 } 4658 break; 4659 case NETDEV_CHANGEUPPER: 4660 upper_dev = info->upper_dev; 4661 if (info->linking) 4662 break; 4663 if (netif_is_macvlan(upper_dev)) 4664 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4665 break; 4666 } 4667 4668 return 0; 4669 } 4670 4671 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 4672 unsigned long event, void *ptr) 4673 { 4674 struct net_device *real_dev = 
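/* the device the VLAN rides on */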
vlan_dev_real_dev(vlan_dev); 4675 u16 vid = vlan_dev_vlan_id(vlan_dev); 4676 4677 if (mlxsw_sp_port_dev_check(real_dev)) 4678 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 4679 event, ptr, vid); 4680 else if (netif_is_lag_master(real_dev)) 4681 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 4682 real_dev, event, 4683 ptr, vid); 4684 else if (netif_is_bridge_master(real_dev)) 4685 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 4686 event, ptr, vid); 4687 4688 return 0; 4689 } 4690 4691 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 4692 unsigned long event, void *ptr) 4693 { 4694 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 4695 struct netdev_notifier_changeupper_info *info = ptr; 4696 struct netlink_ext_ack *extack; 4697 struct net_device *upper_dev; 4698 u16 proto; 4699 4700 if (!mlxsw_sp) 4701 return 0; 4702 4703 extack = netdev_notifier_info_to_extack(&info->info); 4704 4705 switch (event) { 4706 case NETDEV_PRECHANGEUPPER: 4707 upper_dev = info->upper_dev; 4708 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 4709 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4710 return -EOPNOTSUPP; 4711 } 4712 if (!info->linking) 4713 break; 4714 if (br_vlan_enabled(br_dev)) { 4715 br_vlan_get_proto(br_dev, &proto); 4716 if (proto == ETH_P_8021AD) { 4717 NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge"); 4718 return -EOPNOTSUPP; 4719 } 4720 } 4721 if (is_vlan_dev(upper_dev) && 4722 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4723 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4724 return -EOPNOTSUPP; 4725 } 4726 if (netif_is_macvlan(upper_dev) && 4727 !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) { 4728 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4729 return -EOPNOTSUPP; 4730 } 4731 break; 4732 case NETDEV_CHANGEUPPER: 4733 upper_dev = info->upper_dev; 4734 if (info->linking) 4735 break; 4736 if (is_vlan_dev(upper_dev)) 4737 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 4738 if (netif_is_macvlan(upper_dev)) 4739 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4740 break; 4741 } 4742 4743 return 0; 4744 } 4745 4746 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 4747 unsigned long event, void *ptr) 4748 { 4749 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 4750 struct netdev_notifier_changeupper_info *info = ptr; 4751 struct netlink_ext_ack *extack; 4752 4753 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 4754 return 0; 4755 4756 extack = netdev_notifier_info_to_extack(&info->info); 4757 4758 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 4759 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4760 4761 return -EOPNOTSUPP; 4762 } 4763 4764 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 4765 { 4766 struct netdev_notifier_changeupper_info *info = ptr; 4767 4768 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 4769 return false; 4770 return netif_is_l3_master(info->upper_dev); 4771 } 4772 4773 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 4774 struct net_device *dev, 4775 unsigned long event, void *ptr) 4776 { 4777 struct netdev_notifier_changeupper_info *cu_info; 4778 struct netdev_notifier_info *info = ptr; 4779 struct netlink_ext_ack *extack; 4780 struct net_device *upper_dev; 4781 4782 extack = netdev_notifier_info_to_extack(info); 4783 4784 
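/* Three events matter for VxLAN offload: CHANGEUPPER joins or leaves
 * the bridge mapping on (un)enslavement, PRE_UP joins when an already
 * enslaved device is brought up, and DOWN tears the mapping down.
 */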
switch (event) { 4785 case NETDEV_CHANGEUPPER: 4786 cu_info = container_of(info, 4787 struct netdev_notifier_changeupper_info, 4788 info); 4789 upper_dev = cu_info->upper_dev; 4790 if (!netif_is_bridge_master(upper_dev)) 4791 return 0; 4792 if (!mlxsw_sp_lower_get(upper_dev)) 4793 return 0; 4794 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4795 return -EOPNOTSUPP; 4796 if (cu_info->linking) { 4797 if (!netif_running(dev)) 4798 return 0; 4799 /* When the bridge is VLAN-aware, the VNI of the VxLAN 4800 * device needs to be mapped to a VLAN, but at this 4801 * point no VLANs are configured on the VxLAN device 4802 */ 4803 if (br_vlan_enabled(upper_dev)) 4804 return 0; 4805 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 4806 dev, 0, extack); 4807 } else { 4808 /* VLANs were already flushed, which triggered the 4809 * necessary cleanup 4810 */ 4811 if (br_vlan_enabled(upper_dev)) 4812 return 0; 4813 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 4814 } 4815 break; 4816 case NETDEV_PRE_UP: 4817 upper_dev = netdev_master_upper_dev_get(dev); 4818 if (!upper_dev) 4819 return 0; 4820 if (!netif_is_bridge_master(upper_dev)) 4821 return 0; 4822 if (!mlxsw_sp_lower_get(upper_dev)) 4823 return 0; 4824 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 4825 extack); 4826 case NETDEV_DOWN: 4827 upper_dev = netdev_master_upper_dev_get(dev); 4828 if (!upper_dev) 4829 return 0; 4830 if (!netif_is_bridge_master(upper_dev)) 4831 return 0; 4832 if (!mlxsw_sp_lower_get(upper_dev)) 4833 return 0; 4834 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 4835 break; 4836 } 4837 4838 return 0; 4839 } 4840 4841 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 4842 unsigned long event, void *ptr) 4843 { 4844 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4845 struct mlxsw_sp_span_entry *span_entry; 4846 struct mlxsw_sp *mlxsw_sp; 4847 int err = 0; 4848 4849 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 4850 if (event == NETDEV_UNREGISTER) { 4851 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 4852 if (span_entry) 4853 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 4854 } 4855 mlxsw_sp_span_respin(mlxsw_sp); 4856 4857 if (netif_is_vxlan(dev)) 4858 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 4859 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 4860 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 4861 event, ptr); 4862 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 4863 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 4864 event, ptr); 4865 else if (event == NETDEV_PRE_CHANGEADDR || 4866 event == NETDEV_CHANGEADDR || 4867 event == NETDEV_CHANGEMTU) 4868 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); 4869 else if (mlxsw_sp_is_vrf_event(event, ptr)) 4870 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 4871 else if (mlxsw_sp_port_dev_check(dev)) 4872 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 4873 else if (netif_is_lag_master(dev)) 4874 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 4875 else if (is_vlan_dev(dev)) 4876 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 4877 else if (netif_is_bridge_master(dev)) 4878 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 4879 else if (netif_is_macvlan(dev)) 4880 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 4881 4882 return notifier_from_errno(err); 4883 } 4884 4885 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 4886 .notifier_call = mlxsw_sp_inetaddr_valid_event, 4887 }; 4888 
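/* The inetaddr/inet6addr validator notifiers run before an address is
 * installed, which lets the driver veto router configurations it
 * cannot offload while the operation can still fail cleanly.
 */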
4889 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 4890 .notifier_call = mlxsw_sp_inet6addr_valid_event, 4891 }; 4892 4893 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 4894 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 4895 {0, }, 4896 }; 4897 4898 static struct pci_driver mlxsw_sp1_pci_driver = { 4899 .name = mlxsw_sp1_driver_name, 4900 .id_table = mlxsw_sp1_pci_id_table, 4901 }; 4902 4903 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 4904 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 4905 {0, }, 4906 }; 4907 4908 static struct pci_driver mlxsw_sp2_pci_driver = { 4909 .name = mlxsw_sp2_driver_name, 4910 .id_table = mlxsw_sp2_pci_id_table, 4911 }; 4912 4913 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 4914 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 4915 {0, }, 4916 }; 4917 4918 static struct pci_driver mlxsw_sp3_pci_driver = { 4919 .name = mlxsw_sp3_driver_name, 4920 .id_table = mlxsw_sp3_pci_id_table, 4921 }; 4922 4923 static const struct pci_device_id mlxsw_sp4_pci_id_table[] = { 4924 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0}, 4925 {0, }, 4926 }; 4927 4928 static struct pci_driver mlxsw_sp4_pci_driver = { 4929 .name = mlxsw_sp4_driver_name, 4930 .id_table = mlxsw_sp4_pci_id_table, 4931 }; 4932 4933 static int __init mlxsw_sp_module_init(void) 4934 { 4935 int err; 4936 4937 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4938 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4939 4940 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 4941 if (err) 4942 goto err_sp1_core_driver_register; 4943 4944 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 4945 if (err) 4946 goto err_sp2_core_driver_register; 4947 4948 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 4949 if (err) 4950 goto err_sp3_core_driver_register; 4951 4952 err = mlxsw_core_driver_register(&mlxsw_sp4_driver); 4953 if (err) 4954 goto err_sp4_core_driver_register; 4955 4956 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 4957 if (err) 4958 goto err_sp1_pci_driver_register; 4959 4960 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 4961 if (err) 4962 goto err_sp2_pci_driver_register; 4963 4964 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 4965 if (err) 4966 goto err_sp3_pci_driver_register; 4967 4968 err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver); 4969 if (err) 4970 goto err_sp4_pci_driver_register; 4971 4972 return 0; 4973 4974 err_sp4_pci_driver_register: 4975 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 4976 err_sp3_pci_driver_register: 4977 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 4978 err_sp2_pci_driver_register: 4979 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 4980 err_sp1_pci_driver_register: 4981 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 4982 err_sp4_core_driver_register: 4983 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 4984 err_sp3_core_driver_register: 4985 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 4986 err_sp2_core_driver_register: 4987 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 4988 err_sp1_core_driver_register: 4989 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4990 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4991 return err; 4992 } 4993 4994 static void __exit mlxsw_sp_module_exit(void) 4995 { 4996 mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver); 4997 
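/* unwind in exact reverse order of mlxsw_sp_module_init() */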
mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 4998 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 4999 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5000 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5001 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5002 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5003 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5004 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5005 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5006 } 5007 5008 module_init(mlxsw_sp_module_init); 5009 module_exit(mlxsw_sp_module_exit); 5010 5011 MODULE_LICENSE("Dual BSD/GPL"); 5012 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5013 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 5014 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5015 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5016 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 5017 MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); 5018 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5019 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 5020 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 5021
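/* Adding a hypothetical future Spectrum generation would follow the
 * same pattern as the four above (a sketch, not actual driver code):
 *
 *	static struct mlxsw_driver mlxsw_spN_driver = {
 *		.kind		= mlxsw_spN_driver_name,
 *		.priv_size	= sizeof(struct mlxsw_sp),
 *		.init		= mlxsw_spN_init,
 *		.fini		= mlxsw_sp_fini,
 *		.profile	= &mlxsw_spN_config_profile,
 *	};
 *
 * together with a matching pci_device_id table and pci_driver, a
 * registration call in mlxsw_sp_module_init() with its unwind label,
 * the mirror unregistration in mlxsw_sp_module_exit(), and
 * MODULE_DEVICE_TABLE()/MODULE_FIRMWARE() entries.
 */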