// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
#define MLXSW_SP1_FWREV_SUBMINOR 2406
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
#define MLXSW_SP2_FWREV_SUBMINOR 2406

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
#define MLXSW_SP3_FWREV_SUBMINOR 2406

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP3_FWREV_MINOR,
	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
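
/* The MLXSW_ITEM32() definitions below describe the Tx header that the
 * driver prepends to every transmitted packet. Each definition generates
 * mlxsw_tx_hdr_<field>_get()/_set() accessors; the fields cover offsets
 * 0x00-0x0C of the header, and the control-packet fields among them are
 * filled in by mlxsw_sp_txhdr_construct() further down.
 */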

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
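
/* Usage sketch for the flow counter helpers above (hypothetical caller,
 * error handling elided):
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *
 *	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	...
 *	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
 *					&packets, &bytes);
 *	...
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 *
 * Allocation clears the counter first, so the first read reflects only
 * traffic seen since the allocation.
 */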

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
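
/* The base MAC read here seeds the per-port MAC addresses:
 * mlxsw_sp_port_dev_addr_init() below copies it and adds the local port
 * number to the last byte. For example (illustrative values only), a base
 * MAC ending in 00 gives local port 3 an address ending in 03.
 */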

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
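
/* mlxsw_sp_port_mtu_set() above programs the worst-case frame size: the
 * netdev MTU plus the Tx header and the Ethernet header. Assuming a
 * 16-byte MLXSW_TXHDR_LEN, an MTU of 1500 would be programmed as
 * 1500 + 16 + 14 (ETH_HLEN) = 1530.
 */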

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}
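
/* Note that mlxsw_sp_port_pvid_set() models PVID removal (VID 0) by
 * disallowing untagged and prio-tagged packets rather than by programming
 * SPVID, and that the previous PVID is restored if the SPAFT write fails.
 */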

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
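
/* As checked above, a usable PMLP mapping must use a single module, a
 * power-of-2 width, identical RX and TX lane numbers when they are
 * configured separately, and sequential TX lanes.
 */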

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
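
/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop above
 * re-reads a CPU's counters until it obtains a snapshot that was not
 * concurrently updated, which matters on 32-bit machines where 64-bit
 * counters cannot be read atomically. tx_dropped is a u32 and is read
 * outside of that protection, as noted in the code.
 */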

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
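
/* mlxsw_sp_port_vlan_set() splits the requested range into chunks of at
 * most MLXSW_REG_SPVM_REC_MAX_COUNT VIDs per register write. For instance
 * (illustrative only), if the maximum record count were 256, programming
 * VIDs 1-1024 would take four SPVM writes.
 */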

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
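
/* The QEEC helpers above configure elements of the port's egress
 * scheduling hierarchy (port, group, subgroup, traffic class).
 * mlxsw_sp_port_ets_init() below links TC i to subgroup i and pairs
 * TC i + 8 (used for multicast) with the same subgroup.
 */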

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy, so that each TC is linked to
	 * one subgroup, and all subgroups are members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
						    &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}
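
/* mlxsw_sp_port_create() below brings up a front-panel port: it registers
 * the core port, allocates the netdev and per-CPU stats, maps the module,
 * and configures SWID, MAC, speed, MTU, buffers, ETS, DCB, FIDs, qdiscs,
 * VLANs and PTP before finally registering the netdev. Each step has a
 * matching error label that unwinds the steps before it.
 */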

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	struct net_device *dev;
	bool splittable;
	int err;

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / lanes,
				   splittable, lanes,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false so that the local port treats
	 * only packets with an 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}
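
/* mlxsw_sp_port_remove() tears the port down in the reverse order of
 * mlxsw_sp_port_create(), starting by cancelling the delayed works that
 * may still reference the port.
 */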

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
sizeof(port_mapping), 1872 GFP_KERNEL); 1873 if (!mlxsw_sp->port_mapping[i]) { 1874 err = -ENOMEM; 1875 goto err_port_module_info_dup; 1876 } 1877 } 1878 return 0; 1879 1880 err_port_module_info_get: 1881 err_port_module_info_dup: 1882 for (i--; i >= 1; i--) 1883 kfree(mlxsw_sp->port_mapping[i]); 1884 kfree(mlxsw_sp->port_mapping); 1885 return err; 1886 } 1887 1888 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 1889 { 1890 int i; 1891 1892 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 1893 kfree(mlxsw_sp->port_mapping[i]); 1894 kfree(mlxsw_sp->port_mapping); 1895 } 1896 1897 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width) 1898 { 1899 u8 offset = (local_port - 1) % max_width; 1900 1901 return local_port - offset; 1902 } 1903 1904 static int 1905 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 1906 struct mlxsw_sp_port_mapping *port_mapping, 1907 unsigned int count, u8 offset) 1908 { 1909 struct mlxsw_sp_port_mapping split_port_mapping; 1910 int err, i; 1911 1912 split_port_mapping = *port_mapping; 1913 split_port_mapping.width /= count; 1914 for (i = 0; i < count; i++) { 1915 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 1916 base_port, &split_port_mapping); 1917 if (err) 1918 goto err_port_create; 1919 split_port_mapping.lane += split_port_mapping.width; 1920 } 1921 1922 return 0; 1923 1924 err_port_create: 1925 for (i--; i >= 0; i--) 1926 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 1927 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 1928 return err; 1929 } 1930 1931 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 1932 u8 base_port, 1933 unsigned int count, u8 offset) 1934 { 1935 struct mlxsw_sp_port_mapping *port_mapping; 1936 int i; 1937 1938 /* Go over original unsplit ports in the gap and recreate them. 
*/ 1939 for (i = 0; i < count * offset; i++) { 1940 port_mapping = mlxsw_sp->port_mapping[base_port + i]; 1941 if (!port_mapping) 1942 continue; 1943 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping); 1944 } 1945 } 1946 1947 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, 1948 unsigned int count, 1949 unsigned int max_width) 1950 { 1951 enum mlxsw_res_id local_ports_in_x_res_id; 1952 int split_width = max_width / count; 1953 1954 if (split_width == 1) 1955 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X; 1956 else if (split_width == 2) 1957 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X; 1958 else if (split_width == 4) 1959 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X; 1960 else 1961 return -EINVAL; 1962 1963 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id)) 1964 return -EINVAL; 1965 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); 1966 } 1967 1968 static struct mlxsw_sp_port * 1969 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) 1970 { 1971 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 1972 return mlxsw_sp->ports[local_port]; 1973 return NULL; 1974 } 1975 1976 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 1977 unsigned int count, 1978 struct netlink_ext_ack *extack) 1979 { 1980 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 1981 struct mlxsw_sp_port_mapping port_mapping; 1982 struct mlxsw_sp_port *mlxsw_sp_port; 1983 int max_width; 1984 u8 base_port; 1985 int offset; 1986 int i; 1987 int err; 1988 1989 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 1990 if (!mlxsw_sp_port) { 1991 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 1992 local_port); 1993 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 1994 return -EINVAL; 1995 } 1996 1997 max_width = mlxsw_core_module_max_width(mlxsw_core, 1998 mlxsw_sp_port->mapping.module); 1999 if (max_width < 0) { 2000 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 2001 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 2002 return max_width; 2003 } 2004 2005 /* A port whose width is not the module's maximal width is already split and cannot be split further. */ 2006 if (mlxsw_sp_port->mapping.width != max_width) { 2007 netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n"); 2008 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); 2009 return -EINVAL; 2010 } 2011 2012 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 2013 if (offset < 0) { 2014 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 2015 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 2016 return -EINVAL; 2017 } 2018 2019 /* The local port and the base port may differ only when the 2020 * maximal split is being done. 2021 */ 2022 base_port = count == max_width ? 2023 mlxsw_sp_cluster_base_port_get(local_port, max_width) : 2024 local_port; 2025 2026 for (i = 0; i < count * offset; i++) { 2027 /* The base port is expected to exist, and so is the middle port in 2028 * case of the maximal split count.
2029 */ 2030 if (i == 0 || (count == max_width && i == count / 2)) 2031 continue; 2032 2033 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { 2034 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2035 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 2036 return -EINVAL; 2037 } 2038 } 2039 2040 port_mapping = mlxsw_sp_port->mapping; 2041 2042 for (i = 0; i < count; i++) 2043 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 2044 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 2045 2046 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, 2047 count, offset); 2048 if (err) { 2049 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2050 goto err_port_split_create; 2051 } 2052 2053 return 0; 2054 2055 err_port_split_create: 2056 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 2057 return err; 2058 } 2059 2060 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 2061 struct netlink_ext_ack *extack) 2062 { 2063 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2064 struct mlxsw_sp_port *mlxsw_sp_port; 2065 unsigned int count; 2066 int max_width; 2067 u8 base_port; 2068 int offset; 2069 int i; 2070 2071 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2072 if (!mlxsw_sp_port) { 2073 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2074 local_port); 2075 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2076 return -EINVAL; 2077 } 2078 2079 if (!mlxsw_sp_port->split) { 2080 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 2081 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2082 return -EINVAL; 2083 } 2084 2085 max_width = mlxsw_core_module_max_width(mlxsw_core, 2086 mlxsw_sp_port->mapping.module); 2087 if (max_width < 0) { 2088 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 2089 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 2090 return max_width; 2091 } 2092 2093 count = max_width / mlxsw_sp_port->mapping.width; 2094 2095 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 2096 if (WARN_ON(offset < 0)) { 2097 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 2098 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 2099 return -EINVAL; 2100 } 2101 2102 base_port = mlxsw_sp_port->split_base_local_port; 2103 2104 for (i = 0; i < count; i++) 2105 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 2106 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 2107 2108 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 2109 2110 return 0; 2111 } 2112 2113 static void 2114 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 2115 { 2116 int i; 2117 2118 for (i = 0; i < TC_MAX_QUEUE; i++) 2119 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 2120 } 2121 2122 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2123 char *pude_pl, void *priv) 2124 { 2125 struct mlxsw_sp *mlxsw_sp = priv; 2126 struct mlxsw_sp_port *mlxsw_sp_port; 2127 enum mlxsw_reg_pude_oper_status status; 2128 u8 local_port; 2129 2130 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2131 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2132 if (!mlxsw_sp_port) 2133 return; 2134 2135 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2136 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2137 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2138 netif_carrier_on(mlxsw_sp_port->dev); 2139 
mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 2140 } else { 2141 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2142 netif_carrier_off(mlxsw_sp_port->dev); 2143 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 2144 } 2145 } 2146 2147 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 2148 char *mtpptr_pl, bool ingress) 2149 { 2150 u8 local_port; 2151 u8 num_rec; 2152 int i; 2153 2154 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 2155 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 2156 for (i = 0; i < num_rec; i++) { 2157 u8 domain_number; 2158 u8 message_type; 2159 u16 sequence_id; 2160 u64 timestamp; 2161 2162 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 2163 &domain_number, &sequence_id, 2164 &timestamp); 2165 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 2166 message_type, domain_number, 2167 sequence_id, timestamp); 2168 } 2169 } 2170 2171 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 2172 char *mtpptr_pl, void *priv) 2173 { 2174 struct mlxsw_sp *mlxsw_sp = priv; 2175 2176 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 2177 } 2178 2179 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 2180 char *mtpptr_pl, void *priv) 2181 { 2182 struct mlxsw_sp *mlxsw_sp = priv; 2183 2184 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 2185 } 2186 2187 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2188 u8 local_port, void *priv) 2189 { 2190 struct mlxsw_sp *mlxsw_sp = priv; 2191 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2192 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2193 2194 if (unlikely(!mlxsw_sp_port)) { 2195 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2196 local_port); 2197 return; 2198 } 2199 2200 skb->dev = mlxsw_sp_port->dev; 2201 2202 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2203 u64_stats_update_begin(&pcpu_stats->syncp); 2204 pcpu_stats->rx_packets++; 2205 pcpu_stats->rx_bytes += skb->len; 2206 u64_stats_update_end(&pcpu_stats->syncp); 2207 2208 skb->protocol = eth_type_trans(skb, skb->dev); 2209 netif_receive_skb(skb); 2210 } 2211 2212 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 2213 void *priv) 2214 { 2215 skb->offload_fwd_mark = 1; 2216 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2217 } 2218 2219 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2220 u8 local_port, void *priv) 2221 { 2222 skb->offload_l3_fwd_mark = 1; 2223 skb->offload_fwd_mark = 1; 2224 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2225 } 2226 2227 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2228 u8 local_port) 2229 { 2230 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2231 } 2232 2233 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2234 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2235 _is_ctrl, SP_##_trap_group, DISCARD) 2236 2237 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2238 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2239 _is_ctrl, SP_##_trap_group, DISCARD) 2240 2241 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2242 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2243 _is_ctrl, SP_##_trap_group, DISCARD) 2244 2245 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2246 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
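/* Added commentary (not from the original source): given the macro
 * definitions above, a table entry such as
 *
 *   MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false)
 *
 * expands to
 *
 *   MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, IPV6_LINK_LOCAL_SRC,
 *             TRAP_TO_CPU, false, SP_ROUTER_EXP, DISCARD)
 *
 * i.e. a non-control listener in the SP_ROUTER_EXP trap group whose
 * handler sets skb->offload_fwd_mark before handing the packet to the
 * stack. The trailing DISCARD is assumed here to be the action applied
 * once the listener is unregistered.
 */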
2247 2248 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2249 /* Events */ 2250 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2251 /* L2 traps */ 2252 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2253 /* L3 traps */ 2254 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2255 false), 2256 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2257 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2258 false), 2259 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2260 ROUTER_EXP, false), 2261 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2262 ROUTER_EXP, false), 2263 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2264 ROUTER_EXP, false), 2265 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2266 ROUTER_EXP, false), 2267 /* Multicast Router Traps */ 2268 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2269 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2270 /* NVE traps */ 2271 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), 2272 }; 2273 2274 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2275 /* Events */ 2276 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2277 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2278 }; 2279 2280 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2281 { 2282 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2283 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2284 enum mlxsw_reg_qpcr_ir_units ir_units; 2285 int max_cpu_policers; 2286 bool is_bytes; 2287 u8 burst_size; 2288 u32 rate; 2289 int i, err; 2290 2291 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2292 return -EIO; 2293 2294 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2295 2296 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2297 for (i = 0; i < max_cpu_policers; i++) { 2298 is_bytes = false; 2299 switch (i) { 2300 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2301 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2302 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2303 rate = 1024; 2304 burst_size = 7; 2305 break; 2306 default: 2307 continue; 2308 } 2309 2310 __set_bit(i, mlxsw_sp->trap->policers_usage); 2311 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2312 burst_size); 2313 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2314 if (err) 2315 return err; 2316 } 2317 2318 return 0; 2319 } 2320 2321 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2322 { 2323 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2324 enum mlxsw_reg_htgt_trap_group i; 2325 int max_cpu_policers; 2326 int max_trap_groups; 2327 u8 priority, tc; 2328 u16 policer_id; 2329 int err; 2330 2331 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2332 return -EIO; 2333 2334 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2335 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2336 2337 for (i = 0; i < max_trap_groups; i++) { 2338 policer_id = i; 2339 switch (i) { 2340 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2341 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2342 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2343 priority = 1; 2344 tc = 1; 2345 break; 2346 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2347 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2348 tc = MLXSW_REG_HTGT_DEFAULT_TC; 2349 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2350 break; 2351 default: 2352 continue; 2353 } 2354 2355 if 
(max_cpu_policers <= policer_id && 2356 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2357 return -EIO; 2358 2359 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2360 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2361 if (err) 2362 return err; 2363 } 2364 2365 return 0; 2366 } 2367 2368 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp, 2369 const struct mlxsw_listener listeners[], 2370 size_t listeners_count) 2371 { 2372 int i; 2373 int err; 2374 2375 for (i = 0; i < listeners_count; i++) { 2376 err = mlxsw_core_trap_register(mlxsw_sp->core, 2377 &listeners[i], 2378 mlxsw_sp); 2379 if (err) 2380 goto err_listener_register; 2381 2382 } 2383 return 0; 2384 2385 err_listener_register: 2386 for (i--; i >= 0; i--) { 2387 mlxsw_core_trap_unregister(mlxsw_sp->core, 2388 &listeners[i], 2389 mlxsw_sp); 2390 } 2391 return err; 2392 } 2393 2394 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp, 2395 const struct mlxsw_listener listeners[], 2396 size_t listeners_count) 2397 { 2398 int i; 2399 2400 for (i = 0; i < listeners_count; i++) { 2401 mlxsw_core_trap_unregister(mlxsw_sp->core, 2402 &listeners[i], 2403 mlxsw_sp); 2404 } 2405 } 2406 2407 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2408 { 2409 struct mlxsw_sp_trap *trap; 2410 u64 max_policers; 2411 int err; 2412 2413 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2414 return -EIO; 2415 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2416 trap = kzalloc(struct_size(trap, policers_usage, 2417 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2418 if (!trap) 2419 return -ENOMEM; 2420 trap->max_policers = max_policers; 2421 mlxsw_sp->trap = trap; 2422 2423 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2424 if (err) 2425 goto err_cpu_policers_set; 2426 2427 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2428 if (err) 2429 goto err_trap_groups_set; 2430 2431 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener, 2432 ARRAY_SIZE(mlxsw_sp_listener)); 2433 if (err) 2434 goto err_traps_register; 2435 2436 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners, 2437 mlxsw_sp->listeners_count); 2438 if (err) 2439 goto err_extra_traps_init; 2440 2441 return 0; 2442 2443 err_extra_traps_init: 2444 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2445 ARRAY_SIZE(mlxsw_sp_listener)); 2446 err_traps_register: 2447 err_trap_groups_set: 2448 err_cpu_policers_set: 2449 kfree(trap); 2450 return err; 2451 } 2452 2453 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2454 { 2455 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners, 2456 mlxsw_sp->listeners_count); 2457 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2458 ARRAY_SIZE(mlxsw_sp_listener)); 2459 kfree(mlxsw_sp->trap); 2460 } 2461 2462 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2463 2464 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2465 { 2466 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2467 u32 seed; 2468 int err; 2469 2470 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2471 MLXSW_SP_LAG_SEED_INIT); 2472 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2473 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2474 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2475 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2476 MLXSW_REG_SLCR_LAG_HASH_SIP | 2477 MLXSW_REG_SLCR_LAG_HASH_DIP | 2478 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2479 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2480 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2481 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2482 if (err) 2483 return err; 2484 
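/* Added commentary (not from the original source): the LAG hash fields
 * were just programmed via SLCR. The code below sizes the driver's LAG
 * table from the MAX_LAG resource reported by firmware; both MAX_LAG and
 * MAX_LAG_MEMBERS must be valid, since mlxsw_sp_lag_index_get() and
 * mlxsw_sp_port_lag_index_get() later iterate up to these limits when
 * joining a port to a LAG.
 */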
2485 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 2486 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2487 return -EIO; 2488 2489 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 2490 sizeof(struct mlxsw_sp_upper), 2491 GFP_KERNEL); 2492 if (!mlxsw_sp->lags) 2493 return -ENOMEM; 2494 2495 return 0; 2496 } 2497 2498 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2499 { 2500 kfree(mlxsw_sp->lags); 2501 } 2502 2503 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 2504 { 2505 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2506 int err; 2507 2508 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 2509 MLXSW_REG_HTGT_INVALID_POLICER, 2510 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2511 MLXSW_REG_HTGT_DEFAULT_TC); 2512 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2513 if (err) 2514 return err; 2515 2516 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE, 2517 MLXSW_REG_HTGT_INVALID_POLICER, 2518 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2519 MLXSW_REG_HTGT_DEFAULT_TC); 2520 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2521 if (err) 2522 return err; 2523 2524 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE, 2525 MLXSW_REG_HTGT_INVALID_POLICER, 2526 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2527 MLXSW_REG_HTGT_DEFAULT_TC); 2528 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2529 if (err) 2530 return err; 2531 2532 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE, 2533 MLXSW_REG_HTGT_INVALID_POLICER, 2534 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2535 MLXSW_REG_HTGT_DEFAULT_TC); 2536 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2537 } 2538 2539 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2540 .clock_init = mlxsw_sp1_ptp_clock_init, 2541 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2542 .init = mlxsw_sp1_ptp_init, 2543 .fini = mlxsw_sp1_ptp_fini, 2544 .receive = mlxsw_sp1_ptp_receive, 2545 .transmitted = mlxsw_sp1_ptp_transmitted, 2546 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2547 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 2548 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2549 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2550 .get_stats_count = mlxsw_sp1_get_stats_count, 2551 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2552 .get_stats = mlxsw_sp1_get_stats, 2553 }; 2554 2555 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2556 .clock_init = mlxsw_sp2_ptp_clock_init, 2557 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2558 .init = mlxsw_sp2_ptp_init, 2559 .fini = mlxsw_sp2_ptp_fini, 2560 .receive = mlxsw_sp2_ptp_receive, 2561 .transmitted = mlxsw_sp2_ptp_transmitted, 2562 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2563 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2564 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2565 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2566 .get_stats_count = mlxsw_sp2_get_stats_count, 2567 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2568 .get_stats = mlxsw_sp2_get_stats, 2569 }; 2570 2571 struct mlxsw_sp_sample_trigger_node { 2572 struct mlxsw_sp_sample_trigger trigger; 2573 struct mlxsw_sp_sample_params params; 2574 struct rhash_head ht_node; 2575 struct rcu_head rcu; 2576 refcount_t refcount; 2577 }; 2578 2579 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { 2580 .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), 2581 .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), 2582 .key_len = sizeof(struct mlxsw_sp_sample_trigger), 2583 .automatic_shrinking = true, 2584 }; 
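/* Usage sketch (added commentary, not from the original source; the
 * trigger type constant is an assumed enum value): a caller resolving the
 * sampling parameters for an ingress trigger on a port would do, under
 * the RCU read lock:
 *
 *   struct mlxsw_sp_sample_trigger trigger = {
 *           .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS,
 *           .local_port = mlxsw_sp_port->local_port,
 *   };
 *   struct mlxsw_sp_sample_params *params;
 *
 *   rcu_read_lock();
 *   params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger);
 *   if (params)
 *           ... sample to params->psample_group at params->rate ...
 *   rcu_read_unlock();
 *
 * The key is the trigger structure itself (key_len above); the key_init
 * helper below zeroes it first, so padding and non-key fields cannot
 * perturb the hash.
 */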
2585 2586 static void 2587 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, 2588 const struct mlxsw_sp_sample_trigger *trigger) 2589 { 2590 memset(key, 0, sizeof(*key)); 2591 key->type = trigger->type; 2592 key->local_port = trigger->local_port; 2593 } 2594 2595 /* RCU read lock must be held */ 2596 struct mlxsw_sp_sample_params * 2597 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, 2598 const struct mlxsw_sp_sample_trigger *trigger) 2599 { 2600 struct mlxsw_sp_sample_trigger_node *trigger_node; 2601 struct mlxsw_sp_sample_trigger key; 2602 2603 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2604 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, 2605 mlxsw_sp_sample_trigger_ht_params); 2606 if (!trigger_node) 2607 return NULL; 2608 2609 return &trigger_node->params; 2610 } 2611 2612 static int 2613 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, 2614 const struct mlxsw_sp_sample_trigger *trigger, 2615 const struct mlxsw_sp_sample_params *params) 2616 { 2617 struct mlxsw_sp_sample_trigger_node *trigger_node; 2618 int err; 2619 2620 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); 2621 if (!trigger_node) 2622 return -ENOMEM; 2623 2624 trigger_node->trigger = *trigger; 2625 trigger_node->params = *params; 2626 refcount_set(&trigger_node->refcount, 1); 2627 2628 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, 2629 &trigger_node->ht_node, 2630 mlxsw_sp_sample_trigger_ht_params); 2631 if (err) 2632 goto err_rhashtable_insert; 2633 2634 return 0; 2635 2636 err_rhashtable_insert: 2637 kfree(trigger_node); 2638 return err; 2639 } 2640 2641 static void 2642 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, 2643 struct mlxsw_sp_sample_trigger_node *trigger_node) 2644 { 2645 rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, 2646 &trigger_node->ht_node, 2647 mlxsw_sp_sample_trigger_ht_params); 2648 kfree_rcu(trigger_node, rcu); 2649 } 2650 2651 int 2652 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, 2653 const struct mlxsw_sp_sample_trigger *trigger, 2654 const struct mlxsw_sp_sample_params *params, 2655 struct netlink_ext_ack *extack) 2656 { 2657 struct mlxsw_sp_sample_trigger_node *trigger_node; 2658 struct mlxsw_sp_sample_trigger key; 2659 2660 ASSERT_RTNL(); 2661 2662 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2663 2664 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2665 &key, 2666 mlxsw_sp_sample_trigger_ht_params); 2667 if (!trigger_node) 2668 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, 2669 params); 2670 2671 if (trigger_node->params.psample_group != params->psample_group || 2672 trigger_node->params.truncate != params->truncate || 2673 trigger_node->params.rate != params->rate || 2674 trigger_node->params.trunc_size != params->trunc_size) { 2675 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); 2676 return -EINVAL; 2677 } 2678 2679 refcount_inc(&trigger_node->refcount); 2680 2681 return 0; 2682 } 2683 2684 void 2685 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, 2686 const struct mlxsw_sp_sample_trigger *trigger) 2687 { 2688 struct mlxsw_sp_sample_trigger_node *trigger_node; 2689 struct mlxsw_sp_sample_trigger key; 2690 2691 ASSERT_RTNL(); 2692 2693 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2694 2695 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2696 &key, 2697 mlxsw_sp_sample_trigger_ht_params); 2698 if (!trigger_node) 2699 return; 2700 2701 
if (!refcount_dec_and_test(&trigger_node->refcount)) 2702 return; 2703 2704 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); 2705 } 2706 2707 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2708 unsigned long event, void *ptr); 2709 2710 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2711 const struct mlxsw_bus_info *mlxsw_bus_info, 2712 struct netlink_ext_ack *extack) 2713 { 2714 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2715 int err; 2716 2717 mlxsw_sp->core = mlxsw_core; 2718 mlxsw_sp->bus_info = mlxsw_bus_info; 2719 2720 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 2721 2722 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2723 if (err) { 2724 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 2725 return err; 2726 } 2727 2728 err = mlxsw_sp_kvdl_init(mlxsw_sp); 2729 if (err) { 2730 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 2731 return err; 2732 } 2733 2734 err = mlxsw_sp_fids_init(mlxsw_sp); 2735 if (err) { 2736 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 2737 goto err_fids_init; 2738 } 2739 2740 err = mlxsw_sp_policers_init(mlxsw_sp); 2741 if (err) { 2742 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); 2743 goto err_policers_init; 2744 } 2745 2746 err = mlxsw_sp_traps_init(mlxsw_sp); 2747 if (err) { 2748 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 2749 goto err_traps_init; 2750 } 2751 2752 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 2753 if (err) { 2754 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 2755 goto err_devlink_traps_init; 2756 } 2757 2758 err = mlxsw_sp_buffers_init(mlxsw_sp); 2759 if (err) { 2760 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 2761 goto err_buffers_init; 2762 } 2763 2764 err = mlxsw_sp_lag_init(mlxsw_sp); 2765 if (err) { 2766 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 2767 goto err_lag_init; 2768 } 2769 2770 /* Initialize SPAN before router and switchdev, so that those components 2771 * can call mlxsw_sp_span_respin(). 
2772 */ 2773 err = mlxsw_sp_span_init(mlxsw_sp); 2774 if (err) { 2775 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 2776 goto err_span_init; 2777 } 2778 2779 err = mlxsw_sp_switchdev_init(mlxsw_sp); 2780 if (err) { 2781 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 2782 goto err_switchdev_init; 2783 } 2784 2785 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 2786 if (err) { 2787 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 2788 goto err_counter_pool_init; 2789 } 2790 2791 err = mlxsw_sp_afa_init(mlxsw_sp); 2792 if (err) { 2793 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 2794 goto err_afa_init; 2795 } 2796 2797 err = mlxsw_sp_nve_init(mlxsw_sp); 2798 if (err) { 2799 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 2800 goto err_nve_init; 2801 } 2802 2803 err = mlxsw_sp_acl_init(mlxsw_sp); 2804 if (err) { 2805 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 2806 goto err_acl_init; 2807 } 2808 2809 err = mlxsw_sp_router_init(mlxsw_sp, extack); 2810 if (err) { 2811 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 2812 goto err_router_init; 2813 } 2814 2815 if (mlxsw_sp->bus_info->read_frc_capable) { 2816 /* NULL is a valid return value from clock_init */ 2817 mlxsw_sp->clock = 2818 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 2819 mlxsw_sp->bus_info->dev); 2820 if (IS_ERR(mlxsw_sp->clock)) { 2821 err = PTR_ERR(mlxsw_sp->clock); 2822 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 2823 goto err_ptp_clock_init; 2824 } 2825 } 2826 2827 if (mlxsw_sp->clock) { 2828 /* NULL is a valid return value from ptp_ops->init */ 2829 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 2830 if (IS_ERR(mlxsw_sp->ptp_state)) { 2831 err = PTR_ERR(mlxsw_sp->ptp_state); 2832 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 2833 goto err_ptp_init; 2834 } 2835 } 2836 2837 /* Initialize the netdevice notifier after the router and SPAN are initialized, 2838 * so that the event handler can use router structures and call SPAN 2839 * respin.
2840 */ 2841 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 2842 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2843 &mlxsw_sp->netdevice_nb); 2844 if (err) { 2845 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 2846 goto err_netdev_notifier; 2847 } 2848 2849 err = mlxsw_sp_dpipe_init(mlxsw_sp); 2850 if (err) { 2851 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 2852 goto err_dpipe_init; 2853 } 2854 2855 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 2856 if (err) { 2857 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 2858 goto err_port_module_info_init; 2859 } 2860 2861 err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, 2862 &mlxsw_sp_sample_trigger_ht_params); 2863 if (err) { 2864 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); 2865 goto err_sample_trigger_init; 2866 } 2867 2868 err = mlxsw_sp_ports_create(mlxsw_sp); 2869 if (err) { 2870 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 2871 goto err_ports_create; 2872 } 2873 2874 return 0; 2875 2876 err_ports_create: 2877 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 2878 err_sample_trigger_init: 2879 mlxsw_sp_port_module_info_fini(mlxsw_sp); 2880 err_port_module_info_init: 2881 mlxsw_sp_dpipe_fini(mlxsw_sp); 2882 err_dpipe_init: 2883 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2884 &mlxsw_sp->netdevice_nb); 2885 err_netdev_notifier: 2886 if (mlxsw_sp->clock) 2887 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 2888 err_ptp_init: 2889 if (mlxsw_sp->clock) 2890 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 2891 err_ptp_clock_init: 2892 mlxsw_sp_router_fini(mlxsw_sp); 2893 err_router_init: 2894 mlxsw_sp_acl_fini(mlxsw_sp); 2895 err_acl_init: 2896 mlxsw_sp_nve_fini(mlxsw_sp); 2897 err_nve_init: 2898 mlxsw_sp_afa_fini(mlxsw_sp); 2899 err_afa_init: 2900 mlxsw_sp_counter_pool_fini(mlxsw_sp); 2901 err_counter_pool_init: 2902 mlxsw_sp_switchdev_fini(mlxsw_sp); 2903 err_switchdev_init: 2904 mlxsw_sp_span_fini(mlxsw_sp); 2905 err_span_init: 2906 mlxsw_sp_lag_fini(mlxsw_sp); 2907 err_lag_init: 2908 mlxsw_sp_buffers_fini(mlxsw_sp); 2909 err_buffers_init: 2910 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 2911 err_devlink_traps_init: 2912 mlxsw_sp_traps_fini(mlxsw_sp); 2913 err_traps_init: 2914 mlxsw_sp_policers_fini(mlxsw_sp); 2915 err_policers_init: 2916 mlxsw_sp_fids_fini(mlxsw_sp); 2917 err_fids_init: 2918 mlxsw_sp_kvdl_fini(mlxsw_sp); 2919 return err; 2920 } 2921 2922 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 2923 const struct mlxsw_bus_info *mlxsw_bus_info, 2924 struct netlink_ext_ack *extack) 2925 { 2926 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2927 2928 mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; 2929 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 2930 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 2931 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 2932 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 2933 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 2934 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 2935 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 2936 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 2937 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 2938 mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; 2939 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 2940 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 2941 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 2942 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; 2943 mlxsw_sp->trap_ops = 
&mlxsw_sp1_trap_ops; 2944 mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; 2945 mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; 2946 mlxsw_sp->listeners = mlxsw_sp1_listener; 2947 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 2948 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 2949 2950 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 2951 } 2952 2953 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 2954 const struct mlxsw_bus_info *mlxsw_bus_info, 2955 struct netlink_ext_ack *extack) 2956 { 2957 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2958 2959 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 2960 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 2961 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 2962 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 2963 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 2964 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 2965 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 2966 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 2967 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 2968 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 2969 mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; 2970 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 2971 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 2972 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 2973 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 2974 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 2975 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 2976 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 2977 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 2978 2979 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 2980 } 2981 2982 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 2983 const struct mlxsw_bus_info *mlxsw_bus_info, 2984 struct netlink_ext_ack *extack) 2985 { 2986 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2987 2988 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 2989 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 2990 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 2991 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 2992 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 2993 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 2994 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 2995 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 2996 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 2997 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 2998 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 2999 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3000 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3001 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3002 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3003 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3004 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3005 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3006 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3007 3008 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3009 } 3010 3011 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3012 { 3013 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3014 3015 mlxsw_sp_ports_remove(mlxsw_sp); 3016 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3017 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3018 mlxsw_sp_dpipe_fini(mlxsw_sp); 3019 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3020 &mlxsw_sp->netdevice_nb); 3021 if (mlxsw_sp->clock) { 3022 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3023 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3024 } 3025 
mlxsw_sp_router_fini(mlxsw_sp); 3026 mlxsw_sp_acl_fini(mlxsw_sp); 3027 mlxsw_sp_nve_fini(mlxsw_sp); 3028 mlxsw_sp_afa_fini(mlxsw_sp); 3029 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3030 mlxsw_sp_switchdev_fini(mlxsw_sp); 3031 mlxsw_sp_span_fini(mlxsw_sp); 3032 mlxsw_sp_lag_fini(mlxsw_sp); 3033 mlxsw_sp_buffers_fini(mlxsw_sp); 3034 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3035 mlxsw_sp_traps_fini(mlxsw_sp); 3036 mlxsw_sp_policers_fini(mlxsw_sp); 3037 mlxsw_sp_fids_fini(mlxsw_sp); 3038 mlxsw_sp_kvdl_fini(mlxsw_sp); 3039 } 3040 3041 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 3042 * 802.1Q FIDs 3043 */ 3044 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 3045 VLAN_VID_MASK - 1) 3046 3047 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3048 .used_max_mid = 1, 3049 .max_mid = MLXSW_SP_MID_MAX, 3050 .used_flood_tables = 1, 3051 .used_flood_mode = 1, 3052 .flood_mode = 3, 3053 .max_fid_flood_tables = 3, 3054 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3055 .used_max_ib_mc = 1, 3056 .max_ib_mc = 0, 3057 .used_max_pkey = 1, 3058 .max_pkey = 0, 3059 .used_kvd_sizes = 1, 3060 .kvd_hash_single_parts = 59, 3061 .kvd_hash_double_parts = 41, 3062 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3063 .swid_config = { 3064 { 3065 .used_type = 1, 3066 .type = MLXSW_PORT_SWID_TYPE_ETH, 3067 } 3068 }, 3069 }; 3070 3071 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3072 .used_max_mid = 1, 3073 .max_mid = MLXSW_SP_MID_MAX, 3074 .used_flood_tables = 1, 3075 .used_flood_mode = 1, 3076 .flood_mode = 3, 3077 .max_fid_flood_tables = 3, 3078 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3079 .used_max_ib_mc = 1, 3080 .max_ib_mc = 0, 3081 .used_max_pkey = 1, 3082 .max_pkey = 0, 3083 .used_kvh_xlt_cache_mode = 1, 3084 .kvh_xlt_cache_mode = 1, 3085 .swid_config = { 3086 { 3087 .used_type = 1, 3088 .type = MLXSW_PORT_SWID_TYPE_ETH, 3089 } 3090 }, 3091 }; 3092 3093 static void 3094 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3095 struct devlink_resource_size_params *kvd_size_params, 3096 struct devlink_resource_size_params *linear_size_params, 3097 struct devlink_resource_size_params *hash_double_size_params, 3098 struct devlink_resource_size_params *hash_single_size_params) 3099 { 3100 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3101 KVD_SINGLE_MIN_SIZE); 3102 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3103 KVD_DOUBLE_MIN_SIZE); 3104 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3105 u32 linear_size_min = 0; 3106 3107 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3108 MLXSW_SP_KVD_GRANULARITY, 3109 DEVLINK_RESOURCE_UNIT_ENTRY); 3110 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3111 kvd_size - single_size_min - 3112 double_size_min, 3113 MLXSW_SP_KVD_GRANULARITY, 3114 DEVLINK_RESOURCE_UNIT_ENTRY); 3115 devlink_resource_size_params_init(hash_double_size_params, 3116 double_size_min, 3117 kvd_size - single_size_min - 3118 linear_size_min, 3119 MLXSW_SP_KVD_GRANULARITY, 3120 DEVLINK_RESOURCE_UNIT_ENTRY); 3121 devlink_resource_size_params_init(hash_single_size_params, 3122 single_size_min, 3123 kvd_size - double_size_min - 3124 linear_size_min, 3125 MLXSW_SP_KVD_GRANULARITY, 3126 DEVLINK_RESOURCE_UNIT_ENTRY); 3127 } 3128 3129 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3130 { 3131 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3132 struct devlink_resource_size_params 
hash_single_size_params; 3133 struct devlink_resource_size_params hash_double_size_params; 3134 struct devlink_resource_size_params linear_size_params; 3135 struct devlink_resource_size_params kvd_size_params; 3136 u32 kvd_size, single_size, double_size, linear_size; 3137 const struct mlxsw_config_profile *profile; 3138 int err; 3139 3140 profile = &mlxsw_sp1_config_profile; 3141 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3142 return -EIO; 3143 3144 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3145 &linear_size_params, 3146 &hash_double_size_params, 3147 &hash_single_size_params); 3148 3149 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3150 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3151 kvd_size, MLXSW_SP_RESOURCE_KVD, 3152 DEVLINK_RESOURCE_ID_PARENT_TOP, 3153 &kvd_size_params); 3154 if (err) 3155 return err; 3156 3157 linear_size = profile->kvd_linear_size; 3158 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 3159 linear_size, 3160 MLXSW_SP_RESOURCE_KVD_LINEAR, 3161 MLXSW_SP_RESOURCE_KVD, 3162 &linear_size_params); 3163 if (err) 3164 return err; 3165 3166 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 3167 if (err) 3168 return err; 3169 3170 double_size = kvd_size - linear_size; 3171 double_size *= profile->kvd_hash_double_parts; 3172 double_size /= profile->kvd_hash_double_parts + 3173 profile->kvd_hash_single_parts; 3174 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 3175 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 3176 double_size, 3177 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3178 MLXSW_SP_RESOURCE_KVD, 3179 &hash_double_size_params); 3180 if (err) 3181 return err; 3182 3183 single_size = kvd_size - double_size - linear_size; 3184 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 3185 single_size, 3186 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3187 MLXSW_SP_RESOURCE_KVD, 3188 &hash_single_size_params); 3189 if (err) 3190 return err; 3191 3192 return 0; 3193 } 3194 3195 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3196 { 3197 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3198 struct devlink_resource_size_params kvd_size_params; 3199 u32 kvd_size; 3200 3201 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3202 return -EIO; 3203 3204 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3205 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 3206 MLXSW_SP_KVD_GRANULARITY, 3207 DEVLINK_RESOURCE_UNIT_ENTRY); 3208 3209 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3210 kvd_size, MLXSW_SP_RESOURCE_KVD, 3211 DEVLINK_RESOURCE_ID_PARENT_TOP, 3212 &kvd_size_params); 3213 } 3214 3215 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 3216 { 3217 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3218 struct devlink_resource_size_params span_size_params; 3219 u32 max_span; 3220 3221 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 3222 return -EIO; 3223 3224 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 3225 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 3226 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3227 3228 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 3229 max_span, MLXSW_SP_RESOURCE_SPAN, 3230 DEVLINK_RESOURCE_ID_PARENT_TOP, 3231 &span_size_params); 3232 } 3233 3234 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 3235 { 3236 int err; 3237 3238 err 
= mlxsw_sp1_resources_kvd_register(mlxsw_core); 3239 if (err) 3240 return err; 3241 3242 err = mlxsw_sp_resources_span_register(mlxsw_core); 3243 if (err) 3244 goto err_resources_span_register; 3245 3246 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3247 if (err) 3248 goto err_resources_counter_register; 3249 3250 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3251 if (err) 3252 goto err_resources_counter_register; 3253 3254 return 0; 3255 3256 err_resources_counter_register: 3257 err_resources_span_register: 3258 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 3259 return err; 3260 } 3261 3262 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 3263 { 3264 int err; 3265 3266 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 3267 if (err) 3268 return err; 3269 3270 err = mlxsw_sp_resources_span_register(mlxsw_core); 3271 if (err) 3272 goto err_resources_span_register; 3273 3274 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3275 if (err) 3276 goto err_resources_counter_register; 3277 3278 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3279 if (err) 3280 goto err_resources_counter_register; 3281 3282 return 0; 3283 3284 err_resources_counter_register: 3285 err_resources_span_register: 3286 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 3287 return err; 3288 } 3289 3290 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3291 const struct mlxsw_config_profile *profile, 3292 u64 *p_single_size, u64 *p_double_size, 3293 u64 *p_linear_size) 3294 { 3295 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3296 u32 double_size; 3297 int err; 3298 3299 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3300 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 3301 return -EIO; 3302 3303 /* The hash part is what is left of the KVD after the 3304 * linear part is taken out. It is split into the single and 3305 * double sizes according to the parts ratio from the profile. 3306 * Both sizes must be multiples of the 3307 * granularity from the profile. If the user 3308 * provided the sizes, they are obtained via devlink. 3309 */ 3310 err = devlink_resource_size_get(devlink, 3311 MLXSW_SP_RESOURCE_KVD_LINEAR, 3312 p_linear_size); 3313 if (err) 3314 *p_linear_size = profile->kvd_linear_size; 3315 3316 err = devlink_resource_size_get(devlink, 3317 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3318 p_double_size); 3319 if (err) { 3320 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3321 *p_linear_size; 3322 double_size *= profile->kvd_hash_double_parts; 3323 double_size /= profile->kvd_hash_double_parts + 3324 profile->kvd_hash_single_parts; 3325 *p_double_size = rounddown(double_size, 3326 MLXSW_SP_KVD_GRANULARITY); 3327 } 3328 3329 err = devlink_resource_size_get(devlink, 3330 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3331 p_single_size); 3332 if (err) 3333 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3334 *p_double_size - *p_linear_size; 3335 3336 /* Check that the results are legal.
*/ 3337 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3338 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3339 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3340 return -EIO; 3341 3342 return 0; 3343 } 3344 3345 static int 3346 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 3347 struct devlink_param_gset_ctx *ctx) 3348 { 3349 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3350 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3351 3352 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 3353 return 0; 3354 } 3355 3356 static int 3357 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 3358 struct devlink_param_gset_ctx *ctx) 3359 { 3360 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3361 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3362 3363 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 3364 } 3365 3366 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 3367 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3368 "acl_region_rehash_interval", 3369 DEVLINK_PARAM_TYPE_U32, 3370 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3371 mlxsw_sp_params_acl_region_rehash_intrvl_get, 3372 mlxsw_sp_params_acl_region_rehash_intrvl_set, 3373 NULL), 3374 }; 3375 3376 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 3377 { 3378 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3379 union devlink_param_value value; 3380 int err; 3381 3382 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 3383 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3384 if (err) 3385 return err; 3386 3387 value.vu32 = 0; 3388 devlink_param_driverinit_value_set(devlink, 3389 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3390 value); 3391 return 0; 3392 } 3393 3394 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 3395 { 3396 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3397 mlxsw_sp2_devlink_params, 3398 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3399 } 3400 3401 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3402 struct sk_buff *skb, u8 local_port) 3403 { 3404 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3405 3406 skb_pull(skb, MLXSW_TXHDR_LEN); 3407 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3408 } 3409 3410 static struct mlxsw_driver mlxsw_sp1_driver = { 3411 .kind = mlxsw_sp1_driver_name, 3412 .priv_size = sizeof(struct mlxsw_sp), 3413 .fw_req_rev = &mlxsw_sp1_fw_rev, 3414 .fw_filename = MLXSW_SP1_FW_FILENAME, 3415 .init = mlxsw_sp1_init, 3416 .fini = mlxsw_sp_fini, 3417 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3418 .port_split = mlxsw_sp_port_split, 3419 .port_unsplit = mlxsw_sp_port_unsplit, 3420 .sb_pool_get = mlxsw_sp_sb_pool_get, 3421 .sb_pool_set = mlxsw_sp_sb_pool_set, 3422 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3423 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3424 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3425 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3426 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3427 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3428 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3429 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3430 .trap_init = mlxsw_sp_trap_init, 3431 .trap_fini = mlxsw_sp_trap_fini, 3432 .trap_action_set = mlxsw_sp_trap_action_set, 3433 .trap_group_init = 
mlxsw_sp_trap_group_init, 3434 .trap_group_set = mlxsw_sp_trap_group_set, 3435 .trap_policer_init = mlxsw_sp_trap_policer_init, 3436 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3437 .trap_policer_set = mlxsw_sp_trap_policer_set, 3438 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3439 .txhdr_construct = mlxsw_sp_txhdr_construct, 3440 .resources_register = mlxsw_sp1_resources_register, 3441 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3442 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3443 .txhdr_len = MLXSW_TXHDR_LEN, 3444 .profile = &mlxsw_sp1_config_profile, 3445 .res_query_enabled = true, 3446 .fw_fatal_enabled = true, 3447 .temp_warn_enabled = true, 3448 }; 3449 3450 static struct mlxsw_driver mlxsw_sp2_driver = { 3451 .kind = mlxsw_sp2_driver_name, 3452 .priv_size = sizeof(struct mlxsw_sp), 3453 .fw_req_rev = &mlxsw_sp2_fw_rev, 3454 .fw_filename = MLXSW_SP2_FW_FILENAME, 3455 .init = mlxsw_sp2_init, 3456 .fini = mlxsw_sp_fini, 3457 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3458 .port_split = mlxsw_sp_port_split, 3459 .port_unsplit = mlxsw_sp_port_unsplit, 3460 .sb_pool_get = mlxsw_sp_sb_pool_get, 3461 .sb_pool_set = mlxsw_sp_sb_pool_set, 3462 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3463 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3464 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3465 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3466 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3467 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3468 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3469 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3470 .trap_init = mlxsw_sp_trap_init, 3471 .trap_fini = mlxsw_sp_trap_fini, 3472 .trap_action_set = mlxsw_sp_trap_action_set, 3473 .trap_group_init = mlxsw_sp_trap_group_init, 3474 .trap_group_set = mlxsw_sp_trap_group_set, 3475 .trap_policer_init = mlxsw_sp_trap_policer_init, 3476 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3477 .trap_policer_set = mlxsw_sp_trap_policer_set, 3478 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3479 .txhdr_construct = mlxsw_sp_txhdr_construct, 3480 .resources_register = mlxsw_sp2_resources_register, 3481 .params_register = mlxsw_sp2_params_register, 3482 .params_unregister = mlxsw_sp2_params_unregister, 3483 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3484 .txhdr_len = MLXSW_TXHDR_LEN, 3485 .profile = &mlxsw_sp2_config_profile, 3486 .res_query_enabled = true, 3487 .fw_fatal_enabled = true, 3488 .temp_warn_enabled = true, 3489 }; 3490 3491 static struct mlxsw_driver mlxsw_sp3_driver = { 3492 .kind = mlxsw_sp3_driver_name, 3493 .priv_size = sizeof(struct mlxsw_sp), 3494 .fw_req_rev = &mlxsw_sp3_fw_rev, 3495 .fw_filename = MLXSW_SP3_FW_FILENAME, 3496 .init = mlxsw_sp3_init, 3497 .fini = mlxsw_sp_fini, 3498 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3499 .port_split = mlxsw_sp_port_split, 3500 .port_unsplit = mlxsw_sp_port_unsplit, 3501 .sb_pool_get = mlxsw_sp_sb_pool_get, 3502 .sb_pool_set = mlxsw_sp_sb_pool_set, 3503 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3504 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3505 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3506 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3507 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3508 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3509 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3510 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3511 .trap_init = mlxsw_sp_trap_init, 3512 .trap_fini = 
mlxsw_sp_trap_fini, 3513 .trap_action_set = mlxsw_sp_trap_action_set, 3514 .trap_group_init = mlxsw_sp_trap_group_init, 3515 .trap_group_set = mlxsw_sp_trap_group_set, 3516 .trap_policer_init = mlxsw_sp_trap_policer_init, 3517 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3518 .trap_policer_set = mlxsw_sp_trap_policer_set, 3519 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3520 .txhdr_construct = mlxsw_sp_txhdr_construct, 3521 .resources_register = mlxsw_sp2_resources_register, 3522 .params_register = mlxsw_sp2_params_register, 3523 .params_unregister = mlxsw_sp2_params_unregister, 3524 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3525 .txhdr_len = MLXSW_TXHDR_LEN, 3526 .profile = &mlxsw_sp2_config_profile, 3527 .res_query_enabled = true, 3528 .fw_fatal_enabled = true, 3529 .temp_warn_enabled = true, 3530 }; 3531 3532 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3533 { 3534 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3535 } 3536 3537 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, 3538 struct netdev_nested_priv *priv) 3539 { 3540 int ret = 0; 3541 3542 if (mlxsw_sp_port_dev_check(lower_dev)) { 3543 priv->data = (void *)netdev_priv(lower_dev); 3544 ret = 1; 3545 } 3546 3547 return ret; 3548 } 3549 3550 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 3551 { 3552 struct netdev_nested_priv priv = { 3553 .data = NULL, 3554 }; 3555 3556 if (mlxsw_sp_port_dev_check(dev)) 3557 return netdev_priv(dev); 3558 3559 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv); 3560 3561 return (struct mlxsw_sp_port *)priv.data; 3562 } 3563 3564 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3565 { 3566 struct mlxsw_sp_port *mlxsw_sp_port; 3567 3568 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3569 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL; 3570 } 3571 3572 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 3573 { 3574 struct netdev_nested_priv priv = { 3575 .data = NULL, 3576 }; 3577 3578 if (mlxsw_sp_port_dev_check(dev)) 3579 return netdev_priv(dev); 3580 3581 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 3582 &priv); 3583 3584 return (struct mlxsw_sp_port *)priv.data; 3585 } 3586 3587 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 3588 { 3589 struct mlxsw_sp_port *mlxsw_sp_port; 3590 3591 rcu_read_lock(); 3592 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 3593 if (mlxsw_sp_port) 3594 dev_hold(mlxsw_sp_port->dev); 3595 rcu_read_unlock(); 3596 return mlxsw_sp_port; 3597 } 3598 3599 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 3600 { 3601 dev_put(mlxsw_sp_port->dev); 3602 } 3603 3604 static void 3605 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 3606 struct net_device *lag_dev) 3607 { 3608 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 3609 struct net_device *upper_dev; 3610 struct list_head *iter; 3611 3612 if (netif_is_bridge_port(lag_dev)) 3613 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 3614 3615 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 3616 if (!netif_is_bridge_port(upper_dev)) 3617 continue; 3618 br_dev = netdev_master_upper_dev_get(upper_dev); 3619 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 3620 } 3621 } 3622 3623 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 3624 { 3625 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3626 3627 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 3628 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 3629 } 3630 3631 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 3632 { 3633 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3634 3635 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 3636 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 3637 } 3638 3639 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 3640 u16 lag_id, u8 port_index) 3641 { 3642 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3643 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3644 3645 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 3646 lag_id, port_index); 3647 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3648 } 3649 3650 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 3651 u16 lag_id) 3652 { 3653 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3654 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3655 3656 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 3657 lag_id); 3658 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3659 } 3660 3661 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 3662 u16 lag_id) 3663 { 3664 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3665 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3666 3667 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 3668 lag_id); 3669 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3670 } 3671 3672 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 3673 u16 lag_id) 3674 { 3675 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3676 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3677 3678 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 3679 lag_id); 3680 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3681 } 3682 3683 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 3684 struct net_device *lag_dev, 3685 u16 *p_lag_id) 3686 { 3687 struct mlxsw_sp_upper *lag; 3688 int free_lag_id = -1; 3689 u64 max_lag; 3690 int i; 3691 3692 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 3693 for (i = 0; i < max_lag; i++) { 3694 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 3695 if (lag->ref_count) { 3696 if (lag->dev == lag_dev) { 3697 *p_lag_id = i; 3698 return 0; 3699 } 3700 } else if (free_lag_id < 0) { 3701 free_lag_id = i; 3702 } 3703 } 3704 if (free_lag_id < 0) 3705 return -EBUSY; 3706 *p_lag_id = free_lag_id; 3707 return 0; 3708 } 3709 3710 static bool 3711 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 3712 struct net_device *lag_dev, 3713 struct netdev_lag_upper_info *lag_upper_info, 3714 struct netlink_ext_ack *extack) 3715 { 3716 u16 lag_id; 3717 3718 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 3719 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 3720 return false; 3721 } 3722 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 3723 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 3724 return false; 3725 } 3726 return true; 3727 } 3728 3729 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 3730 u16 lag_id, u8 *p_port_index) 3731 { 3732 u64 max_lag_members; 3733 int i; 3734 3735 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 3736 MAX_LAG_MEMBERS); 3737 for (i = 0; i < max_lag_members; i++) { 3738 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 3739 *p_port_index = i; 3740 return 0; 3741 } 3742 } 3743 return -EBUSY; 3744 } 3745 3746 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 3747 struct net_device *lag_dev, 3748 struct netlink_ext_ack *extack) 3749 { 3750 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3751 struct mlxsw_sp_upper *lag; 3752 u16 lag_id; 3753 u8 port_index; 3754 int err; 3755 3756 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 3757 if (err) 3758 return err; 3759 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 3760 if (!lag->ref_count) { 3761 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 3762 if (err) 3763 return err; 3764 lag->dev = lag_dev; 3765 } 3766 3767 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 3768 if (err) 3769 return err; 3770 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 3771 if (err) 3772 goto err_col_port_add; 3773 3774 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 3775 mlxsw_sp_port->local_port); 3776 mlxsw_sp_port->lag_id = lag_id; 3777 mlxsw_sp_port->lagged = 1; 3778 lag->ref_count++; 3779 3780 /* Port is no longer usable as a router interface */ 3781 if (mlxsw_sp_port->default_vlan->fid) 3782 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 3783 3784 /* Join a router interface configured on the LAG, if exists */ 3785 err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan, 3786 lag_dev, extack); 3787 if (err) 3788 goto err_router_join; 3789 3790 return 0; 3791 3792 err_router_join: 3793 lag->ref_count--; 3794 mlxsw_sp_port->lagged = 0; 3795 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 3796 mlxsw_sp_port->local_port); 3797 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 3798 err_col_port_add: 3799 if (!lag->ref_count) 3800 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 3801 return err; 3802 } 3803 3804 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port 
*mlxsw_sp_port, 3805 struct net_device *lag_dev) 3806 { 3807 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3808 u16 lag_id = mlxsw_sp_port->lag_id; 3809 struct mlxsw_sp_upper *lag; 3810 3811 if (!mlxsw_sp_port->lagged) 3812 return; 3813 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 3814 WARN_ON(lag->ref_count == 0); 3815 3816 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 3817 3818 /* Any VLANs configured on the port are no longer valid */ 3819 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 3820 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 3821 /* Make the LAG and its directly linked uppers leave the bridges they 3822 * are members of 3823 */ 3824 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 3825 3826 if (lag->ref_count == 1) 3827 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 3828 3829 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 3830 mlxsw_sp_port->local_port); 3831 mlxsw_sp_port->lagged = 0; 3832 lag->ref_count--; 3833 3834 /* Make sure untagged frames are allowed to ingress */ 3835 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, 3836 ETH_P_8021Q); 3837 } 3838 3839 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 3840 u16 lag_id) 3841 { 3842 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3843 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3844 3845 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 3846 mlxsw_sp_port->local_port); 3847 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 3848 } 3849 3850 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 3851 u16 lag_id) 3852 { 3853 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3854 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3855 3856 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 3857 mlxsw_sp_port->local_port); 3858 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 3859 } 3860 3861 static int 3862 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 3863 { 3864 int err; 3865 3866 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 3867 mlxsw_sp_port->lag_id); 3868 if (err) 3869 return err; 3870 3871 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 3872 if (err) 3873 goto err_dist_port_add; 3874 3875 return 0; 3876 3877 err_dist_port_add: 3878 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 3879 return err; 3880 } 3881 3882 static int 3883 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 3884 { 3885 int err; 3886 3887 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 3888 mlxsw_sp_port->lag_id); 3889 if (err) 3890 return err; 3891 3892 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 3893 mlxsw_sp_port->lag_id); 3894 if (err) 3895 goto err_col_port_disable; 3896 3897 return 0; 3898 3899 err_col_port_disable: 3900 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 3901 return err; 3902 } 3903 3904 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 3905 struct netdev_lag_lower_state_info *info) 3906 { 3907 if (info->tx_enabled) 3908 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 3909 else 3910 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 3911 } 3912
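/* Set the spanning tree state of the port for every possible VLAN with
 * a single SPMS register write. A hypothetical caller opening the port
 * for forwarding on all VLANs (as the OVS join path below does) would
 * look like:
 *
 *	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
 *	if (err)
 *		return err;
 */
3913 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 3914 bool enable) 3915 { 3916 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3917 enum mlxsw_reg_spms_state spms_state; 3918 char *spms_pl; 3919 u16 vid; 3920 int err; 3921 3922 spms_state = enable ?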
MLXSW_REG_SPMS_STATE_FORWARDING : 3923 MLXSW_REG_SPMS_STATE_DISCARDING; 3924 3925 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 3926 if (!spms_pl) 3927 return -ENOMEM; 3928 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 3929 3930 for (vid = 0; vid < VLAN_N_VID; vid++) 3931 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 3932 3933 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 3934 kfree(spms_pl); 3935 return err; 3936 } 3937 3938 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 3939 { 3940 u16 vid = 1; 3941 int err; 3942 3943 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 3944 if (err) 3945 return err; 3946 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 3947 if (err) 3948 goto err_port_stp_set; 3949 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 3950 true, false); 3951 if (err) 3952 goto err_port_vlan_set; 3953 3954 for (; vid <= VLAN_N_VID - 1; vid++) { 3955 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 3956 vid, false); 3957 if (err) 3958 goto err_vid_learning_set; 3959 } 3960 3961 return 0; 3962 3963 err_vid_learning_set: 3964 for (vid--; vid >= 1; vid--) 3965 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 3966 err_port_vlan_set: 3967 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 3968 err_port_stp_set: 3969 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 3970 return err; 3971 } 3972 3973 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 3974 { 3975 u16 vid; 3976 3977 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 3978 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 3979 vid, true); 3980 3981 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 3982 false, false); 3983 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 3984 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 3985 } 3986 3987 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 3988 { 3989 unsigned int num_vxlans = 0; 3990 struct net_device *dev; 3991 struct list_head *iter; 3992 3993 netdev_for_each_lower_dev(br_dev, dev, iter) { 3994 if (netif_is_vxlan(dev)) 3995 num_vxlans++; 3996 } 3997 3998 return num_vxlans > 1; 3999 } 4000 4001 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4002 { 4003 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4004 struct net_device *dev; 4005 struct list_head *iter; 4006 4007 netdev_for_each_lower_dev(br_dev, dev, iter) { 4008 u16 pvid; 4009 int err; 4010 4011 if (!netif_is_vxlan(dev)) 4012 continue; 4013 4014 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4015 if (err || !pvid) 4016 continue; 4017 4018 if (test_and_set_bit(pvid, vlans)) 4019 return false; 4020 } 4021 4022 return true; 4023 } 4024 4025 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4026 struct netlink_ext_ack *extack) 4027 { 4028 if (br_multicast_enabled(br_dev)) { 4029 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4030 return false; 4031 } 4032 4033 if (!br_vlan_enabled(br_dev) && 4034 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4035 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4036 return false; 4037 } 4038 4039 if (br_vlan_enabled(br_dev) && 4040 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4041 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4042 return false; 4043 } 4044 4045 return true; 4046 } 4047 4048 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 
4049 struct net_device *dev, 4050 unsigned long event, void *ptr) 4051 { 4052 struct netdev_notifier_changeupper_info *info; 4053 struct mlxsw_sp_port *mlxsw_sp_port; 4054 struct netlink_ext_ack *extack; 4055 struct net_device *upper_dev; 4056 struct mlxsw_sp *mlxsw_sp; 4057 int err = 0; 4058 u16 proto; 4059 4060 mlxsw_sp_port = netdev_priv(dev); 4061 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4062 info = ptr; 4063 extack = netdev_notifier_info_to_extack(&info->info); 4064 4065 switch (event) { 4066 case NETDEV_PRECHANGEUPPER: 4067 upper_dev = info->upper_dev; 4068 if (!is_vlan_dev(upper_dev) && 4069 !netif_is_lag_master(upper_dev) && 4070 !netif_is_bridge_master(upper_dev) && 4071 !netif_is_ovs_master(upper_dev) && 4072 !netif_is_macvlan(upper_dev)) { 4073 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4074 return -EINVAL; 4075 } 4076 if (!info->linking) 4077 break; 4078 if (netif_is_bridge_master(upper_dev) && 4079 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4080 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4081 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4082 return -EOPNOTSUPP; 4083 if (netdev_has_any_upper_dev(upper_dev) && 4084 (!netif_is_bridge_master(upper_dev) || 4085 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4086 upper_dev))) { 4087 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4088 return -EINVAL; 4089 } 4090 if (netif_is_lag_master(upper_dev) && 4091 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4092 info->upper_info, extack)) 4093 return -EINVAL; 4094 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4095 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4096 return -EINVAL; 4097 } 4098 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4099 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4100 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4101 return -EINVAL; 4102 } 4103 if (netif_is_macvlan(upper_dev) && 4104 !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) { 4105 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4106 return -EOPNOTSUPP; 4107 } 4108 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4109 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 4110 return -EINVAL; 4111 } 4112 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4113 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4114 return -EINVAL; 4115 } 4116 if (netif_is_bridge_master(upper_dev)) { 4117 br_vlan_get_proto(upper_dev, &proto); 4118 if (br_vlan_enabled(upper_dev) && 4119 proto != ETH_P_8021Q && proto != ETH_P_8021AD) { 4120 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported"); 4121 return -EOPNOTSUPP; 4122 } 4123 if (vlan_uses_dev(lower_dev) && 4124 br_vlan_enabled(upper_dev) && 4125 proto == ETH_P_8021AD) { 4126 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported"); 4127 return -EOPNOTSUPP; 4128 } 4129 } 4130 if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) { 4131 struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev); 4132 4133 if (br_vlan_enabled(br_dev)) { 4134 br_vlan_get_proto(br_dev, &proto); 4135 if (proto == ETH_P_8021AD) { 4136 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge"); 4137 return -EOPNOTSUPP; 4138 } 4139 } 4140 } 4141 if 
(is_vlan_dev(upper_dev) && 4142 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4143 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4144 return -EOPNOTSUPP; 4145 } 4146 break; 4147 case NETDEV_CHANGEUPPER: 4148 upper_dev = info->upper_dev; 4149 if (netif_is_bridge_master(upper_dev)) { 4150 if (info->linking) 4151 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4152 lower_dev, 4153 upper_dev, 4154 extack); 4155 else 4156 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4157 lower_dev, 4158 upper_dev); 4159 } else if (netif_is_lag_master(upper_dev)) { 4160 if (info->linking) { 4161 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4162 upper_dev, extack); 4163 } else { 4164 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4165 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4166 upper_dev); 4167 } 4168 } else if (netif_is_ovs_master(upper_dev)) { 4169 if (info->linking) 4170 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4171 else 4172 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4173 } else if (netif_is_macvlan(upper_dev)) { 4174 if (!info->linking) 4175 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4176 } else if (is_vlan_dev(upper_dev)) { 4177 struct net_device *br_dev; 4178 4179 if (!netif_is_bridge_port(upper_dev)) 4180 break; 4181 if (info->linking) 4182 break; 4183 br_dev = netdev_master_upper_dev_get(upper_dev); 4184 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 4185 br_dev); 4186 } 4187 break; 4188 } 4189 4190 return err; 4191 } 4192 4193 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 4194 unsigned long event, void *ptr) 4195 { 4196 struct netdev_notifier_changelowerstate_info *info; 4197 struct mlxsw_sp_port *mlxsw_sp_port; 4198 int err; 4199 4200 mlxsw_sp_port = netdev_priv(dev); 4201 info = ptr; 4202 4203 switch (event) { 4204 case NETDEV_CHANGELOWERSTATE: 4205 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 4206 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 4207 info->lower_state_info); 4208 if (err) 4209 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 4210 } 4211 break; 4212 } 4213 4214 return 0; 4215 } 4216 4217 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 4218 struct net_device *port_dev, 4219 unsigned long event, void *ptr) 4220 { 4221 switch (event) { 4222 case NETDEV_PRECHANGEUPPER: 4223 case NETDEV_CHANGEUPPER: 4224 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 4225 event, ptr); 4226 case NETDEV_CHANGELOWERSTATE: 4227 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 4228 ptr); 4229 } 4230 4231 return 0; 4232 } 4233 4234 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 4235 unsigned long event, void *ptr) 4236 { 4237 struct net_device *dev; 4238 struct list_head *iter; 4239 int ret; 4240 4241 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4242 if (mlxsw_sp_port_dev_check(dev)) { 4243 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 4244 ptr); 4245 if (ret) 4246 return ret; 4247 } 4248 } 4249 4250 return 0; 4251 } 4252 4253 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 4254 struct net_device *dev, 4255 unsigned long event, void *ptr, 4256 u16 vid) 4257 { 4258 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4259 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4260 struct netdev_notifier_changeupper_info *info = ptr; 4261 struct netlink_ext_ack *extack; 4262 struct net_device *upper_dev; 4263 int err = 0; 4264 4265 extack = 
netdev_notifier_info_to_extack(&info->info); 4266 4267 switch (event) { 4268 case NETDEV_PRECHANGEUPPER: 4269 upper_dev = info->upper_dev; 4270 if (!netif_is_bridge_master(upper_dev) && 4271 !netif_is_macvlan(upper_dev)) { 4272 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4273 return -EINVAL; 4274 } 4275 if (!info->linking) 4276 break; 4277 if (netif_is_bridge_master(upper_dev) && 4278 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4279 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4280 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4281 return -EOPNOTSUPP; 4282 if (netdev_has_any_upper_dev(upper_dev) && 4283 (!netif_is_bridge_master(upper_dev) || 4284 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4285 upper_dev))) { 4286 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4287 return -EINVAL; 4288 } 4289 if (netif_is_macvlan(upper_dev) && 4290 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 4291 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4292 return -EOPNOTSUPP; 4293 } 4294 break; 4295 case NETDEV_CHANGEUPPER: 4296 upper_dev = info->upper_dev; 4297 if (netif_is_bridge_master(upper_dev)) { 4298 if (info->linking) 4299 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4300 vlan_dev, 4301 upper_dev, 4302 extack); 4303 else 4304 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4305 vlan_dev, 4306 upper_dev); 4307 } else if (netif_is_macvlan(upper_dev)) { 4308 if (!info->linking) 4309 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4310 } else { 4311 err = -EINVAL; 4312 WARN_ON(1); 4313 } 4314 break; 4315 } 4316 4317 return err; 4318 } 4319 4320 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 4321 struct net_device *lag_dev, 4322 unsigned long event, 4323 void *ptr, u16 vid) 4324 { 4325 struct net_device *dev; 4326 struct list_head *iter; 4327 int ret; 4328 4329 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4330 if (mlxsw_sp_port_dev_check(dev)) { 4331 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 4332 event, ptr, 4333 vid); 4334 if (ret) 4335 return ret; 4336 } 4337 } 4338 4339 return 0; 4340 } 4341 4342 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 4343 struct net_device *br_dev, 4344 unsigned long event, void *ptr, 4345 u16 vid) 4346 { 4347 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 4348 struct netdev_notifier_changeupper_info *info = ptr; 4349 struct netlink_ext_ack *extack; 4350 struct net_device *upper_dev; 4351 4352 if (!mlxsw_sp) 4353 return 0; 4354 4355 extack = netdev_notifier_info_to_extack(&info->info); 4356 4357 switch (event) { 4358 case NETDEV_PRECHANGEUPPER: 4359 upper_dev = info->upper_dev; 4360 if (!netif_is_macvlan(upper_dev)) { 4361 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4362 return -EOPNOTSUPP; 4363 } 4364 if (!info->linking) 4365 break; 4366 if (netif_is_macvlan(upper_dev) && 4367 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 4368 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4369 return -EOPNOTSUPP; 4370 } 4371 break; 4372 case NETDEV_CHANGEUPPER: 4373 upper_dev = info->upper_dev; 4374 if (info->linking) 4375 break; 4376 if (netif_is_macvlan(upper_dev)) 4377 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4378 break; 4379 } 4380 4381 return 0; 4382 } 4383 4384 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 4385 unsigned long event, void *ptr) 4386 { 4387 struct net_device *real_dev = 
vlan_dev_real_dev(vlan_dev); 4388 u16 vid = vlan_dev_vlan_id(vlan_dev); 4389 4390 if (mlxsw_sp_port_dev_check(real_dev)) 4391 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 4392 event, ptr, vid); 4393 else if (netif_is_lag_master(real_dev)) 4394 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 4395 real_dev, event, 4396 ptr, vid); 4397 else if (netif_is_bridge_master(real_dev)) 4398 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 4399 event, ptr, vid); 4400 4401 return 0; 4402 } 4403 4404 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 4405 unsigned long event, void *ptr) 4406 { 4407 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 4408 struct netdev_notifier_changeupper_info *info = ptr; 4409 struct netlink_ext_ack *extack; 4410 struct net_device *upper_dev; 4411 u16 proto; 4412 4413 if (!mlxsw_sp) 4414 return 0; 4415 4416 extack = netdev_notifier_info_to_extack(&info->info); 4417 4418 switch (event) { 4419 case NETDEV_PRECHANGEUPPER: 4420 upper_dev = info->upper_dev; 4421 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 4422 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4423 return -EOPNOTSUPP; 4424 } 4425 if (!info->linking) 4426 break; 4427 if (br_vlan_enabled(br_dev)) { 4428 br_vlan_get_proto(br_dev, &proto); 4429 if (proto == ETH_P_8021AD) { 4430 NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge"); 4431 return -EOPNOTSUPP; 4432 } 4433 } 4434 if (is_vlan_dev(upper_dev) && 4435 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4436 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4437 return -EOPNOTSUPP; 4438 } 4439 if (netif_is_macvlan(upper_dev) && 4440 !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) { 4441 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4442 return -EOPNOTSUPP; 4443 } 4444 break; 4445 case NETDEV_CHANGEUPPER: 4446 upper_dev = info->upper_dev; 4447 if (info->linking) 4448 break; 4449 if (is_vlan_dev(upper_dev)) 4450 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 4451 if (netif_is_macvlan(upper_dev)) 4452 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4453 break; 4454 } 4455 4456 return 0; 4457 } 4458 4459 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 4460 unsigned long event, void *ptr) 4461 { 4462 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 4463 struct netdev_notifier_changeupper_info *info = ptr; 4464 struct netlink_ext_ack *extack; 4465 4466 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 4467 return 0; 4468 4469 extack = netdev_notifier_info_to_extack(&info->info); 4470 4471 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 4472 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4473 4474 return -EOPNOTSUPP; 4475 } 4476 4477 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 4478 { 4479 struct netdev_notifier_changeupper_info *info = ptr; 4480 4481 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 4482 return false; 4483 return netif_is_l3_master(info->upper_dev); 4484 } 4485 4486 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 4487 struct net_device *dev, 4488 unsigned long event, void *ptr) 4489 { 4490 struct netdev_notifier_changeupper_info *cu_info; 4491 struct netdev_notifier_info *info = ptr; 4492 struct netlink_ext_ack *extack; 4493 struct net_device *upper_dev; 4494 4495 extack = netdev_notifier_info_to_extack(info); 4496 4497 
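/* Three events are relevant for a VxLAN device: CHANGEUPPER for being
 * linked to or unlinked from a bridge, and PRE_UP / DOWN for the device
 * being brought up or down while already enslaved. In all cases the
 * VNI mapping is only maintained for bridges offloaded by this driver
 * instance, hence the mlxsw_sp_lower_get() checks below.
 */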
switch (event) { 4498 case NETDEV_CHANGEUPPER: 4499 cu_info = container_of(info, 4500 struct netdev_notifier_changeupper_info, 4501 info); 4502 upper_dev = cu_info->upper_dev; 4503 if (!netif_is_bridge_master(upper_dev)) 4504 return 0; 4505 if (!mlxsw_sp_lower_get(upper_dev)) 4506 return 0; 4507 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4508 return -EOPNOTSUPP; 4509 if (cu_info->linking) { 4510 if (!netif_running(dev)) 4511 return 0; 4512 /* When the bridge is VLAN-aware, the VNI of the VxLAN 4513 * device needs to be mapped to a VLAN, but at this 4514 * point no VLANs are configured on the VxLAN device 4515 */ 4516 if (br_vlan_enabled(upper_dev)) 4517 return 0; 4518 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 4519 dev, 0, extack); 4520 } else { 4521 /* VLANs were already flushed, which triggered the 4522 * necessary cleanup 4523 */ 4524 if (br_vlan_enabled(upper_dev)) 4525 return 0; 4526 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 4527 } 4528 break; 4529 case NETDEV_PRE_UP: 4530 upper_dev = netdev_master_upper_dev_get(dev); 4531 if (!upper_dev) 4532 return 0; 4533 if (!netif_is_bridge_master(upper_dev)) 4534 return 0; 4535 if (!mlxsw_sp_lower_get(upper_dev)) 4536 return 0; 4537 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 4538 extack); 4539 case NETDEV_DOWN: 4540 upper_dev = netdev_master_upper_dev_get(dev); 4541 if (!upper_dev) 4542 return 0; 4543 if (!netif_is_bridge_master(upper_dev)) 4544 return 0; 4545 if (!mlxsw_sp_lower_get(upper_dev)) 4546 return 0; 4547 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 4548 break; 4549 } 4550 4551 return 0; 4552 } 4553 4554 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 4555 unsigned long event, void *ptr) 4556 { 4557 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4558 struct mlxsw_sp_span_entry *span_entry; 4559 struct mlxsw_sp *mlxsw_sp; 4560 int err = 0; 4561 4562 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 4563 if (event == NETDEV_UNREGISTER) { 4564 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 4565 if (span_entry) 4566 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 4567 } 4568 mlxsw_sp_span_respin(mlxsw_sp); 4569 4570 if (netif_is_vxlan(dev)) 4571 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 4572 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 4573 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 4574 event, ptr); 4575 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 4576 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 4577 event, ptr); 4578 else if (event == NETDEV_PRE_CHANGEADDR || 4579 event == NETDEV_CHANGEADDR || 4580 event == NETDEV_CHANGEMTU) 4581 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); 4582 else if (mlxsw_sp_is_vrf_event(event, ptr)) 4583 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 4584 else if (mlxsw_sp_port_dev_check(dev)) 4585 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 4586 else if (netif_is_lag_master(dev)) 4587 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 4588 else if (is_vlan_dev(dev)) 4589 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 4590 else if (netif_is_bridge_master(dev)) 4591 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 4592 else if (netif_is_macvlan(dev)) 4593 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 4594 4595 return notifier_from_errno(err); 4596 } 4597 4598 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 4599 .notifier_call = mlxsw_sp_inetaddr_valid_event, 4600 }; 4601 
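/* Like the IPv4 validator above, this notifier runs before an IPv6
 * address is actually installed, so router interface configurations
 * that cannot be offloaded can be rejected up front with an extack
 * message rather than failing silently later.
 */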
4602 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 4603 .notifier_call = mlxsw_sp_inet6addr_valid_event, 4604 }; 4605 4606 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 4607 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 4608 {0, }, 4609 }; 4610 4611 static struct pci_driver mlxsw_sp1_pci_driver = { 4612 .name = mlxsw_sp1_driver_name, 4613 .id_table = mlxsw_sp1_pci_id_table, 4614 }; 4615 4616 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 4617 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 4618 {0, }, 4619 }; 4620 4621 static struct pci_driver mlxsw_sp2_pci_driver = { 4622 .name = mlxsw_sp2_driver_name, 4623 .id_table = mlxsw_sp2_pci_id_table, 4624 }; 4625 4626 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 4627 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 4628 {0, }, 4629 }; 4630 4631 static struct pci_driver mlxsw_sp3_pci_driver = { 4632 .name = mlxsw_sp3_driver_name, 4633 .id_table = mlxsw_sp3_pci_id_table, 4634 }; 4635 4636 static int __init mlxsw_sp_module_init(void) 4637 { 4638 int err; 4639 4640 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4641 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4642 4643 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 4644 if (err) 4645 goto err_sp1_core_driver_register; 4646 4647 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 4648 if (err) 4649 goto err_sp2_core_driver_register; 4650 4651 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 4652 if (err) 4653 goto err_sp3_core_driver_register; 4654 4655 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 4656 if (err) 4657 goto err_sp1_pci_driver_register; 4658 4659 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 4660 if (err) 4661 goto err_sp2_pci_driver_register; 4662 4663 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 4664 if (err) 4665 goto err_sp3_pci_driver_register; 4666 4667 return 0; 4668 4669 err_sp3_pci_driver_register: 4670 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 4671 err_sp2_pci_driver_register: 4672 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 4673 err_sp1_pci_driver_register: 4674 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 4675 err_sp3_core_driver_register: 4676 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 4677 err_sp2_core_driver_register: 4678 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 4679 err_sp1_core_driver_register: 4680 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4681 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4682 return err; 4683 } 4684 4685 static void __exit mlxsw_sp_module_exit(void) 4686 { 4687 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 4688 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 4689 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 4690 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 4691 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 4692 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 4693 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4694 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4695 } 4696 4697 module_init(mlxsw_sp_module_init); 4698 module_exit(mlxsw_sp_module_exit); 4699 4700 MODULE_LICENSE("Dual BSD/GPL"); 4701 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 4702 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 4703 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 4704 MODULE_DEVICE_TABLE(pci, 
mlxsw_sp2_pci_id_table); 4705 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 4706 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 4707 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 4708 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 4709
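/* The MODULE_DEVICE_TABLE() entries above allow the module to be
 * auto-loaded for any of the three Spectrum generations, while the
 * MODULE_FIRMWARE() entries advertise the firmware files the driver may
 * request, so that initramfs tooling can bundle them with the module.
 */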