/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <net/devlink.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
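/* Illustration (not part of the upstream flow): a caller only needs to
 * guarantee MLXSW_TXHDR_LEN bytes of headroom before handing the skb to
 * the core, e.g.
 *
 *	if (skb_cow_head(skb, MLXSW_TXHDR_LEN))	// hypothetical caller
 *		goto drop;
 *	mlxsw_sp_txhdr_construct(skb, &tx_info);
 *
 * mlxsw_sp_port_xmit() below follows exactly this pattern, using
 * skb_realloc_headroom() instead of skb_cow_head().
 */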
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool *p_is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
	return 0;
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
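/* Worked example (values illustrative): with a base MAC of
 * e4:1d:2d:a5:f3:00, local port 7 ends up with e4:1d:2d:a5:f3:07. The
 * base MAC is assumed to be aligned such that adding the local port
 * number never carries past the last byte.
 */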
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
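/* Illustration (values hypothetical): mapping a 2-lane port that starts at
 * lane 2 of module 3 programs PMLP entries { module = 3, lane = 2 } and
 * { module = 3, lane = 3 }, i.e. lane 'lane + i' for each of the 'width'
 * entries, with the same lane used for both Rx and Tx. Unmapping simply
 * writes a width of zero.
 */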
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		/* skb_realloc_headroom() returns a copy, so the original
		 * skb must be consumed here to avoid leaking it.
		 */
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		/* eth_skb_pad() frees the skb on error. */
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}
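/* Worked example: changing the netdev MTU to 1500 programs PMTU with
 * 1500 + MLXSW_TXHDR_LEN (16) + ETH_HLEN (14) = 1530 bytes, since the
 * hardware MTU must also cover the Tx header and the Ethernet header.
 */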
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
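/* Note on the two helpers above: a port normally runs in VLAN mode, where
 * the packet's VID alone selects the FID. Once the first {Port, VID} pair
 * needs its own FID (a vPort), the whole port is flipped to Virtual mode
 * and every active VLAN gets an explicit {Port, VID} -> FID entry; the
 * transition is rolled back symmetrically when the last vPort goes away.
 */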
static struct mlxsw_sp_vfid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
		if (vfid->vid == vid)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}

static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
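/* Sketch of the vFID numbering used by the helpers above: vFIDs are
 * allocated from a small bitmap and mlxsw_sp_vfid_to_fid() (defined in
 * spectrum.h) is assumed to offset them past the 4K VLAN FID range, so
 * e.g. vFID 0 would become FID 4096 in SFMR. The exact base lives in the
 * header, not here.
 */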
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
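/* The strings above are what `ethtool -S <ifname>` prints for a Spectrum
 * port; each getter decodes one counter from a single PPCNT register
 * snapshot taken in mlxsw_sp_port_get_stats() below.
 */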
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported = SUPPORTED_100baseT_Full,
		.advertised = ADVERTISED_100baseT_Full,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported = SUPPORTED_1000baseKX_Full,
		.advertised = ADVERTISED_1000baseKX_Full,
		.speed = 1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported = SUPPORTED_10000baseT_Full,
		.advertised = ADVERTISED_10000baseT_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported = SUPPORTED_10000baseKX4_Full,
		.advertised = ADVERTISED_10000baseKX4_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported = SUPPORTED_10000baseKR_Full,
		.advertised = ADVERTISED_10000baseKR_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported = SUPPORTED_20000baseKR2_Full,
		.advertised = ADVERTISED_20000baseKR2_Full,
		.speed = 20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported = SUPPORTED_40000baseCR4_Full,
		.advertised = ADVERTISED_40000baseCR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported = SUPPORTED_40000baseKR4_Full,
		.advertised = ADVERTISED_40000baseKR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported = SUPPORTED_40000baseSR4_Full,
		.advertised = ADVERTISED_40000baseSR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported = SUPPORTED_40000baseLR4_Full,
		.advertised = ADVERTISED_40000baseLR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed = 25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed = 50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported = SUPPORTED_56000baseKR4_Full,
		.advertised = ADVERTISED_56000baseKR4_Full,
		.speed = 56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed = 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
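/* Worked example: MLXSW_SP_PORT_BASE_SPEED is 25000 Mb/s per lane (see
 * spectrum.h), so a 4-lane port gets upper_speed = 4 * 25000 = 100000 and
 * advertises every link mode up to 100 Gb/s; a 1-lane split port only
 * advertises modes up to 25 Gb/s.
 */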
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  bool split, u8 module, u8 width)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct devlink_port *devlink_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	devlink_port = &mlxsw_sp_port->devlink_port;
	if (mlxsw_sp_port->split)
		devlink_port_split_set(devlink_port, module);
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
			mlxsw_sp_port->local_port);
		goto err_devlink_port_register;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	devlink_port_type_eth_set(devlink_port, dev);

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
	devlink_port_unregister(&mlxsw_sp_port->devlink_port);
err_devlink_port_register:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
				       lane);
	if (err)
		return err;

	err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
				     width);
	if (err)
		goto err_port_create;

	return 0;

err_port_create:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
	return err;
}

static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct devlink_port *devlink_port;

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	devlink_port = &mlxsw_sp_port->devlink_port;
	devlink_port_type_clear(devlink_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	devlink_port_unregister(devlink_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	size_t alloc_size;
	u8 module, width;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}
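/* Worked example: with MLXSW_SP_PORTS_PER_CLUSTER_MAX of 4, local ports
 * 5..8 form one cluster, so mlxsw_sp_cluster_base_port_get(7) yields
 * (7 - 1) % 4 = 2 and returns 7 - 2 = 5. Split ports are created relative
 * to this base port.
 */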
static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
					    &cur_width);
	if (err) {
		netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
		return err;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err) {
			dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
			goto err_port_create;
		}
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	for (i = 0; i < count / 2; i++) {
		module = mlxsw_sp->port_to_module[base_port + i * 2];
		mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
				     module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
	}
	return err;
}
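/* Usage sketch (device name assumed, for a Spectrum switch at
 * pci/0000:03:00.0, front-panel port 1):
 *
 *	devlink port split pci/0000:03:00.0/1 count 4
 *	devlink port unsplit pci/0000:03:00.0/1
 *
 * These land in mlxsw_sp_port_split()/mlxsw_sp_port_unsplit() via the
 * devlink ops.
 */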
static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
					    &cur_width);
	if (err) {
		netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
		return err;
	}
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	for (i = 0; i < count / 2; i++) {
		module = mlxsw_sp->port_to_module[base_port + i * 2];
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
					   module, MLXSW_PORT_MODULE_MAX_WIDTH,
					   0);
		if (err)
			dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
	}

	return 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};

static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
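
/* Teardown mirrors the error path of mlxsw_sp_traps_init(): restore the
 * FORWARD action for every trap before unregistering its listener.
 */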
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
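
/* Device init for a Spectrum instance. The error labels in this
 * function unwind in reverse registration order; buffer, LAG and flood
 * setup have no dedicated teardown at this stage, so their labels fall
 * through to the trap cleanup.
 */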
static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}

static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_lag = 1,
	.max_lag = MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag = 1,
	.max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_max_system_port = 1,
	.max_system_port = 64,
	.used_max_vlan_groups = 1,
	.max_vlan_groups = 127,
	.used_max_regions = 1,
	.max_regions = 400,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_offset_flood_tables = 2,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 2,
	.fid_flood_table_size = MLXSW_SP_VFID_MAX,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = MLXSW_DEVICE_KIND_SPECTRUM,
	.owner = THIS_MODULE,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};
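
/* FDB flush helpers. The SFDF register supports flushing dynamic FDB
 * entries by port, by {port, FID}, by LAG and by {LAG, FID}; each
 * wrapper below packs the corresponding flush type.
 */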
static int
mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int
__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!list_empty(&mlxsw_sp_port->vports_list))
		if (mlxsw_sp_port->lagged)
			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
		else
			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
	else
		if (mlxsw_sp_port->lagged)
			return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
		else
			return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
}

static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);

	if (mlxsw_sp_vport->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
}

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
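
/* 802.1Q bridge join/leave for the port itself. Joining removes the
 * implicit PVID 1 VLAN interface and enables learning and unknown
 * unicast flooding; leaving reverses this, optionally flushing the
 * port's FDB entries first.
 */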
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool flush_fdb)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	return mlxsw_sp_port_add_vid(dev, 0, 1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
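
/* Return the LAG ID already bound to lag_dev, or the first free entry;
 * -EBUSY when all MLXSW_SP_LAG_MAX entries are taken.
 */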
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb);
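
/* Reverse of mlxsw_sp_port_lag_join(): disable and remove the collector
 * port, tear down any bridge state stacked on the LAG and destroy the
 * LAG itself once the last port has left it.
 */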
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
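
/* vPort <-> VLAN device linkage: when an 8021q upper is created on the
 * port, the matching vPort adopts the VLAN device as its netdev;
 * unlinking restores the underlying port device.
 */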
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	/* When removing a VLAN device while still bridged we should first
	 * remove it from the bridge, as we receive the bridge's notification
	 * when the vPort is already gone.
	 */
	if (mlxsw_sp_vport->bridged) {
		struct net_device *br_dev;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
	}

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;

	return 0;
}
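
/* NETDEV_PRECHANGEUPPER is used to veto configurations the device
 * cannot offload (a second bridge master, or a LAG whose TX policy is
 * not hash-based); NETDEV_CHANGEUPPER then mirrors the accepted
 * software change into the hardware.
 */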
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static struct mlxsw_sp_vfid *
mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
		if (vfid->br_dev == br_dev)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}

static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}

static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}

static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vfid *vfid)
{
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
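
/* Called when a VLAN upper of this port leaves its bridge: migrate the
 * vPort from the bridge's vFID back to a per-{Port, VID} vFID and turn
 * off the bridge-only features (learning, flooding).
 */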
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid, *new_vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		WARN_ON(!vfid);
		return -EINVAL;
	}

	/* We need a vFID to go back to after leaving the bridge's vFID. */
	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!new_vfid) {
		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(new_vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(new_vfid);
		}
	}

	/* Invalidate existing {Port, VID} to vFID mapping and create a new
	 * one for the new vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   new_vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning\n");
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
				       false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		goto err_vport_flood_set;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
		netdev_err(dev, "Failed to flush FDB\n");

	/* Switch between the vFIDs and destroy the old one if needed. */
	new_vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = new_vfid;
	vfid->nr_vports--;
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;

	return 0;

err_port_stp_state_set:
err_vport_flood_set:
err_port_vid_learning_set:
err_port_vid_to_fid_validate:
err_port_vid_to_fid_invalidate:
	/* Rollback vFID only if new. */
	if (!new_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
	return err;
}
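
/* Mirror image of mlxsw_sp_vport_bridge_leave(): migrate the vPort from
 * its per-{Port, VID} vFID into the vFID shared by all ports member in
 * br_dev, creating that bridge vFID on first use.
 */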
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, true,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
			return false;
	}

	return true;
}
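
/* Upper-device events for VLAN uppers of a port. Only bridge masters
 * are offloaded here, and two VLAN uppers of the same port may not be
 * members of the same bridge.
 */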
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		if (!netif_is_bridge_master(upper_dev))
			return NOTIFY_BAD;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master)
			break;
		if (info->linking) {
			if (!mlxsw_sp_vport) {
				WARN_ON(!mlxsw_sp_vport);
				return NOTIFY_BAD;
			}
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
			if (err) {
				netdev_err(dev, "Failed to join bridge\n");
				return NOTIFY_BAD;
			}
		} else {
			/* We ignore bridge's unlinking notifications if vPort
			 * is gone, since we already left the bridge when the
			 * VLAN device was unlinked from the real device.
			 */
			if (!mlxsw_sp_vport)
				return NOTIFY_DONE;
			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
							  upper_dev, true);
			if (err) {
				netdev_err(dev, "Failed to leave bridge\n");
				return NOTIFY_BAD;
			}
		}
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_netdevice_port_event(dev, event, ptr);

	if (netif_is_lag_master(dev))
		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);

	if (is_vlan_dev(dev))
		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);