/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

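/* Cache the device's base MAC address, queried via the SPAD register.
 * Per-port MAC addresses are later derived from it in
 * mlxsw_sp_port_dev_addr_init().
 */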
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool *p_is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
	return 0;
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

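/* Derive the port's MAC address from the base MAC by adding the local
 * port number to the last octet. For example (addresses hypothetical),
 * base MAC 00:11:22:33:44:00 and local port 5 yield 00:11:22:33:44:05.
 * Only the last octet is adjusted, so the base MAC is assumed to leave
 * enough room in it for all local ports.
 */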
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

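/* The MTU programmed to the device accounts for the Tx header and the
 * Ethernet header on top of the payload MTU, and is validated against
 * the maximum reported by the PMTU register.
 */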
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

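/* Map a local port to 'width' consecutive lanes of the given front-panel
 * module, starting at 'lane'. The same lane assignment is applied to
 * both Rx and Tx.
 */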
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

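/* Transmit path: make sure there is headroom for the Tx header, pad the
 * frame to the minimum Ethernet length, prepend the Tx header and hand
 * the skb to the core. Byte and packet counters are updated under the
 * u64_stats syncp, while tx_dropped is a plain u32 (see
 * mlxsw_sp_port_get_stats64()).
 */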
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

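/* Size a priority group (PG) buffer in the port's headroom. The buffer
 * is sized to twice the MTU (in cells); for lossless operation (pause
 * or PFC) a delay allowance is added on top, with the second size
 * passed to mlxsw_reg_pbmc_lossless_buffer_pack() presumably acting as
 * the Xoff threshold at the 2xMTU mark.
 */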
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

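/* Transition the port to Virtual mode: install an explicit {Port, VID}
 * to FID mapping for each active VLAN and then enable virtual port
 * mode. On failure, mappings installed so far are rolled back.
 */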
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_vfid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
		if (vfid->vid == vid)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}

static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set the port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}

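/* Physical port names are "p<module+1>" for non-split ports and
 * "p<module+1>s<subport>" for split ports, where the subport index is
 * lane / width. For example, a split port on module 0 using lanes 2-3
 * (width 2) would be named "p1s1".
 */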
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

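/* Each entry pairs an ethtool statistics string with a getter for the
 * corresponding counter in the PPCNT register payload. The table drives
 * get_strings(), get_ethtool_stats() and get_sset_count() below.
 */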
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

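/* Translation table between PTYS Ethernet protocol bits and legacy
 * ethtool link modes. Entries without .supported/.advertised values
 * (e.g. the 25G and 50G masks) contribute nothing to the reported
 * modes and are used for speed mapping only.
 */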
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto\n");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

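/* Build a PTYS mask of every link mode whose speed does not exceed
 * upper_speed; used by mlxsw_sp_port_speed_by_width_set() to enable all
 * speeds a port can reach with its lane width.
 */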
static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto\n");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Unsupported proto admin requested\n");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin\n");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status\n");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status\n");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status\n");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};

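/* The maximum speed of a port is assumed to scale linearly with its
 * lane width: upper_speed = MLXSW_SP_PORT_BASE_SPEED * width, i.e. the
 * per-lane base speed times the number of lanes.
 */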
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

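/* Default ETS configuration: a single group feeds eight subgroups, one
 * per traffic class; max shapers are disabled at every level that has
 * one, and all switch priorities are initially mapped to TC 0. Note the
 * "HIERARCY" spelling in the register API constants is intentional and
 * must be kept as-is.
 */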
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy, so that each TC is linked to
	 * one subgroup, and all subgroups are members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchy levels
	 * that support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

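/* Create and register a netdev for a single switch port. The error path
 * unwinds via the goto labels in reverse order of initialization; the
 * labels between err_register_netdev and err_dev_addr_init fall through
 * without undoing the corresponding hardware step.
 */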
1676 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1677 				bool split, u8 module, u8 width, u8 lane)
1678 {
1679 	struct mlxsw_sp_port *mlxsw_sp_port;
1680 	struct net_device *dev;
1681 	size_t bytes;
1682 	int err;
1683 
1684 	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1685 	if (!dev)
1686 		return -ENOMEM;
1687 	mlxsw_sp_port = netdev_priv(dev);
1688 	mlxsw_sp_port->dev = dev;
1689 	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1690 	mlxsw_sp_port->local_port = local_port;
1691 	mlxsw_sp_port->split = split;
1692 	mlxsw_sp_port->mapping.module = module;
1693 	mlxsw_sp_port->mapping.width = width;
1694 	mlxsw_sp_port->mapping.lane = lane;
1695 	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1696 	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1697 	if (!mlxsw_sp_port->active_vlans) {
1698 		err = -ENOMEM;
1699 		goto err_port_active_vlans_alloc;
1700 	}
1701 	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1702 	if (!mlxsw_sp_port->untagged_vlans) {
1703 		err = -ENOMEM;
1704 		goto err_port_untagged_vlans_alloc;
1705 	}
1706 	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1707 
1708 	mlxsw_sp_port->pcpu_stats =
1709 		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1710 	if (!mlxsw_sp_port->pcpu_stats) {
1711 		err = -ENOMEM;
1712 		goto err_alloc_stats;
1713 	}
1714 
1715 	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1716 	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1717 
1718 	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1719 	if (err) {
1720 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1721 			mlxsw_sp_port->local_port);
1722 		goto err_dev_addr_init;
1723 	}
1724 
1725 	netif_carrier_off(dev);
1726 
1727 	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1728 			 NETIF_F_HW_VLAN_CTAG_FILTER;
1729 
1730 	/* Each packet needs to have a Tx header (metadata) on top all other
1731 	 * headers.
1732 	 */
1733 	dev->hard_header_len += MLXSW_TXHDR_LEN;
1734 
1735 	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1736 	if (err) {
1737 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1738 			mlxsw_sp_port->local_port);
1739 		goto err_port_system_port_mapping_set;
1740 	}
1741 
1742 	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1743 	if (err) {
1744 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1745 			mlxsw_sp_port->local_port);
1746 		goto err_port_swid_set;
1747 	}
1748 
1749 	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1750 	if (err) {
1751 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1752 			mlxsw_sp_port->local_port);
1753 		goto err_port_speed_by_width_set;
1754 	}
1755 
1756 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1757 	if (err) {
1758 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1759 			mlxsw_sp_port->local_port);
1760 		goto err_port_mtu_set;
1761 	}
1762 
1763 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1764 	if (err)
1765 		goto err_port_admin_status_set;
1766 
1767 	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1768 	if (err) {
1769 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1770 			mlxsw_sp_port->local_port);
1771 		goto err_port_buffers_init;
1772 	}
1773 
1774 	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1775 	if (err) {
1776 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1777 			mlxsw_sp_port->local_port);
1778 		goto err_port_ets_init;
1779 	}
1780 
1781 	/* ETS and buffers must be initialized before DCB. */
1782 	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1783 	if (err) {
1784 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1785 			mlxsw_sp_port->local_port);
1786 		goto err_port_dcb_init;
1787 	}
1788 
1789 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1790 	err = register_netdev(dev);
1791 	if (err) {
1792 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1793 			mlxsw_sp_port->local_port);
1794 		goto err_register_netdev;
1795 	}
1796 
1797 	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1798 				   mlxsw_sp_port->local_port, dev,
1799 				   mlxsw_sp_port->split, module);
1800 	if (err) {
1801 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1802 			mlxsw_sp_port->local_port);
1803 		goto err_core_port_init;
1804 	}
1805 
1806 	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1807 	if (err)
1808 		goto err_port_vlan_init;
1809 
1810 	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1811 	return 0;
1812 
1813 err_port_vlan_init:
1814 	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1815 err_core_port_init:
1816 	unregister_netdev(dev);
err_register_netdev:
	/* DCB and switchdev were initialized before register_netdev() and
	 * must be unwound here, otherwise their resources are leaked.
	 */
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
1819 err_port_ets_init:
1820 err_port_buffers_init:
1821 err_port_admin_status_set:
1822 err_port_mtu_set:
1823 err_port_speed_by_width_set:
1824 err_port_swid_set:
1825 err_port_system_port_mapping_set:
1826 err_dev_addr_init:
1827 	free_percpu(mlxsw_sp_port->pcpu_stats);
1828 err_alloc_stats:
1829 	kfree(mlxsw_sp_port->untagged_vlans);
1830 err_port_untagged_vlans_alloc:
1831 	kfree(mlxsw_sp_port->active_vlans);
1832 err_port_active_vlans_alloc:
1833 	free_netdev(dev);
1834 	return err;
1835 }
1836 
1837 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1838 {
1839 	struct net_device *dev = mlxsw_sp_port->dev;
1840 	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
1841 
1842 	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1843 				 &mlxsw_sp_port->vports_list, vport.list) {
1844 		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1845 
1846 		/* vPorts created for VLAN devices should already be gone
1847 		 * by now, since we unregistered the port netdev.
1848 		 */
1849 		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1850 		mlxsw_sp_port_kill_vid(dev, 0, vid);
1851 	}
1852 }
1853 
1854 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1855 {
1856 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1857 
1858 	if (!mlxsw_sp_port)
1859 		return;
1860 	mlxsw_sp->ports[local_port] = NULL;
1861 	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1862 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1863 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1864 	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
1865 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
1866 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1867 	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
1868 	free_percpu(mlxsw_sp_port->pcpu_stats);
1869 	kfree(mlxsw_sp_port->untagged_vlans);
1870 	kfree(mlxsw_sp_port->active_vlans);
1871 	free_netdev(mlxsw_sp_port->dev);
1872 }
1873 
1874 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1875 {
1876 	int i;
1877 
1878 	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1879 		mlxsw_sp_port_remove(mlxsw_sp, i);
1880 	kfree(mlxsw_sp->ports);
1881 }
1882 
1883 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1884 {
1885 	u8 module, width, lane;
1886 	size_t alloc_size;
1887 	int i;
1888 	int err;
1889 
1890 	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1891 	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1892 	if (!mlxsw_sp->ports)
1893 		return -ENOMEM;
1894 
1895 	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1896 		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1897 						    &width, &lane);
1898 		if (err)
1899 			goto err_port_module_info_get;
1900 		if (!width)
1901 			continue;
1902 		mlxsw_sp->port_to_module[i] = module;
1903 		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1904 					   lane);
1905 		if (err)
1906 			goto err_port_create;
1907 	}
1908 	return 0;
1909 
1910 err_port_create:
1911 err_port_module_info_get:
1912 	for (i--; i >= 1; i--)
1913 		mlxsw_sp_port_remove(mlxsw_sp, i);
1914 	kfree(mlxsw_sp->ports);
1915 	return err;
1916 }
1917 
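/* Local ports are numbered from 1 and grouped into clusters of
 * MLXSW_SP_PORTS_PER_CLUSTER_MAX consecutive ports. Assuming a cluster
 * size of four, local ports 1-4 share base port 1 and ports 5-8 share
 * base port 5.
 */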
1918 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1919 {
1920 	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1921 
1922 	return local_port - offset;
1923 }
1924 
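/* Create the split ports in three passes: map each new local port to its
 * share of the module's lanes, move the ports to the Ethernet SWID and
 * only then instantiate the netdevs. On failure, each pass is fully
 * unwound before the previous one is rolled back.
 */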
1925 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
1926 				      u8 module, unsigned int count)
1927 {
1928 	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
1929 	int err, i;
1930 
1931 	for (i = 0; i < count; i++) {
1932 		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
1933 					       width, i * width);
1934 		if (err)
1935 			goto err_port_module_map;
1936 	}
1937 
1938 	for (i = 0; i < count; i++) {
1939 		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
1940 		if (err)
1941 			goto err_port_swid_set;
1942 	}
1943 
1944 	for (i = 0; i < count; i++) {
1945 		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
1946 					   module, width, i * width);
1947 		if (err)
1948 			goto err_port_create;
1949 	}
1950 
1951 	return 0;
1952 
1953 err_port_create:
1954 	for (i--; i >= 0; i--)
1955 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
1956 	i = count;
1957 err_port_swid_set:
1958 	for (i--; i >= 0; i--)
1959 		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
1960 					 MLXSW_PORT_SWID_DISABLED_PORT);
1961 	i = count;
1962 err_port_module_map:
1963 	for (i--; i >= 0; i--)
1964 		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
1965 	return err;
1966 }
1967 
1968 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1969 					 u8 base_port, unsigned int count)
1970 {
1971 	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
1972 	int i;
1973 
	/* A split by four means we need to re-create two unsplit ports,
	 * a split by two means only one.
	 */
1977 	count = count / 2;
1978 
1979 	for (i = 0; i < count; i++) {
1980 		local_port = base_port + i * 2;
1981 		module = mlxsw_sp->port_to_module[local_port];
1982 
1983 		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1984 					 0);
1985 	}
1986 
1987 	for (i = 0; i < count; i++)
1988 		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
1989 
1990 	for (i = 0; i < count; i++) {
1991 		local_port = base_port + i * 2;
1992 		module = mlxsw_sp->port_to_module[local_port];
1993 
1994 		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
1995 				     width, 0);
1996 	}
1997 }
1998 
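/* Split a front-panel port into two or four netdevs. The port must
 * currently own the full module width and the sibling local ports that
 * will host the split must be free. For example, assuming a module width
 * of four lanes, splitting local port 1 by four re-creates it as split
 * ports 1-4 with one lane each.
 */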
1999 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2000 			       unsigned int count)
2001 {
2002 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2003 	struct mlxsw_sp_port *mlxsw_sp_port;
2004 	u8 module, cur_width, base_port;
2005 	int i;
2006 	int err;
2007 
2008 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2009 	if (!mlxsw_sp_port) {
2010 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2011 			local_port);
2012 		return -EINVAL;
2013 	}
2014 
2015 	module = mlxsw_sp_port->mapping.module;
2016 	cur_width = mlxsw_sp_port->mapping.width;
2017 
2018 	if (count != 2 && count != 4) {
2019 		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2020 		return -EINVAL;
2021 	}
2022 
2023 	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2024 		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2025 		return -EINVAL;
2026 	}
2027 
	/* Make sure the sibling (even) local ports needed for the split
	 * are free.
	 */
2029 	if (count == 2) {
2030 		base_port = local_port;
2031 		if (mlxsw_sp->ports[base_port + 1]) {
2032 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2033 			return -EINVAL;
2034 		}
2035 	} else {
2036 		base_port = mlxsw_sp_cluster_base_port_get(local_port);
2037 		if (mlxsw_sp->ports[base_port + 1] ||
2038 		    mlxsw_sp->ports[base_port + 3]) {
2039 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2040 			return -EINVAL;
2041 		}
2042 	}
2043 
2044 	for (i = 0; i < count; i++)
2045 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2046 
2047 	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2048 	if (err) {
2049 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2050 		goto err_port_split_create;
2051 	}
2052 
2053 	return 0;
2054 
2055 err_port_split_create:
2056 	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2057 	return err;
2058 }
2059 
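/* Reverse a previous split. The split count is inferred from the current
 * width: a single-lane port was split by four, anything else by two. The
 * member ports are removed and the original unsplit port(s) re-created.
 */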
2060 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2061 {
2062 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2063 	struct mlxsw_sp_port *mlxsw_sp_port;
2064 	u8 cur_width, base_port;
2065 	unsigned int count;
2066 	int i;
2067 
2068 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2069 	if (!mlxsw_sp_port) {
2070 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2071 			local_port);
2072 		return -EINVAL;
2073 	}
2074 
2075 	if (!mlxsw_sp_port->split) {
2076 		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2077 		return -EINVAL;
2078 	}
2079 
2080 	cur_width = mlxsw_sp_port->mapping.width;
2081 	count = cur_width == 1 ? 4 : 2;
2082 
2083 	base_port = mlxsw_sp_cluster_base_port_get(local_port);
2084 
	/* Determine which ports to remove: a pair split by two may occupy
	 * the upper half of its cluster.
	 */
2086 	if (count == 2 && local_port >= base_port + 2)
2087 		base_port = base_port + 2;
2088 
2089 	for (i = 0; i < count; i++)
2090 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2091 
2092 	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2093 
2094 	return 0;
2095 }
2096 
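/* Handle PUDE (port up/down event) traps by reflecting the port's
 * operational state into the netdev's carrier state.
 */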
2097 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2098 				     char *pude_pl, void *priv)
2099 {
2100 	struct mlxsw_sp *mlxsw_sp = priv;
2101 	struct mlxsw_sp_port *mlxsw_sp_port;
2102 	enum mlxsw_reg_pude_oper_status status;
2103 	u8 local_port;
2104 
2105 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2106 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2107 	if (!mlxsw_sp_port) {
2108 		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
2109 			 local_port);
2110 		return;
2111 	}
2112 
2113 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
2114 	if (status == MLXSW_PORT_OPER_STATUS_UP) {
2115 		netdev_info(mlxsw_sp_port->dev, "link up\n");
2116 		netif_carrier_on(mlxsw_sp_port->dev);
2117 	} else {
2118 		netdev_info(mlxsw_sp_port->dev, "link down\n");
2119 		netif_carrier_off(mlxsw_sp_port->dev);
2120 	}
2121 }
2122 
2123 static struct mlxsw_event_listener mlxsw_sp_pude_event = {
2124 	.func = mlxsw_sp_pude_event_func,
2125 	.trap_id = MLXSW_TRAP_ID_PUDE,
2126 };
2127 
2128 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2129 				   enum mlxsw_event_trap_id trap_id)
2130 {
2131 	struct mlxsw_event_listener *el;
2132 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2133 	int err;
2134 
2135 	switch (trap_id) {
2136 	case MLXSW_TRAP_ID_PUDE:
2137 		el = &mlxsw_sp_pude_event;
2138 		break;
2139 	}
2140 	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2141 	if (err)
2142 		return err;
2143 
2144 	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2145 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2146 	if (err)
2147 		goto err_event_trap_set;
2148 
2149 	return 0;
2150 
2151 err_event_trap_set:
2152 	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2153 	return err;
2154 }
2155 
2156 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2157 				      enum mlxsw_event_trap_id trap_id)
2158 {
2159 	struct mlxsw_event_listener *el;
2160 
2161 	switch (trap_id) {
2162 	case MLXSW_TRAP_ID_PUDE:
2163 		el = &mlxsw_sp_pude_event;
2164 		break;
2165 	}
2166 	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2167 }
2168 
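/* Generic receive handler for trapped packets: attribute the skb to the
 * ingress port netdev, update the per-CPU counters under the u64 stats
 * sequence lock and inject the packet into the network stack.
 */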
2169 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2170 				      void *priv)
2171 {
2172 	struct mlxsw_sp *mlxsw_sp = priv;
2173 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2174 	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2175 
2176 	if (unlikely(!mlxsw_sp_port)) {
2177 		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2178 				     local_port);
2179 		return;
2180 	}
2181 
2182 	skb->dev = mlxsw_sp_port->dev;
2183 
2184 	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2185 	u64_stats_update_begin(&pcpu_stats->syncp);
2186 	pcpu_stats->rx_packets++;
2187 	pcpu_stats->rx_bytes += skb->len;
2188 	u64_stats_update_end(&pcpu_stats->syncp);
2189 
2190 	skb->protocol = eth_type_trans(skb, skb->dev);
2191 	netif_receive_skb(skb);
2192 }
2193 
2194 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2195 	{
2196 		.func = mlxsw_sp_rx_listener_func,
2197 		.local_port = MLXSW_PORT_DONT_CARE,
2198 		.trap_id = MLXSW_TRAP_ID_FDB_MC,
2199 	},
2200 	/* Traps for specific L2 packet types, not trapped as FDB MC */
2201 	{
2202 		.func = mlxsw_sp_rx_listener_func,
2203 		.local_port = MLXSW_PORT_DONT_CARE,
2204 		.trap_id = MLXSW_TRAP_ID_STP,
2205 	},
2206 	{
2207 		.func = mlxsw_sp_rx_listener_func,
2208 		.local_port = MLXSW_PORT_DONT_CARE,
2209 		.trap_id = MLXSW_TRAP_ID_LACP,
2210 	},
2211 	{
2212 		.func = mlxsw_sp_rx_listener_func,
2213 		.local_port = MLXSW_PORT_DONT_CARE,
2214 		.trap_id = MLXSW_TRAP_ID_EAPOL,
2215 	},
2216 	{
2217 		.func = mlxsw_sp_rx_listener_func,
2218 		.local_port = MLXSW_PORT_DONT_CARE,
2219 		.trap_id = MLXSW_TRAP_ID_LLDP,
2220 	},
2221 	{
2222 		.func = mlxsw_sp_rx_listener_func,
2223 		.local_port = MLXSW_PORT_DONT_CARE,
2224 		.trap_id = MLXSW_TRAP_ID_MMRP,
2225 	},
2226 	{
2227 		.func = mlxsw_sp_rx_listener_func,
2228 		.local_port = MLXSW_PORT_DONT_CARE,
2229 		.trap_id = MLXSW_TRAP_ID_MVRP,
2230 	},
2231 	{
2232 		.func = mlxsw_sp_rx_listener_func,
2233 		.local_port = MLXSW_PORT_DONT_CARE,
2234 		.trap_id = MLXSW_TRAP_ID_RPVST,
2235 	},
2236 	{
2237 		.func = mlxsw_sp_rx_listener_func,
2238 		.local_port = MLXSW_PORT_DONT_CARE,
2239 		.trap_id = MLXSW_TRAP_ID_DHCP,
2240 	},
2241 	{
2242 		.func = mlxsw_sp_rx_listener_func,
2243 		.local_port = MLXSW_PORT_DONT_CARE,
2244 		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2245 	},
2246 	{
2247 		.func = mlxsw_sp_rx_listener_func,
2248 		.local_port = MLXSW_PORT_DONT_CARE,
2249 		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2250 	},
2251 	{
2252 		.func = mlxsw_sp_rx_listener_func,
2253 		.local_port = MLXSW_PORT_DONT_CARE,
2254 		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2255 	},
2256 	{
2257 		.func = mlxsw_sp_rx_listener_func,
2258 		.local_port = MLXSW_PORT_DONT_CARE,
2259 		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2260 	},
2261 	{
2262 		.func = mlxsw_sp_rx_listener_func,
2263 		.local_port = MLXSW_PORT_DONT_CARE,
2264 		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2265 	},
2266 };
2267 
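/* Set up the RX and control trap groups and then register a listener and
 * a TRAP_TO_CPU policy for every trap ID in mlxsw_sp_rx_listener[]. On
 * failure, already-programmed traps are reverted to FORWARD and their
 * listeners unregistered.
 */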
2268 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2269 {
2270 	char htgt_pl[MLXSW_REG_HTGT_LEN];
2271 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2272 	int i;
2273 	int err;
2274 
2275 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
2276 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2277 	if (err)
2278 		return err;
2279 
2280 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
2281 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2282 	if (err)
2283 		return err;
2284 
2285 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2286 		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
2287 						      &mlxsw_sp_rx_listener[i],
2288 						      mlxsw_sp);
2289 		if (err)
2290 			goto err_rx_listener_register;
2291 
2292 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
2293 				    mlxsw_sp_rx_listener[i].trap_id);
2294 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2295 		if (err)
2296 			goto err_rx_trap_set;
2297 	}
2298 	return 0;
2299 
2300 err_rx_trap_set:
2301 	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2302 					  &mlxsw_sp_rx_listener[i],
2303 					  mlxsw_sp);
2304 err_rx_listener_register:
2305 	for (i--; i >= 0; i--) {
2306 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2307 				    mlxsw_sp_rx_listener[i].trap_id);
2308 		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2309 
2310 		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2311 						  &mlxsw_sp_rx_listener[i],
2312 						  mlxsw_sp);
2313 	}
2314 	return err;
2315 }
2316 
2317 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2318 {
2319 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2320 	int i;
2321 
2322 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2323 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2324 				    mlxsw_sp_rx_listener[i].trap_id);
2325 		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2326 
2327 		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2328 						  &mlxsw_sp_rx_listener[i],
2329 						  mlxsw_sp);
2330 	}
2331 }
2332 
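/* Program a single flood table binding. vFID-based bridges use the FID
 * table type, while 802.1Q bridges use the FID-offset type. Unknown
 * unicast is flooded through the UC table; all other traffic types share
 * the broadcast/multicast (BM) table.
 */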
2333 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2334 				 enum mlxsw_reg_sfgc_type type,
2335 				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
2336 {
2337 	enum mlxsw_flood_table_type table_type;
2338 	enum mlxsw_sp_flood_table flood_table;
2339 	char sfgc_pl[MLXSW_REG_SFGC_LEN];
2340 
2341 	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2342 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2343 	else
2344 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2345 
2346 	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2347 		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2348 	else
2349 		flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2350 
2351 	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2352 			    flood_table);
2353 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2354 }
2355 
2356 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2357 {
2358 	int type, err;
2359 
2360 	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2361 		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2362 			continue;
2363 
2364 		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2365 					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2366 		if (err)
2367 			return err;
2368 
2369 		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2370 					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2371 		if (err)
2372 			return err;
2373 	}
2374 
2375 	return 0;
2376 }
2377 
2378 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2379 {
2380 	char slcr_pl[MLXSW_REG_SLCR_LEN];
2381 
2382 	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2383 				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
2384 				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2385 				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
2386 				     MLXSW_REG_SLCR_LAG_HASH_SIP |
2387 				     MLXSW_REG_SLCR_LAG_HASH_DIP |
2388 				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
2389 				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
2390 				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2391 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2392 }
2393 
2394 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2395 			 const struct mlxsw_bus_info *mlxsw_bus_info)
2396 {
2397 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2398 	int err;
2399 
2400 	mlxsw_sp->core = mlxsw_core;
2401 	mlxsw_sp->bus_info = mlxsw_bus_info;
2402 	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
2403 	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
2404 	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
2405 
2406 	err = mlxsw_sp_base_mac_get(mlxsw_sp);
2407 	if (err) {
2408 		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
2409 		return err;
2410 	}
2411 
2412 	err = mlxsw_sp_ports_create(mlxsw_sp);
2413 	if (err) {
2414 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2415 		return err;
2416 	}
2417 
2418 	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2419 	if (err) {
2420 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
2421 		goto err_event_register;
2422 	}
2423 
2424 	err = mlxsw_sp_traps_init(mlxsw_sp);
2425 	if (err) {
2426 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
2427 		goto err_rx_listener_register;
2428 	}
2429 
2430 	err = mlxsw_sp_flood_init(mlxsw_sp);
2431 	if (err) {
2432 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
2433 		goto err_flood_init;
2434 	}
2435 
2436 	err = mlxsw_sp_buffers_init(mlxsw_sp);
2437 	if (err) {
2438 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2439 		goto err_buffers_init;
2440 	}
2441 
2442 	err = mlxsw_sp_lag_init(mlxsw_sp);
2443 	if (err) {
2444 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2445 		goto err_lag_init;
2446 	}
2447 
2448 	err = mlxsw_sp_switchdev_init(mlxsw_sp);
2449 	if (err) {
2450 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2451 		goto err_switchdev_init;
2452 	}
2453 
2454 	return 0;
2455 
2456 err_switchdev_init:
2457 err_lag_init:
2458 	mlxsw_sp_buffers_fini(mlxsw_sp);
2459 err_buffers_init:
2460 err_flood_init:
2461 	mlxsw_sp_traps_fini(mlxsw_sp);
2462 err_rx_listener_register:
2463 	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2464 err_event_register:
2465 	mlxsw_sp_ports_remove(mlxsw_sp);
2466 	return err;
2467 }
2468 
2469 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2470 {
2471 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2472 
2473 	mlxsw_sp_switchdev_fini(mlxsw_sp);
2474 	mlxsw_sp_buffers_fini(mlxsw_sp);
2475 	mlxsw_sp_traps_fini(mlxsw_sp);
2476 	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2477 	mlxsw_sp_ports_remove(mlxsw_sp);
2478 }
2479 
2480 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
2481 	.used_max_vepa_channels		= 1,
2482 	.max_vepa_channels		= 0,
2483 	.used_max_lag			= 1,
2484 	.max_lag			= MLXSW_SP_LAG_MAX,
2485 	.used_max_port_per_lag		= 1,
2486 	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
2487 	.used_max_mid			= 1,
2488 	.max_mid			= MLXSW_SP_MID_MAX,
2489 	.used_max_pgt			= 1,
2490 	.max_pgt			= 0,
2491 	.used_max_system_port		= 1,
2492 	.max_system_port		= 64,
2493 	.used_max_vlan_groups		= 1,
2494 	.max_vlan_groups		= 127,
2495 	.used_max_regions		= 1,
2496 	.max_regions			= 400,
2497 	.used_flood_tables		= 1,
2498 	.used_flood_mode		= 1,
2499 	.flood_mode			= 3,
2500 	.max_fid_offset_flood_tables	= 2,
2501 	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
2502 	.max_fid_flood_tables		= 2,
2503 	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
2504 	.used_max_ib_mc			= 1,
2505 	.max_ib_mc			= 0,
2506 	.used_max_pkey			= 1,
2507 	.max_pkey			= 0,
2508 	.swid_config			= {
2509 		{
2510 			.used_type	= 1,
2511 			.type		= MLXSW_PORT_SWID_TYPE_ETH,
2512 		}
2513 	},
2514 };
2515 
2516 static struct mlxsw_driver mlxsw_sp_driver = {
2517 	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
2518 	.owner				= THIS_MODULE,
2519 	.priv_size			= sizeof(struct mlxsw_sp),
2520 	.init				= mlxsw_sp_init,
2521 	.fini				= mlxsw_sp_fini,
2522 	.port_split			= mlxsw_sp_port_split,
2523 	.port_unsplit			= mlxsw_sp_port_unsplit,
2524 	.sb_pool_get			= mlxsw_sp_sb_pool_get,
2525 	.sb_pool_set			= mlxsw_sp_sb_pool_set,
2526 	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
2527 	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
2528 	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
2529 	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
2530 	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
2531 	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
2532 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
2533 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
2534 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
2535 	.txhdr_len			= MLXSW_TXHDR_LEN,
2536 	.profile			= &mlxsw_sp_config_profile,
2537 };
2538 
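/* FDB flush helpers. The SFDF register can flush learned entries per
 * port, per {port, FID}, per LAG and per {LAG, FID}; the callers below
 * pick the narrowest flush that matches the port's state.
 */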
2539 static int
2540 mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
2541 {
2542 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2543 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2544 
2545 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
2546 	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
2547 
2548 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2549 }
2550 
2551 static int
2552 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2553 				    u16 fid)
2554 {
2555 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2556 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2557 
2558 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2559 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2560 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2561 						mlxsw_sp_port->local_port);
2562 
2563 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2564 }
2565 
2566 static int
2567 mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2568 {
2569 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2570 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2571 
2572 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2573 	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2574 
2575 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2576 }
2577 
2578 static int
2579 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2580 				      u16 fid)
2581 {
2582 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2583 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2584 
2585 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2586 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2587 	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2588 
2589 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2590 }
2591 
2592 static int
2593 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2594 {
2595 	int err, last_err = 0;
2596 	u16 vid;
2597 
2598 	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2599 		err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2600 		if (err)
2601 			last_err = err;
2602 	}
2603 
2604 	return last_err;
2605 }
2606 
2607 static int
2608 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2609 {
2610 	int err, last_err = 0;
2611 	u16 vid;
2612 
2613 	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2614 		err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2615 		if (err)
2616 			last_err = err;
2617 	}
2618 
2619 	return last_err;
2620 }
2621 
2622 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2623 {
	if (!list_empty(&mlxsw_sp_port->vports_list)) {
		if (mlxsw_sp_port->lagged)
			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
		else
			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
	} else {
		if (mlxsw_sp_port->lagged)
			return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
		else
			return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
	}
2634 }
2635 
2636 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2637 {
2638 	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2639 	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2640 
2641 	if (mlxsw_sp_vport->lagged)
2642 		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2643 							     fid);
2644 	else
2645 		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2646 }
2647 
2648 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2649 {
2650 	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2651 }
2652 
2653 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2654 {
2655 	struct net_device *dev = mlxsw_sp_port->dev;
2656 	int err;
2657 
	/* When the port is not bridged, untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in the
	 * device. Remove it and let the bridge code take care of its own
	 * VLANs.
	 */
2663 	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
2664 	if (err)
2665 		return err;
2666 
2667 	mlxsw_sp_port->learning = 1;
2668 	mlxsw_sp_port->learning_sync = 1;
2669 	mlxsw_sp_port->uc_flood = 1;
2670 	mlxsw_sp_port->bridged = 1;
2671 
2672 	return 0;
2673 }
2674 
2675 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2676 				      bool flush_fdb)
2677 {
2678 	struct net_device *dev = mlxsw_sp_port->dev;
2679 
2680 	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2681 		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2682 
2683 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2684 
2685 	mlxsw_sp_port->learning = 0;
2686 	mlxsw_sp_port->learning_sync = 0;
2687 	mlxsw_sp_port->uc_flood = 0;
2688 	mlxsw_sp_port->bridged = 0;
2689 
2690 	/* Add implicit VLAN interface in the device, so that untagged
2691 	 * packets will be classified to the default vFID.
2692 	 */
2693 	return mlxsw_sp_port_add_vid(dev, 0, 1);
2694 }
2695 
2696 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2697 					 struct net_device *br_dev)
2698 {
2699 	return !mlxsw_sp->master_bridge.dev ||
2700 	       mlxsw_sp->master_bridge.dev == br_dev;
2701 }
2702 
2703 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2704 				       struct net_device *br_dev)
2705 {
2706 	mlxsw_sp->master_bridge.dev = br_dev;
2707 	mlxsw_sp->master_bridge.ref_count++;
2708 }
2709 
2710 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
2711 				       struct net_device *br_dev)
2712 {
2713 	if (--mlxsw_sp->master_bridge.ref_count == 0)
2714 		mlxsw_sp->master_bridge.dev = NULL;
2715 }
2716 
2717 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2718 {
2719 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2720 
2721 	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
2722 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2723 }
2724 
2725 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2726 {
2727 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2728 
2729 	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
2730 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2731 }
2732 
2733 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2734 				     u16 lag_id, u8 port_index)
2735 {
2736 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2737 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2738 
2739 	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
2740 				      lag_id, port_index);
2741 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2742 }
2743 
2744 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2745 					u16 lag_id)
2746 {
2747 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2748 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2749 
2750 	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
2751 					 lag_id);
2752 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2753 }
2754 
2755 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
2756 					u16 lag_id)
2757 {
2758 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2759 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2760 
2761 	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
2762 					lag_id);
2763 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2764 }
2765 
2766 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
2767 					 u16 lag_id)
2768 {
2769 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2770 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2771 
2772 	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
2773 					 lag_id);
2774 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2775 }
2776 
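/* Resolve a LAG device to a hardware LAG ID: reuse the ID already bound
 * to this device if one exists, otherwise hand out the first free slot,
 * or fail with -EBUSY when all MLXSW_SP_LAG_MAX slots are referenced.
 */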
2777 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2778 				  struct net_device *lag_dev,
2779 				  u16 *p_lag_id)
2780 {
2781 	struct mlxsw_sp_upper *lag;
2782 	int free_lag_id = -1;
2783 	int i;
2784 
2785 	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2786 		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2787 		if (lag->ref_count) {
2788 			if (lag->dev == lag_dev) {
2789 				*p_lag_id = i;
2790 				return 0;
2791 			}
2792 		} else if (free_lag_id < 0) {
2793 			free_lag_id = i;
2794 		}
2795 	}
2796 	if (free_lag_id < 0)
2797 		return -EBUSY;
2798 	*p_lag_id = free_lag_id;
2799 	return 0;
2800 }
2801 
2802 static bool
2803 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2804 			  struct net_device *lag_dev,
2805 			  struct netdev_lag_upper_info *lag_upper_info)
2806 {
2807 	u16 lag_id;
2808 
2809 	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2810 		return false;
2811 	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2812 		return false;
2813 	return true;
2814 }
2815 
2816 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2817 				       u16 lag_id, u8 *p_port_index)
2818 {
2819 	int i;
2820 
2821 	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2822 		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2823 			*p_port_index = i;
2824 			return 0;
2825 		}
2826 	}
2827 	return -EBUSY;
2828 }
2829 
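/* Join a port to a LAG: create the hardware LAG on first use, add the
 * port to the collector at a free index, enable collection and record
 * the {LAG, index} to local port mapping in the core, so that packets
 * trapped via the LAG can be attributed to a member port.
 */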
2830 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2831 				  struct net_device *lag_dev)
2832 {
2833 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2834 	struct mlxsw_sp_upper *lag;
2835 	u16 lag_id;
2836 	u8 port_index;
2837 	int err;
2838 
2839 	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
2840 	if (err)
2841 		return err;
2842 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2843 	if (!lag->ref_count) {
2844 		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
2845 		if (err)
2846 			return err;
2847 		lag->dev = lag_dev;
2848 	}
2849 
2850 	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
2851 	if (err)
2852 		return err;
2853 	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
2854 	if (err)
2855 		goto err_col_port_add;
2856 	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
2857 	if (err)
2858 		goto err_col_port_enable;
2859 
2860 	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
2861 				   mlxsw_sp_port->local_port);
2862 	mlxsw_sp_port->lag_id = lag_id;
2863 	mlxsw_sp_port->lagged = 1;
2864 	lag->ref_count++;
2865 	return 0;
2866 
2867 err_col_port_enable:
2868 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2869 err_col_port_add:
2870 	if (!lag->ref_count)
2871 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2872 	return err;
2873 }
2874 
2875 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2876 				       struct net_device *br_dev,
2877 				       bool flush_fdb);
2878 
2879 static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2880 				   struct net_device *lag_dev)
2881 {
2882 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2883 	struct mlxsw_sp_port *mlxsw_sp_vport;
2884 	struct mlxsw_sp_upper *lag;
2885 	u16 lag_id = mlxsw_sp_port->lag_id;
2886 	int err;
2887 
2888 	if (!mlxsw_sp_port->lagged)
2889 		return 0;
2890 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2891 	WARN_ON(lag->ref_count == 0);
2892 
2893 	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
2894 	if (err)
2895 		return err;
2896 	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2897 	if (err)
2898 		return err;
2899 
	/* When we leave a LAG device that has bridges built on top, their
	 * teardown sequence is never issued, so we need to invoke the
	 * necessary cleanup routines ourselves.
	 */
2904 	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
2905 			    vport.list) {
2906 		struct net_device *br_dev;
2907 
2908 		if (!mlxsw_sp_vport->bridged)
2909 			continue;
2910 
2911 		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2912 		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
2913 	}
2914 
2915 	if (mlxsw_sp_port->bridged) {
2916 		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
2917 		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
2918 		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
2919 	}
2920 
2921 	if (lag->ref_count == 1) {
2922 		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
2923 			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2924 		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2925 		if (err)
2926 			return err;
2927 	}
2928 
2929 	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
2930 				     mlxsw_sp_port->local_port);
2931 	mlxsw_sp_port->lagged = 0;
2932 	lag->ref_count--;
2933 	return 0;
2934 }
2935 
2936 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2937 				      u16 lag_id)
2938 {
2939 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2940 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2941 
2942 	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
2943 					 mlxsw_sp_port->local_port);
2944 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2945 }
2946 
2947 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2948 					 u16 lag_id)
2949 {
2950 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2951 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2952 
2953 	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
2954 					    mlxsw_sp_port->local_port);
2955 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2956 }
2957 
2958 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2959 				       bool lag_tx_enabled)
2960 {
2961 	if (lag_tx_enabled)
2962 		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2963 						  mlxsw_sp_port->lag_id);
2964 	else
2965 		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2966 						     mlxsw_sp_port->lag_id);
2967 }
2968 
2969 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
2970 				     struct netdev_lag_lower_state_info *info)
2971 {
2972 	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
2973 }
2974 
2975 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2976 				   struct net_device *vlan_dev)
2977 {
2978 	struct mlxsw_sp_port *mlxsw_sp_vport;
2979 	u16 vid = vlan_dev_vlan_id(vlan_dev);
2980 
2981 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;
2986 
2987 	mlxsw_sp_vport->dev = vlan_dev;
2988 
2989 	return 0;
2990 }
2991 
2992 static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2993 				     struct net_device *vlan_dev)
2994 {
2995 	struct mlxsw_sp_port *mlxsw_sp_vport;
2996 	u16 vid = vlan_dev_vlan_id(vlan_dev);
2997 
2998 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;
3003 
	/* When removing a VLAN device while still bridged, we should first
	 * remove it from the bridge, as we receive the bridge's notification
	 * when the vPort is already gone.
	 */
3008 	if (mlxsw_sp_vport->bridged) {
3009 		struct net_device *br_dev;
3010 
3011 		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
3012 		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
3013 	}
3014 
3015 	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3016 
3017 	return 0;
3018 }
3019 
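/* PRECHANGEUPPER is used to veto configurations the hardware cannot
 * support before they take effect; CHANGEUPPER then commits the VLAN,
 * bridge or LAG linking (or unlinking) to the device.
 */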
3020 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3021 					       unsigned long event, void *ptr)
3022 {
3023 	struct netdev_notifier_changeupper_info *info;
3024 	struct mlxsw_sp_port *mlxsw_sp_port;
3025 	struct net_device *upper_dev;
3026 	struct mlxsw_sp *mlxsw_sp;
3027 	int err;
3028 
3029 	mlxsw_sp_port = netdev_priv(dev);
3030 	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3031 	info = ptr;
3032 
3033 	switch (event) {
3034 	case NETDEV_PRECHANGEUPPER:
3035 		upper_dev = info->upper_dev;
3036 		if (!info->master || !info->linking)
3037 			break;
		/* HW limitation forbids putting ports in multiple bridges. */
3039 		if (netif_is_bridge_master(upper_dev) &&
3040 		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
3041 			return NOTIFY_BAD;
3042 		if (netif_is_lag_master(upper_dev) &&
3043 		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
3044 					       info->upper_info))
3045 			return NOTIFY_BAD;
3046 		break;
3047 	case NETDEV_CHANGEUPPER:
3048 		upper_dev = info->upper_dev;
3049 		if (is_vlan_dev(upper_dev)) {
3050 			if (info->linking) {
3051 				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
3052 							      upper_dev);
3053 				if (err) {
3054 					netdev_err(dev, "Failed to link VLAN device\n");
3055 					return NOTIFY_BAD;
3056 				}
3057 			} else {
3058 				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
3059 								upper_dev);
3060 				if (err) {
3061 					netdev_err(dev, "Failed to unlink VLAN device\n");
3062 					return NOTIFY_BAD;
3063 				}
3064 			}
3065 		} else if (netif_is_bridge_master(upper_dev)) {
3066 			if (info->linking) {
3067 				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
3068 				if (err) {
3069 					netdev_err(dev, "Failed to join bridge\n");
3070 					return NOTIFY_BAD;
3071 				}
3072 				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
3073 			} else {
3074 				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
3075 								 true);
3076 				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
3077 				if (err) {
3078 					netdev_err(dev, "Failed to leave bridge\n");
3079 					return NOTIFY_BAD;
3080 				}
3081 			}
3082 		} else if (netif_is_lag_master(upper_dev)) {
3083 			if (info->linking) {
3084 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
3085 							     upper_dev);
3086 				if (err) {
3087 					netdev_err(dev, "Failed to join link aggregation\n");
3088 					return NOTIFY_BAD;
3089 				}
3090 			} else {
3091 				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
3092 							      upper_dev);
3093 				if (err) {
3094 					netdev_err(dev, "Failed to leave link aggregation\n");
3095 					return NOTIFY_BAD;
3096 				}
3097 			}
3098 		}
3099 		break;
3100 	}
3101 
3102 	return NOTIFY_DONE;
3103 }
3104 
3105 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3106 					       unsigned long event, void *ptr)
3107 {
3108 	struct netdev_notifier_changelowerstate_info *info;
3109 	struct mlxsw_sp_port *mlxsw_sp_port;
3110 	int err;
3111 
3112 	mlxsw_sp_port = netdev_priv(dev);
3113 	info = ptr;
3114 
3115 	switch (event) {
3116 	case NETDEV_CHANGELOWERSTATE:
3117 		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3118 			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3119 							info->lower_state_info);
3120 			if (err)
3121 				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3122 		}
3123 		break;
3124 	}
3125 
3126 	return NOTIFY_DONE;
3127 }
3128 
3129 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3130 					 unsigned long event, void *ptr)
3131 {
3132 	switch (event) {
3133 	case NETDEV_PRECHANGEUPPER:
3134 	case NETDEV_CHANGEUPPER:
3135 		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3136 	case NETDEV_CHANGELOWERSTATE:
3137 		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3138 	}
3139 
3140 	return NOTIFY_DONE;
3141 }
3142 
3143 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3144 					unsigned long event, void *ptr)
3145 {
3146 	struct net_device *dev;
3147 	struct list_head *iter;
3148 	int ret;
3149 
3150 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
3151 		if (mlxsw_sp_port_dev_check(dev)) {
3152 			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3153 			if (ret == NOTIFY_BAD)
3154 				return ret;
3155 		}
3156 	}
3157 
3158 	return NOTIFY_DONE;
3159 }
3160 
3161 static struct mlxsw_sp_vfid *
3162 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3163 		      const struct net_device *br_dev)
3164 {
3165 	struct mlxsw_sp_vfid *vfid;
3166 
3167 	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
3168 		if (vfid->br_dev == br_dev)
3169 			return vfid;
3170 	}
3171 
3172 	return NULL;
3173 }
3174 
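/* Bridge vFIDs are allocated from a separate range above the per-port
 * vFIDs, so br_vfid 0 corresponds to vFID MLXSW_SP_VFID_PORT_MAX.
 */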
3175 static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
3176 {
3177 	return vfid - MLXSW_SP_VFID_PORT_MAX;
3178 }
3179 
3180 static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
3181 {
3182 	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
3183 }
3184 
3185 static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
3186 {
3187 	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
3188 				   MLXSW_SP_VFID_BR_MAX);
3189 }
3190 
3191 static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
3192 						     struct net_device *br_dev)
3193 {
3194 	struct device *dev = mlxsw_sp->bus_info->dev;
3195 	struct mlxsw_sp_vfid *vfid;
3196 	u16 n_vfid;
3197 	int err;
3198 
3199 	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
3200 	if (n_vfid == MLXSW_SP_VFID_MAX) {
3201 		dev_err(dev, "No available vFIDs\n");
3202 		return ERR_PTR(-ERANGE);
3203 	}
3204 
3205 	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
3206 	if (err) {
3207 		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
3208 		return ERR_PTR(err);
3209 	}
3210 
3211 	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
3212 	if (!vfid)
3213 		goto err_allocate_vfid;
3214 
3215 	vfid->vfid = n_vfid;
3216 	vfid->br_dev = br_dev;
3217 
3218 	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
3219 	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
3220 
3221 	return vfid;
3222 
3223 err_allocate_vfid:
3224 	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
3225 	return ERR_PTR(-ENOMEM);
3226 }
3227 
3228 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
3229 				     struct mlxsw_sp_vfid *vfid)
3230 {
3231 	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
3232 
3233 	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
3234 	list_del(&vfid->list);
3235 
3236 	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
3237 
3238 	kfree(vfid);
3239 }
3240 
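/* Detach a vPort from its bridge: remap the {Port, VID} pair from the
 * bridge's vFID back to a per-port vFID (creating one if needed),
 * disable learning and flooding, return the VID to forwarding STP state
 * and optionally flush the FDB entries learned while bridged.
 */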
3241 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
3242 				       struct net_device *br_dev,
3243 				       bool flush_fdb)
3244 {
3245 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3246 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3247 	struct net_device *dev = mlxsw_sp_vport->dev;
3248 	struct mlxsw_sp_vfid *vfid, *new_vfid;
3249 	int err;
3250 
3251 	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (WARN_ON(!vfid))
		return -EINVAL;
3256 
3257 	/* We need a vFID to go back to after leaving the bridge's vFID. */
3258 	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
3259 	if (!new_vfid) {
3260 		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
3261 		if (IS_ERR(new_vfid)) {
3262 			netdev_err(dev, "Failed to create vFID for VID=%d\n",
3263 				   vid);
3264 			return PTR_ERR(new_vfid);
3265 		}
3266 	}
3267 
3268 	/* Invalidate existing {Port, VID} to vFID mapping and create a new
3269 	 * one for the new vFID.
3270 	 */
3271 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3272 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3273 					   false,
3274 					   mlxsw_sp_vfid_to_fid(vfid->vfid),
3275 					   vid);
3276 	if (err) {
3277 		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3278 			   vfid->vfid);
3279 		goto err_port_vid_to_fid_invalidate;
3280 	}
3281 
3282 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3283 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3284 					   true,
3285 					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
3286 					   vid);
3287 	if (err) {
3288 		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3289 			   new_vfid->vfid);
3290 		goto err_port_vid_to_fid_validate;
3291 	}
3292 
3293 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3294 	if (err) {
3295 		netdev_err(dev, "Failed to disable learning\n");
3296 		goto err_port_vid_learning_set;
3297 	}
3298 
3299 	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
3300 				       false);
3301 	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
3303 		goto err_vport_flood_set;
3304 	}
3305 
3306 	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3307 					  MLXSW_REG_SPMS_STATE_FORWARDING);
3308 	if (err) {
3309 		netdev_err(dev, "Failed to set STP state\n");
3310 		goto err_port_stp_state_set;
3311 	}
3312 
3313 	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
3314 		netdev_err(dev, "Failed to flush FDB\n");
3315 
3316 	/* Switch between the vFIDs and destroy the old one if needed. */
3317 	new_vfid->nr_vports++;
3318 	mlxsw_sp_vport->vport.vfid = new_vfid;
3319 	vfid->nr_vports--;
3320 	if (!vfid->nr_vports)
3321 		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3322 
3323 	mlxsw_sp_vport->learning = 0;
3324 	mlxsw_sp_vport->learning_sync = 0;
3325 	mlxsw_sp_vport->uc_flood = 0;
3326 	mlxsw_sp_vport->bridged = 0;
3327 
3328 	return 0;
3329 
3330 err_port_stp_state_set:
3331 err_vport_flood_set:
3332 err_port_vid_learning_set:
3333 err_port_vid_to_fid_validate:
3334 err_port_vid_to_fid_invalidate:
	/* Roll back the vFID only if it is new. */
3336 	if (!new_vfid->nr_vports)
3337 		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
3338 	return err;
3339 }
3340 
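/* Attach a vPort to a bridge: create or reuse the bridge's vFID, enable
 * flooding and learning and remap the {Port, VID} pair from the old
 * per-port vFID to the bridge's vFID, destroying the old vFID when it
 * loses its last user.
 */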
3341 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3342 				      struct net_device *br_dev)
3343 {
3344 	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
3345 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3346 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3347 	struct net_device *dev = mlxsw_sp_vport->dev;
3348 	struct mlxsw_sp_vfid *vfid;
3349 	int err;
3350 
3351 	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3352 	if (!vfid) {
3353 		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
3354 		if (IS_ERR(vfid)) {
3355 			netdev_err(dev, "Failed to create bridge vFID\n");
3356 			return PTR_ERR(vfid);
3357 		}
3358 	}
3359 
3360 	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
3361 	if (err) {
3362 		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
3363 			   vfid->vfid);
3364 		goto err_port_flood_set;
3365 	}
3366 
3367 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
3368 	if (err) {
3369 		netdev_err(dev, "Failed to enable learning\n");
3370 		goto err_port_vid_learning_set;
3371 	}
3372 
3373 	/* We need to invalidate existing {Port, VID} to vFID mapping and
3374 	 * create a new one for the bridge's vFID.
3375 	 */
3376 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3377 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3378 					   false,
3379 					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
3380 					   vid);
3381 	if (err) {
3382 		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3383 			   old_vfid->vfid);
3384 		goto err_port_vid_to_fid_invalidate;
3385 	}
3386 
3387 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3388 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3389 					   true,
3390 					   mlxsw_sp_vfid_to_fid(vfid->vfid),
3391 					   vid);
3392 	if (err) {
3393 		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3394 			   vfid->vfid);
3395 		goto err_port_vid_to_fid_validate;
3396 	}
3397 
3398 	/* Switch between the vFIDs and destroy the old one if needed. */
3399 	vfid->nr_vports++;
3400 	mlxsw_sp_vport->vport.vfid = vfid;
3401 	old_vfid->nr_vports--;
3402 	if (!old_vfid->nr_vports)
3403 		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
3404 
3405 	mlxsw_sp_vport->learning = 1;
3406 	mlxsw_sp_vport->learning_sync = 1;
3407 	mlxsw_sp_vport->uc_flood = 1;
3408 	mlxsw_sp_vport->bridged = 1;
3409 
3410 	return 0;
3411 
3412 err_port_vid_to_fid_validate:
3413 	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3414 				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
3415 				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
3416 err_port_vid_to_fid_invalidate:
3417 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3418 err_port_vid_learning_set:
3419 	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
3420 err_port_flood_set:
3421 	if (!vfid->nr_vports)
3422 		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3423 	return err;
3424 }
3425 
3426 static bool
3427 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3428 				  const struct net_device *br_dev)
3429 {
3430 	struct mlxsw_sp_port *mlxsw_sp_vport;
3431 
3432 	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3433 			    vport.list) {
3434 		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
3435 			return false;
3436 	}
3437 
3438 	return true;
3439 }
3440 
3441 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
3442 					  unsigned long event, void *ptr,
3443 					  u16 vid)
3444 {
3445 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3446 	struct netdev_notifier_changeupper_info *info = ptr;
3447 	struct mlxsw_sp_port *mlxsw_sp_vport;
3448 	struct net_device *upper_dev;
3449 	int err;
3450 
3451 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3452 
3453 	switch (event) {
3454 	case NETDEV_PRECHANGEUPPER:
3455 		upper_dev = info->upper_dev;
3456 		if (!info->master || !info->linking)
3457 			break;
3458 		if (!netif_is_bridge_master(upper_dev))
3459 			return NOTIFY_BAD;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port that are members of the same bridge.
		 */
3463 		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
3464 						       upper_dev))
3465 			return NOTIFY_BAD;
3466 		break;
3467 	case NETDEV_CHANGEUPPER:
3468 		upper_dev = info->upper_dev;
3469 		if (!info->master)
3470 			break;
3471 		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return NOTIFY_BAD;
3476 			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
3477 							 upper_dev);
3478 			if (err) {
3479 				netdev_err(dev, "Failed to join bridge\n");
3480 				return NOTIFY_BAD;
3481 			}
3482 		} else {
			/* We ignore the bridge's unlinking notifications if
			 * the vPort is already gone, since we left the bridge
			 * when the VLAN device was unlinked from the real
			 * device.
			 */
3487 			if (!mlxsw_sp_vport)
3488 				return NOTIFY_DONE;
3489 			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
3490 							  upper_dev, true);
3491 			if (err) {
3492 				netdev_err(dev, "Failed to leave bridge\n");
3493 				return NOTIFY_BAD;
3494 			}
3495 		}
3496 	}
3497 
3498 	return NOTIFY_DONE;
3499 }
3500 
3501 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3502 					      unsigned long event, void *ptr,
3503 					      u16 vid)
3504 {
3505 	struct net_device *dev;
3506 	struct list_head *iter;
3507 	int ret;
3508 
3509 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
3510 		if (mlxsw_sp_port_dev_check(dev)) {
3511 			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3512 							     vid);
3513 			if (ret == NOTIFY_BAD)
3514 				return ret;
3515 		}
3516 	}
3517 
3518 	return NOTIFY_DONE;
3519 }
3520 
3521 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3522 					 unsigned long event, void *ptr)
3523 {
3524 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3525 	u16 vid = vlan_dev_vlan_id(vlan_dev);
3526 
3527 	if (mlxsw_sp_port_dev_check(real_dev))
3528 		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3529 						      vid);
3530 	else if (netif_is_lag_master(real_dev))
3531 		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3532 							  vid);
3533 
3534 	return NOTIFY_DONE;
3535 }
3536 
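/* Top-level netdevice notifier: dispatch events on port netdevs, LAG
 * masters and VLAN uppers to the matching handler; everything else is
 * ignored.
 */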
3537 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3538 				    unsigned long event, void *ptr)
3539 {
3540 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3541 
3542 	if (mlxsw_sp_port_dev_check(dev))
3543 		return mlxsw_sp_netdevice_port_event(dev, event, ptr);
3544 
3545 	if (netif_is_lag_master(dev))
3546 		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3547 
3548 	if (is_vlan_dev(dev))
3549 		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3550 
3551 	return NOTIFY_DONE;
3552 }
3553 
3554 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
3555 	.notifier_call = mlxsw_sp_netdevice_event,
3556 };
3557 
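/* The netdevice notifier is registered before the core driver and
 * unregistered after it, so that netdev events on mlxsw ports are
 * observed for the ports' entire lifetime.
 */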
3558 static int __init mlxsw_sp_module_init(void)
3559 {
3560 	int err;
3561 
3562 	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3563 	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3564 	if (err)
3565 		goto err_core_driver_register;
3566 	return 0;
3567 
3568 err_core_driver_register:
3569 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3570 	return err;
3571 }
3572 
3573 static void __exit mlxsw_sp_module_exit(void)
3574 {
3575 	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
3576 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3577 }
3578 
3579 module_init(mlxsw_sp_module_init);
3580 module_exit(mlxsw_sp_module_exit);
3581 
3582 MODULE_LICENSE("Dual BSD/GPL");
3583 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3584 MODULE_DESCRIPTION("Mellanox Spectrum driver");
3585 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
3586