/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

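/* Each MLXSW_ITEM32() above describes one field of the Tx header and,
 * through the item helpers, generates mlxsw_tx_hdr_<field>_set()/_get()
 * accessors that operate on the raw header buffer. As a rough sketch
 * (simplified; the real macro in item.h also handles byte order and
 * multi-word items), the 'swid' setter behaves roughly like:
 *
 *	void mlxsw_tx_hdr_swid_set(char *buf, u32 val)
 *	{
 *		__be32 *p = (__be32 *) (buf + 0x00);
 *		u32 tmp = be32_to_cpu(*p);
 *
 *		tmp &= ~(0x7 << 12);	(3-bit field at bit offset 12)
 *		tmp |= (val & 0x7) << 12;
 *		*p = cpu_to_be32(tmp);
 *	}
 *
 * mlxsw_sp_txhdr_construct() below uses these setters to build the
 * header pushed in front of every transmitted packet.
 */
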
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool *p_is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
	return 0;
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
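
/* For example (hypothetical values): a base MAC of 7c:fe:90:00:00:00
 * and local_port 5 yield the port MAC 7c:fe:90:00:00:05. Only the
 * last byte is bumped, which assumes the port number does not
 * overflow it.
 */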

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
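
/* Note that the MTU programmed above includes the Tx header and the
 * Ethernet header, so e.g. a requested MTU of 1500 is validated and
 * written as 1500 + MLXSW_TXHDR_LEN + ETH_HLEN bytes.
 */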

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
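
/* Note that NETDEV_TX_OK is returned even when the packet is dropped
 * (headroom reallocation failure, padding failure or a full transmit
 * queue); the drop is accounted in tx_dropped. NETDEV_TX_BUSY is used
 * only for the early busy check, before the skb has been modified.
 */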

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}
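
/* Sizing sketch: a PG buffer gets two MTUs worth of cells. For
 * lossless (PAUSE/PFC) PGs the buffer is sized 'delay' cells above
 * the Xoff threshold, so frames already in flight when flow control
 * is asserted still have room; lossy PGs get just the buffer itself.
 */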

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
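
/* The per-CPU loop above follows the usual u64_stats pattern: each
 * CPU's snapshot is re-read until its sequence counter is stable, so
 * the 64 bit counters stay consistent even on 32 bit systems without
 * taking a lock in the Tx path.
 */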

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_vfid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
		if (vfid->vid == vid)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}

static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
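
/* vFIDs are a driver-level allocation layered on top of hardware
 * FIDs; mlxsw_sp_vfid_to_fid() translates a vFID index into the FID
 * number actually programmed via SFMR, an offset into a FID range
 * reserved for vPorts.
 */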

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
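
/* Naming examples: a non-split port on module 1 is reported as "p2";
 * with a 2-lane split on the same module, the port on lane 2
 * (width 2) is reported as "p2s1".
 */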

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};
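
/* All of the above counters are fetched in a single query of the
 * PPCNT register's IEEE 802.3 counter group; see
 * mlxsw_sp_port_get_stats() below.
 */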

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};
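
/* Rows that map PTYS bits to a speed but leave .supported and
 * .advertised zero (e.g. the 100BASE_TX, 25G, 50G and 100G entries)
 * have no matching legacy ethtool SUPPORTED_/ADVERTISED_ bit and
 * therefore contribute speed information only.
 */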

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
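
/* E.g., assuming a per-lane base speed of 25 Gb/s
 * (MLXSW_SP_PORT_BASE_SPEED), a 4-lane port gets upper_speed = 100000
 * and thus advertises every link mode from the table up to and
 * including 100 Gb/s.
 */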

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy, so that each TC is linked to
	 * one subgroup, and the subgroups are all members of the same
	 * group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchy levels
	 * that support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
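
/* The resulting per-port scheduling hierarchy is:
 *
 *	port
 *	  `- group 0
 *	       `- subgroups 0..7
 *	            `- TC i -> subgroup i
 *
 * with DWRR and the max shapers disabled everywhere and all switch
 * priorities initially mapped to TC 0.
 */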

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top of
	 * all other headers.
	 */
1737 	dev->hard_header_len += MLXSW_TXHDR_LEN;
1738 
1739 	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1740 	if (err) {
1741 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1742 			mlxsw_sp_port->local_port);
1743 		goto err_port_system_port_mapping_set;
1744 	}
1745 
1746 	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1747 	if (err) {
1748 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1749 			mlxsw_sp_port->local_port);
1750 		goto err_port_swid_set;
1751 	}
1752 
1753 	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1754 	if (err) {
1755 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1756 			mlxsw_sp_port->local_port);
1757 		goto err_port_speed_by_width_set;
1758 	}
1759 
1760 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1761 	if (err) {
1762 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1763 			mlxsw_sp_port->local_port);
1764 		goto err_port_mtu_set;
1765 	}
1766 
1767 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1768 	if (err)
1769 		goto err_port_admin_status_set;
1770 
1771 	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1772 	if (err) {
1773 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1774 			mlxsw_sp_port->local_port);
1775 		goto err_port_buffers_init;
1776 	}
1777 
1778 	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1779 	if (err) {
1780 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1781 			mlxsw_sp_port->local_port);
1782 		goto err_port_ets_init;
1783 	}
1784 
1785 	/* ETS and buffers must be initialized before DCB. */
1786 	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1787 	if (err) {
1788 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1789 			mlxsw_sp_port->local_port);
1790 		goto err_port_dcb_init;
1791 	}
1792 
1793 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1794 	err = register_netdev(dev);
1795 	if (err) {
1796 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1797 			mlxsw_sp_port->local_port);
1798 		goto err_register_netdev;
1799 	}
1800 
1801 	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1802 				   mlxsw_sp_port->local_port, dev,
1803 				   mlxsw_sp_port->split, module);
1804 	if (err) {
1805 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1806 			mlxsw_sp_port->local_port);
1807 		goto err_core_port_init;
1808 	}
1809 
1810 	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1811 	if (err)
1812 		goto err_port_vlan_init;
1813 
1814 	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1815 	return 0;
1816 
1817 err_port_vlan_init:
1818 	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1819 err_core_port_init:
1820 	unregister_netdev(dev);
1821 err_register_netdev:
1822 err_port_dcb_init:
1823 err_port_ets_init:
1824 err_port_buffers_init:
1825 err_port_admin_status_set:
1826 err_port_mtu_set:
1827 err_port_speed_by_width_set:
1828 err_port_swid_set:
1829 err_port_system_port_mapping_set:
1830 err_dev_addr_init:
1831 	free_percpu(mlxsw_sp_port->pcpu_stats);
1832 err_alloc_stats:
1833 	kfree(mlxsw_sp_port->untagged_vlans);
1834 err_port_untagged_vlans_alloc:
1835 	kfree(mlxsw_sp_port->active_vlans);
1836 err_port_active_vlans_alloc:
1837 	free_netdev(dev);
1838 	return err;
1839 }
1840 
1841 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1842 {
1843 	struct net_device *dev = mlxsw_sp_port->dev;
1844 	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
1845 
1846 	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1847 				 &mlxsw_sp_port->vports_list, vport.list) {
1848 		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1849 
1850 		/* vPorts created for VLAN devices should already be gone
1851 		 * by now, since we unregistered the port netdev.
1852 		 */
1853 		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1854 		mlxsw_sp_port_kill_vid(dev, 0, vid);
1855 	}
1856 }
1857 
1858 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1859 {
1860 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1861 
1862 	if (!mlxsw_sp_port)
1863 		return;
1864 	mlxsw_sp->ports[local_port] = NULL;
1865 	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1866 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1867 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1868 	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
1869 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
1870 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1871 	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
1872 	free_percpu(mlxsw_sp_port->pcpu_stats);
1873 	kfree(mlxsw_sp_port->untagged_vlans);
1874 	kfree(mlxsw_sp_port->active_vlans);
1875 	free_netdev(mlxsw_sp_port->dev);
1876 }
1877 
1878 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1879 {
1880 	int i;
1881 
1882 	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1883 		mlxsw_sp_port_remove(mlxsw_sp, i);
1884 	kfree(mlxsw_sp->ports);
1885 }
1886 
1887 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1888 {
1889 	u8 module, width, lane;
1890 	size_t alloc_size;
1891 	int i;
1892 	int err;
1893 
1894 	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1895 	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1896 	if (!mlxsw_sp->ports)
1897 		return -ENOMEM;
1898 
1899 	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1900 		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1901 						    &width, &lane);
1902 		if (err)
1903 			goto err_port_module_info_get;
1904 		if (!width)
1905 			continue;
1906 		mlxsw_sp->port_to_module[i] = module;
1907 		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1908 					   lane);
1909 		if (err)
1910 			goto err_port_create;
1911 	}
1912 	return 0;
1913 
1914 err_port_create:
1915 err_port_module_info_get:
1916 	for (i--; i >= 1; i--)
1917 		mlxsw_sp_port_remove(mlxsw_sp, i);
1918 	kfree(mlxsw_sp->ports);
1919 	return err;
1920 }
1921 
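/* Local ports are grouped into clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * consecutive ports, starting at local port 1. For example, with clusters
 * of four, local ports 5-8 form one cluster and this helper returns 5 for
 * any of them.
 */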
1922 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1923 {
1924 	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1925 
1926 	return local_port - offset;
1927 }
1928 
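/* Create the ports resulting from a split in three stages - map each new
 * port to its module and lanes, move it to SWID 0 and finally create the
 * port itself. On error, fully unwind the completed stages; note how 'i'
 * is reset to 'count' before each earlier stage is rolled back.
 */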
1929 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
1930 				      u8 module, unsigned int count)
1931 {
1932 	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
1933 	int err, i;
1934 
1935 	for (i = 0; i < count; i++) {
1936 		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
1937 					       width, i * width);
1938 		if (err)
1939 			goto err_port_module_map;
1940 	}
1941 
1942 	for (i = 0; i < count; i++) {
1943 		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
1944 		if (err)
1945 			goto err_port_swid_set;
1946 	}
1947 
1948 	for (i = 0; i < count; i++) {
1949 		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
1950 					   module, width, i * width);
1951 		if (err)
1952 			goto err_port_create;
1953 	}
1954 
1955 	return 0;
1956 
1957 err_port_create:
1958 	for (i--; i >= 0; i--)
1959 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
1960 	i = count;
1961 err_port_swid_set:
1962 	for (i--; i >= 0; i--)
1963 		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
1964 					 MLXSW_PORT_SWID_DISABLED_PORT);
1965 	i = count;
1966 err_port_module_map:
1967 	for (i--; i >= 0; i--)
1968 		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
1969 	return err;
1970 }
1971 
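/* Re-create the original full-width ports after an unsplit. Only the
 * even-offset local ports in the cluster are restored, since the
 * odd-offset ones existed solely as split slaves.
 */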
1972 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1973 					 u8 base_port, unsigned int count)
1974 {
1975 	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
1976 	int i;
1977 
1978 	/* A split by four means we need to re-create two ports; a split
1979 	 * by two means only one.
1980 	 */
1981 	count = count / 2;
1982 
1983 	for (i = 0; i < count; i++) {
1984 		local_port = base_port + i * 2;
1985 		module = mlxsw_sp->port_to_module[local_port];
1986 
1987 		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1988 					 0);
1989 	}
1990 
1991 	for (i = 0; i < count; i++)
1992 		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
1993 
1994 	for (i = 0; i < count; i++) {
1995 		local_port = base_port + i * 2;
1996 		module = mlxsw_sp->port_to_module[local_port];
1997 
1998 		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
1999 				     width, 0);
2000 	}
2001 }
2002 
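/* Split a full-width port into two or four narrower ports. All ports in
 * the affected cluster are removed first and then re-created with the
 * reduced width.
 */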
2003 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2004 			       unsigned int count)
2005 {
2006 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2007 	struct mlxsw_sp_port *mlxsw_sp_port;
2008 	u8 module, cur_width, base_port;
2009 	int i;
2010 	int err;
2011 
2012 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2013 	if (!mlxsw_sp_port) {
2014 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2015 			local_port);
2016 		return -EINVAL;
2017 	}
2018 
2019 	module = mlxsw_sp_port->mapping.module;
2020 	cur_width = mlxsw_sp_port->mapping.width;
2021 
2022 	if (count != 2 && count != 4) {
2023 		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2024 		return -EINVAL;
2025 	}
2026 
2027 	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2028 		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2029 		return -EINVAL;
2030 	}
2031 
2032 	/* Make sure we have enough slave (even) ports for the split. */
2033 	if (count == 2) {
2034 		base_port = local_port;
2035 		if (mlxsw_sp->ports[base_port + 1]) {
2036 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2037 			return -EINVAL;
2038 		}
2039 	} else {
2040 		base_port = mlxsw_sp_cluster_base_port_get(local_port);
2041 		if (mlxsw_sp->ports[base_port + 1] ||
2042 		    mlxsw_sp->ports[base_port + 3]) {
2043 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2044 			return -EINVAL;
2045 		}
2046 	}
2047 
2048 	for (i = 0; i < count; i++)
2049 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2050 
2051 	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2052 	if (err) {
2053 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2054 		goto err_port_split_create;
2055 	}
2056 
2057 	return 0;
2058 
2059 err_port_split_create:
2060 	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2061 	return err;
2062 }
2063 
2064 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2065 {
2066 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2067 	struct mlxsw_sp_port *mlxsw_sp_port;
2068 	u8 cur_width, base_port;
2069 	unsigned int count;
2070 	int i;
2071 
2072 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2073 	if (!mlxsw_sp_port) {
2074 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2075 			local_port);
2076 		return -EINVAL;
2077 	}
2078 
2079 	if (!mlxsw_sp_port->split) {
2080 		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2081 		return -EINVAL;
2082 	}
2083 
2084 	cur_width = mlxsw_sp_port->mapping.width;
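	/* The lane width encodes the original split factor, since
	 * MLXSW_PORT_MODULE_MAX_WIDTH was divided by the port count at
	 * split time.
	 */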
2085 	count = cur_width == 1 ? 4 : 2;
2086 
2087 	base_port = mlxsw_sp_cluster_base_port_get(local_port);
2088 
2089 	/* Determine which ports to remove. */
2090 	if (count == 2 && local_port >= base_port + 2)
2091 		base_port = base_port + 2;
2092 
2093 	for (i = 0; i < count; i++)
2094 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2095 
2096 	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2097 
2098 	return 0;
2099 }
2100 
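/* PUDE (Port Up/Down Event) handler - reflect the reported operational
 * state of the port into the netdev's carrier state.
 */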
2101 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2102 				     char *pude_pl, void *priv)
2103 {
2104 	struct mlxsw_sp *mlxsw_sp = priv;
2105 	struct mlxsw_sp_port *mlxsw_sp_port;
2106 	enum mlxsw_reg_pude_oper_status status;
2107 	u8 local_port;
2108 
2109 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2110 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2111 	if (!mlxsw_sp_port) {
2112 		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
2113 			 local_port);
2114 		return;
2115 	}
2116 
2117 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
2118 	if (status == MLXSW_PORT_OPER_STATUS_UP) {
2119 		netdev_info(mlxsw_sp_port->dev, "link up\n");
2120 		netif_carrier_on(mlxsw_sp_port->dev);
2121 	} else {
2122 		netdev_info(mlxsw_sp_port->dev, "link down\n");
2123 		netif_carrier_off(mlxsw_sp_port->dev);
2124 	}
2125 }
2126 
2127 static struct mlxsw_event_listener mlxsw_sp_pude_event = {
2128 	.func = mlxsw_sp_pude_event_func,
2129 	.trap_id = MLXSW_TRAP_ID_PUDE,
2130 };
2131 
2132 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2133 				   enum mlxsw_event_trap_id trap_id)
2134 {
2135 	struct mlxsw_event_listener *el;
2136 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2137 	int err;
2138 
2139 	switch (trap_id) {
2140 	case MLXSW_TRAP_ID_PUDE:
2141 		el = &mlxsw_sp_pude_event;
2142 		break;
	default:
		/* Guard against new trap IDs leaving 'el' uninitialized. */
		return -EINVAL;
2143 	}
2144 	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2145 	if (err)
2146 		return err;
2147 
2148 	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2149 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2150 	if (err)
2151 		goto err_event_trap_set;
2152 
2153 	return 0;
2154 
2155 err_event_trap_set:
2156 	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2157 	return err;
2158 }
2159 
2160 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2161 				      enum mlxsw_event_trap_id trap_id)
2162 {
2163 	struct mlxsw_event_listener *el;
2164 
2165 	switch (trap_id) {
2166 	case MLXSW_TRAP_ID_PUDE:
2167 		el = &mlxsw_sp_pude_event;
2168 		break;
	default:
		/* Guard against new trap IDs leaving 'el' uninitialized. */
		return;
2169 	}
2170 	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2171 }
2172 
2173 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2174 				      void *priv)
2175 {
2176 	struct mlxsw_sp *mlxsw_sp = priv;
2177 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2178 	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2179 
2180 	if (unlikely(!mlxsw_sp_port)) {
2181 		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2182 				     local_port);
2183 		return;
2184 	}
2185 
2186 	skb->dev = mlxsw_sp_port->dev;
2187 
2188 	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2189 	u64_stats_update_begin(&pcpu_stats->syncp);
2190 	pcpu_stats->rx_packets++;
2191 	pcpu_stats->rx_bytes += skb->len;
2192 	u64_stats_update_end(&pcpu_stats->syncp);
2193 
2194 	skb->protocol = eth_type_trans(skb, skb->dev);
2195 	netif_receive_skb(skb);
2196 }
2197 
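/* Control packets matching the traps below are delivered to the CPU and
 * injected into the kernel's receive path via mlxsw_sp_rx_listener_func().
 */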
2198 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2199 	{
2200 		.func = mlxsw_sp_rx_listener_func,
2201 		.local_port = MLXSW_PORT_DONT_CARE,
2202 		.trap_id = MLXSW_TRAP_ID_FDB_MC,
2203 	},
2204 	/* Traps for specific L2 packet types, not trapped as FDB MC */
2205 	{
2206 		.func = mlxsw_sp_rx_listener_func,
2207 		.local_port = MLXSW_PORT_DONT_CARE,
2208 		.trap_id = MLXSW_TRAP_ID_STP,
2209 	},
2210 	{
2211 		.func = mlxsw_sp_rx_listener_func,
2212 		.local_port = MLXSW_PORT_DONT_CARE,
2213 		.trap_id = MLXSW_TRAP_ID_LACP,
2214 	},
2215 	{
2216 		.func = mlxsw_sp_rx_listener_func,
2217 		.local_port = MLXSW_PORT_DONT_CARE,
2218 		.trap_id = MLXSW_TRAP_ID_EAPOL,
2219 	},
2220 	{
2221 		.func = mlxsw_sp_rx_listener_func,
2222 		.local_port = MLXSW_PORT_DONT_CARE,
2223 		.trap_id = MLXSW_TRAP_ID_LLDP,
2224 	},
2225 	{
2226 		.func = mlxsw_sp_rx_listener_func,
2227 		.local_port = MLXSW_PORT_DONT_CARE,
2228 		.trap_id = MLXSW_TRAP_ID_MMRP,
2229 	},
2230 	{
2231 		.func = mlxsw_sp_rx_listener_func,
2232 		.local_port = MLXSW_PORT_DONT_CARE,
2233 		.trap_id = MLXSW_TRAP_ID_MVRP,
2234 	},
2235 	{
2236 		.func = mlxsw_sp_rx_listener_func,
2237 		.local_port = MLXSW_PORT_DONT_CARE,
2238 		.trap_id = MLXSW_TRAP_ID_RPVST,
2239 	},
2240 	{
2241 		.func = mlxsw_sp_rx_listener_func,
2242 		.local_port = MLXSW_PORT_DONT_CARE,
2243 		.trap_id = MLXSW_TRAP_ID_DHCP,
2244 	},
2245 	{
2246 		.func = mlxsw_sp_rx_listener_func,
2247 		.local_port = MLXSW_PORT_DONT_CARE,
2248 		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2249 	},
2250 	{
2251 		.func = mlxsw_sp_rx_listener_func,
2252 		.local_port = MLXSW_PORT_DONT_CARE,
2253 		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2254 	},
2255 	{
2256 		.func = mlxsw_sp_rx_listener_func,
2257 		.local_port = MLXSW_PORT_DONT_CARE,
2258 		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2259 	},
2260 	{
2261 		.func = mlxsw_sp_rx_listener_func,
2262 		.local_port = MLXSW_PORT_DONT_CARE,
2263 		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2264 	},
2265 	{
2266 		.func = mlxsw_sp_rx_listener_func,
2267 		.local_port = MLXSW_PORT_DONT_CARE,
2268 		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2269 	},
2270 };
2271 
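/* Configure the RX and control trap groups, then register a listener for
 * every trap and switch its policy from forward to trap-to-CPU. The error
 * path restores the forward action before unregistering.
 */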
2272 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2273 {
2274 	char htgt_pl[MLXSW_REG_HTGT_LEN];
2275 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2276 	int i;
2277 	int err;
2278 
2279 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
2280 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2281 	if (err)
2282 		return err;
2283 
2284 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
2285 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2286 	if (err)
2287 		return err;
2288 
2289 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2290 		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
2291 						      &mlxsw_sp_rx_listener[i],
2292 						      mlxsw_sp);
2293 		if (err)
2294 			goto err_rx_listener_register;
2295 
2296 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
2297 				    mlxsw_sp_rx_listener[i].trap_id);
2298 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2299 		if (err)
2300 			goto err_rx_trap_set;
2301 	}
2302 	return 0;
2303 
2304 err_rx_trap_set:
2305 	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2306 					  &mlxsw_sp_rx_listener[i],
2307 					  mlxsw_sp);
2308 err_rx_listener_register:
2309 	for (i--; i >= 0; i--) {
2310 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2311 				    mlxsw_sp_rx_listener[i].trap_id);
2312 		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2313 
2314 		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2315 						  &mlxsw_sp_rx_listener[i],
2316 						  mlxsw_sp);
2317 	}
2318 	return err;
2319 }
2320 
2321 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2322 {
2323 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2324 	int i;
2325 
2326 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2327 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2328 				    mlxsw_sp_rx_listener[i].trap_id);
2329 		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2330 
2331 		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2332 						  &mlxsw_sp_rx_listener[i],
2333 						  mlxsw_sp);
2334 	}
2335 }
2336 
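/* Flood configuration: unknown unicast is flooded through the UC table,
 * all other traffic types through the broadcast/multicast (BM) table.
 * VLAN-unaware (vFID) bridges use the FID table type, while VLAN-aware
 * (802.1Q) bridges use the FID-offset type.
 */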
2337 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2338 				 enum mlxsw_reg_sfgc_type type,
2339 				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
2340 {
2341 	enum mlxsw_flood_table_type table_type;
2342 	enum mlxsw_sp_flood_table flood_table;
2343 	char sfgc_pl[MLXSW_REG_SFGC_LEN];
2344 
2345 	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2346 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2347 	else
2348 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2349 
2350 	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2351 		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2352 	else
2353 		flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2354 
2355 	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2356 			    flood_table);
2357 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2358 }
2359 
2360 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2361 {
2362 	int type, err;
2363 
2364 	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2365 		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2366 			continue;
2367 
2368 		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2369 					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2370 		if (err)
2371 			return err;
2372 
2373 		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2374 					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2375 		if (err)
2376 			return err;
2377 	}
2378 
2379 	return 0;
2380 }
2381 
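/* Set the fields used by the LAG hash - a full L2-L4 tuple of MACs,
 * EtherType, VLAN ID, IP addresses, L4 ports and IP protocol.
 */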
2382 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2383 {
2384 	char slcr_pl[MLXSW_REG_SLCR_LEN];
2385 
2386 	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2387 				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
2388 				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2389 				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
2390 				     MLXSW_REG_SLCR_LAG_HASH_SIP |
2391 				     MLXSW_REG_SLCR_LAG_HASH_DIP |
2392 				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
2393 				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
2394 				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2395 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2396 }
2397 
2398 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2399 			 const struct mlxsw_bus_info *mlxsw_bus_info)
2400 {
2401 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2402 	int err;
2403 
2404 	mlxsw_sp->core = mlxsw_core;
2405 	mlxsw_sp->bus_info = mlxsw_bus_info;
2406 	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
2407 	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
2408 	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
2409 
2410 	err = mlxsw_sp_base_mac_get(mlxsw_sp);
2411 	if (err) {
2412 		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
2413 		return err;
2414 	}
2415 
2416 	err = mlxsw_sp_ports_create(mlxsw_sp);
2417 	if (err) {
2418 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2419 		return err;
2420 	}
2421 
2422 	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2423 	if (err) {
2424 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
2425 		goto err_event_register;
2426 	}
2427 
2428 	err = mlxsw_sp_traps_init(mlxsw_sp);
2429 	if (err) {
2430 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
2431 		goto err_rx_listener_register;
2432 	}
2433 
2434 	err = mlxsw_sp_flood_init(mlxsw_sp);
2435 	if (err) {
2436 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
2437 		goto err_flood_init;
2438 	}
2439 
2440 	err = mlxsw_sp_buffers_init(mlxsw_sp);
2441 	if (err) {
2442 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2443 		goto err_buffers_init;
2444 	}
2445 
2446 	err = mlxsw_sp_lag_init(mlxsw_sp);
2447 	if (err) {
2448 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2449 		goto err_lag_init;
2450 	}
2451 
2452 	err = mlxsw_sp_switchdev_init(mlxsw_sp);
2453 	if (err) {
2454 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2455 		goto err_switchdev_init;
2456 	}
2457 
2458 	return 0;
2459 
2460 err_switchdev_init:
2461 err_lag_init:
2462 	mlxsw_sp_buffers_fini(mlxsw_sp);
2463 err_buffers_init:
2464 err_flood_init:
2465 	mlxsw_sp_traps_fini(mlxsw_sp);
2466 err_rx_listener_register:
2467 	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2468 err_event_register:
2469 	mlxsw_sp_ports_remove(mlxsw_sp);
2470 	return err;
2471 }
2472 
2473 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2474 {
2475 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2476 
2477 	mlxsw_sp_switchdev_fini(mlxsw_sp);
2478 	mlxsw_sp_buffers_fini(mlxsw_sp);
2479 	mlxsw_sp_traps_fini(mlxsw_sp);
2480 	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2481 	mlxsw_sp_ports_remove(mlxsw_sp);
2482 }
2483 
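/* Resource profile handed to the core on init. The two FID-offset flood
 * tables serve the VLAN-aware bridge and the two FID flood tables serve
 * vFIDs, matching the table assignment in __mlxsw_sp_flood_init().
 */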
2484 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
2485 	.used_max_vepa_channels		= 1,
2486 	.max_vepa_channels		= 0,
2487 	.used_max_lag			= 1,
2488 	.max_lag			= MLXSW_SP_LAG_MAX,
2489 	.used_max_port_per_lag		= 1,
2490 	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
2491 	.used_max_mid			= 1,
2492 	.max_mid			= MLXSW_SP_MID_MAX,
2493 	.used_max_pgt			= 1,
2494 	.max_pgt			= 0,
2495 	.used_max_system_port		= 1,
2496 	.max_system_port		= 64,
2497 	.used_max_vlan_groups		= 1,
2498 	.max_vlan_groups		= 127,
2499 	.used_max_regions		= 1,
2500 	.max_regions			= 400,
2501 	.used_flood_tables		= 1,
2502 	.used_flood_mode		= 1,
2503 	.flood_mode			= 3,
2504 	.max_fid_offset_flood_tables	= 2,
2505 	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
2506 	.max_fid_flood_tables		= 2,
2507 	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
2508 	.used_max_ib_mc			= 1,
2509 	.max_ib_mc			= 0,
2510 	.used_max_pkey			= 1,
2511 	.max_pkey			= 0,
2512 	.swid_config			= {
2513 		{
2514 			.used_type	= 1,
2515 			.type		= MLXSW_PORT_SWID_TYPE_ETH,
2516 		}
2517 	},
2518 };
2519 
2520 static struct mlxsw_driver mlxsw_sp_driver = {
2521 	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
2522 	.owner				= THIS_MODULE,
2523 	.priv_size			= sizeof(struct mlxsw_sp),
2524 	.init				= mlxsw_sp_init,
2525 	.fini				= mlxsw_sp_fini,
2526 	.port_split			= mlxsw_sp_port_split,
2527 	.port_unsplit			= mlxsw_sp_port_unsplit,
2528 	.sb_pool_get			= mlxsw_sp_sb_pool_get,
2529 	.sb_pool_set			= mlxsw_sp_sb_pool_set,
2530 	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
2531 	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
2532 	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
2533 	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
2534 	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
2535 	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
2536 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
2537 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
2538 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
2539 	.txhdr_len			= MLXSW_TXHDR_LEN,
2540 	.profile			= &mlxsw_sp_config_profile,
2541 };
2542 
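/* The FDB flush helpers below use the SFDF register to remove learned FDB
 * entries by port, by {port, FID}, by LAG or by {LAG, FID}.
 */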
2543 static int
2544 mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
2545 {
2546 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2547 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2548 
2549 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
2550 	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
2551 
2552 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2553 }
2554 
2555 static int
2556 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2557 				    u16 fid)
2558 {
2559 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2560 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2561 
2562 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2563 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2564 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2565 						mlxsw_sp_port->local_port);
2566 
2567 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2568 }
2569 
2570 static int
2571 mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2572 {
2573 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2574 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2575 
2576 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2577 	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2578 
2579 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2580 }
2581 
2582 static int
2583 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2584 				      u16 fid)
2585 {
2586 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2587 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2588 
2589 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2590 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2591 	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2592 
2593 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2594 }
2595 
2596 static int
2597 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2598 {
2599 	int err, last_err = 0;
2600 	u16 vid;
2601 
2602 	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2603 		err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2604 		if (err)
2605 			last_err = err;
2606 	}
2607 
2608 	return last_err;
2609 }
2610 
2611 static int
2612 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2613 {
2614 	int err, last_err = 0;
2615 	u16 vid;
2616 
2617 	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2618 		err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2619 		if (err)
2620 			last_err = err;
2621 	}
2622 
2623 	return last_err;
2624 }
2625 
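/* Pick the FDB flush variant - when VLAN devices (vPorts) exist on the
 * port, flush per {port/LAG, FID} for every possible VID; otherwise a
 * single per-port or per-LAG flush is enough.
 */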
2626 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2627 {
2628 	if (!list_empty(&mlxsw_sp_port->vports_list)) {
2629 		if (mlxsw_sp_port->lagged)
2630 			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2631 		else
2632 			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2633 	} else {
2634 		if (mlxsw_sp_port->lagged)
2635 			return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2636 		else
2637 			return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
	}
2638 }
2639 
2640 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2641 {
2642 	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2643 	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2644 
2645 	if (mlxsw_sp_vport->lagged)
2646 		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2647 							     fid);
2648 	else
2649 		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2650 }
2651 
2652 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2653 {
2654 	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2655 }
2656 
2657 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2658 {
2659 	struct net_device *dev = mlxsw_sp_port->dev;
2660 	int err;
2661 
2662 	/* When the port is not bridged, untagged packets are tagged with
2663 	 * PVID=VID=1, thereby creating an implicit VLAN interface in
2664 	 * the device. Remove it and let bridge code take care of its
2665 	 * own VLANs.
2666 	 */
2667 	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
2668 	if (err)
2669 		return err;
2670 
2671 	mlxsw_sp_port->learning = 1;
2672 	mlxsw_sp_port->learning_sync = 1;
2673 	mlxsw_sp_port->uc_flood = 1;
2674 	mlxsw_sp_port->bridged = 1;
2675 
2676 	return 0;
2677 }
2678 
2679 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2680 				      bool flush_fdb)
2681 {
2682 	struct net_device *dev = mlxsw_sp_port->dev;
2683 
2684 	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2685 		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2686 
2687 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2688 
2689 	mlxsw_sp_port->learning = 0;
2690 	mlxsw_sp_port->learning_sync = 0;
2691 	mlxsw_sp_port->uc_flood = 0;
2692 	mlxsw_sp_port->bridged = 0;
2693 
2694 	/* Add the implicit VLAN interface back to the device, so that
2695 	 * untagged packets will be classified to the default vFID.
2696 	 */
2697 	return mlxsw_sp_port_add_vid(dev, 0, 1);
2698 }
2699 
2700 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2701 					 struct net_device *br_dev)
2702 {
2703 	return !mlxsw_sp->master_bridge.dev ||
2704 	       mlxsw_sp->master_bridge.dev == br_dev;
2705 }
2706 
2707 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2708 				       struct net_device *br_dev)
2709 {
2710 	mlxsw_sp->master_bridge.dev = br_dev;
2711 	mlxsw_sp->master_bridge.ref_count++;
2712 }
2713 
2714 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
2715 				       struct net_device *br_dev)
2716 {
2717 	if (--mlxsw_sp->master_bridge.ref_count == 0)
2718 		mlxsw_sp->master_bridge.dev = NULL;
2719 }
2720 
2721 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2722 {
2723 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2724 
2725 	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
2726 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2727 }
2728 
2729 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2730 {
2731 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2732 
2733 	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
2734 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2735 }
2736 
2737 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2738 				     u16 lag_id, u8 port_index)
2739 {
2740 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2741 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2742 
2743 	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
2744 				      lag_id, port_index);
2745 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2746 }
2747 
2748 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2749 					u16 lag_id)
2750 {
2751 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2752 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2753 
2754 	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
2755 					 lag_id);
2756 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2757 }
2758 
2759 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
2760 					u16 lag_id)
2761 {
2762 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2763 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2764 
2765 	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
2766 					lag_id);
2767 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2768 }
2769 
2770 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
2771 					 u16 lag_id)
2772 {
2773 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2774 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2775 
2776 	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
2777 					 lag_id);
2778 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2779 }
2780 
2781 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2782 				  struct net_device *lag_dev,
2783 				  u16 *p_lag_id)
2784 {
2785 	struct mlxsw_sp_upper *lag;
2786 	int free_lag_id = -1;
2787 	int i;
2788 
2789 	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2790 		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2791 		if (lag->ref_count) {
2792 			if (lag->dev == lag_dev) {
2793 				*p_lag_id = i;
2794 				return 0;
2795 			}
2796 		} else if (free_lag_id < 0) {
2797 			free_lag_id = i;
2798 		}
2799 	}
2800 	if (free_lag_id < 0)
2801 		return -EBUSY;
2802 	*p_lag_id = free_lag_id;
2803 	return 0;
2804 }
2805 
2806 static bool
2807 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2808 			  struct net_device *lag_dev,
2809 			  struct netdev_lag_upper_info *lag_upper_info)
2810 {
2811 	u16 lag_id;
2812 
2813 	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2814 		return false;
2815 	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2816 		return false;
2817 	return true;
2818 }
2819 
2820 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2821 				       u16 lag_id, u8 *p_port_index)
2822 {
2823 	int i;
2824 
2825 	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2826 		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2827 			*p_port_index = i;
2828 			return 0;
2829 		}
2830 	}
2831 	return -EBUSY;
2832 }
2833 
2834 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2835 				  struct net_device *lag_dev)
2836 {
2837 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2838 	struct mlxsw_sp_upper *lag;
2839 	u16 lag_id;
2840 	u8 port_index;
2841 	int err;
2842 
2843 	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
2844 	if (err)
2845 		return err;
2846 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2847 	if (!lag->ref_count) {
2848 		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
2849 		if (err)
2850 			return err;
2851 		lag->dev = lag_dev;
2852 	}
2853 
2854 	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
2855 	if (err)
2856 		return err;
2857 	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
2858 	if (err)
2859 		goto err_col_port_add;
2860 	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
2861 	if (err)
2862 		goto err_col_port_enable;
2863 
2864 	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
2865 				   mlxsw_sp_port->local_port);
2866 	mlxsw_sp_port->lag_id = lag_id;
2867 	mlxsw_sp_port->lagged = 1;
2868 	lag->ref_count++;
2869 	return 0;
2870 
2871 err_col_port_enable:
2872 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2873 err_col_port_add:
2874 	if (!lag->ref_count)
2875 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2876 	return err;
2877 }
2878 
2879 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2880 				       struct net_device *br_dev,
2881 				       bool flush_fdb);
2882 
2883 static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2884 				   struct net_device *lag_dev)
2885 {
2886 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2887 	struct mlxsw_sp_port *mlxsw_sp_vport;
2888 	struct mlxsw_sp_upper *lag;
2889 	u16 lag_id = mlxsw_sp_port->lag_id;
2890 	int err;
2891 
2892 	if (!mlxsw_sp_port->lagged)
2893 		return 0;
2894 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2895 	WARN_ON(lag->ref_count == 0);
2896 
2897 	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
2898 	if (err)
2899 		return err;
2900 	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2901 	if (err)
2902 		return err;
2903 
2904 	/* When we leave a LAG device that has bridges built on top,
2905 	 * their teardown sequence is never issued and we need to
2906 	 * invoke the necessary cleanup routines ourselves.
2907 	 */
2908 	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
2909 			    vport.list) {
2910 		struct net_device *br_dev;
2911 
2912 		if (!mlxsw_sp_vport->bridged)
2913 			continue;
2914 
2915 		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2916 		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
2917 	}
2918 
2919 	if (mlxsw_sp_port->bridged) {
2920 		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
2921 		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
2922 		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
2923 	}
2924 
2925 	if (lag->ref_count == 1) {
2926 		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
2927 			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2928 		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2929 		if (err)
2930 			return err;
2931 	}
2932 
2933 	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
2934 				     mlxsw_sp_port->local_port);
2935 	mlxsw_sp_port->lagged = 0;
2936 	lag->ref_count--;
2937 	return 0;
2938 }
2939 
2940 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2941 				      u16 lag_id)
2942 {
2943 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2944 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2945 
2946 	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
2947 					 mlxsw_sp_port->local_port);
2948 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2949 }
2950 
2951 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2952 					 u16 lag_id)
2953 {
2954 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2955 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2956 
2957 	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
2958 					    mlxsw_sp_port->local_port);
2959 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2960 }
2961 
2962 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2963 				       bool lag_tx_enabled)
2964 {
2965 	if (lag_tx_enabled)
2966 		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2967 						  mlxsw_sp_port->lag_id);
2968 	else
2969 		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2970 						     mlxsw_sp_port->lag_id);
2971 }
2972 
2973 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
2974 				     struct netdev_lag_lower_state_info *info)
2975 {
2976 	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
2977 }
2978 
2979 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2980 				   struct net_device *vlan_dev)
2981 {
2982 	struct mlxsw_sp_port *mlxsw_sp_vport;
2983 	u16 vid = vlan_dev_vlan_id(vlan_dev);
2984 
2985 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2986 	if (WARN_ON(!mlxsw_sp_vport))
2987 		return -EINVAL;
2990 
2991 	mlxsw_sp_vport->dev = vlan_dev;
2992 
2993 	return 0;
2994 }
2995 
2996 static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2997 				     struct net_device *vlan_dev)
2998 {
2999 	struct mlxsw_sp_port *mlxsw_sp_vport;
3000 	u16 vid = vlan_dev_vlan_id(vlan_dev);
3001 
3002 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3003 	if (WARN_ON(!mlxsw_sp_vport))
3004 		return -EINVAL;
3007 
3008 	/* When removing a VLAN device while still bridged we should first
3009 	 * remove it from the bridge, as we receive the bridge's notification
3010 	 * when the vPort is already gone.
3011 	 */
3012 	if (mlxsw_sp_vport->bridged) {
3013 		struct net_device *br_dev;
3014 
3015 		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
3016 		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
3017 	}
3018 
3019 	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3020 
3021 	return 0;
3022 }
3023 
3024 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3025 					       unsigned long event, void *ptr)
3026 {
3027 	struct netdev_notifier_changeupper_info *info;
3028 	struct mlxsw_sp_port *mlxsw_sp_port;
3029 	struct net_device *upper_dev;
3030 	struct mlxsw_sp *mlxsw_sp;
3031 	int err;
3032 
3033 	mlxsw_sp_port = netdev_priv(dev);
3034 	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3035 	info = ptr;
3036 
3037 	switch (event) {
3038 	case NETDEV_PRECHANGEUPPER:
3039 		upper_dev = info->upper_dev;
3040 		if (!info->master || !info->linking)
3041 			break;
3042 		/* A HW limitation forbids putting a port in multiple bridges. */
3043 		if (netif_is_bridge_master(upper_dev) &&
3044 		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
3045 			return NOTIFY_BAD;
3046 		if (netif_is_lag_master(upper_dev) &&
3047 		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
3048 					       info->upper_info))
3049 			return NOTIFY_BAD;
3050 		break;
3051 	case NETDEV_CHANGEUPPER:
3052 		upper_dev = info->upper_dev;
3053 		if (is_vlan_dev(upper_dev)) {
3054 			if (info->linking) {
3055 				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
3056 							      upper_dev);
3057 				if (err) {
3058 					netdev_err(dev, "Failed to link VLAN device\n");
3059 					return NOTIFY_BAD;
3060 				}
3061 			} else {
3062 				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
3063 								upper_dev);
3064 				if (err) {
3065 					netdev_err(dev, "Failed to unlink VLAN device\n");
3066 					return NOTIFY_BAD;
3067 				}
3068 			}
3069 		} else if (netif_is_bridge_master(upper_dev)) {
3070 			if (info->linking) {
3071 				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
3072 				if (err) {
3073 					netdev_err(dev, "Failed to join bridge\n");
3074 					return NOTIFY_BAD;
3075 				}
3076 				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
3077 			} else {
3078 				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
3079 								 true);
3080 				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
3081 				if (err) {
3082 					netdev_err(dev, "Failed to leave bridge\n");
3083 					return NOTIFY_BAD;
3084 				}
3085 			}
3086 		} else if (netif_is_lag_master(upper_dev)) {
3087 			if (info->linking) {
3088 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
3089 							     upper_dev);
3090 				if (err) {
3091 					netdev_err(dev, "Failed to join link aggregation\n");
3092 					return NOTIFY_BAD;
3093 				}
3094 			} else {
3095 				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
3096 							      upper_dev);
3097 				if (err) {
3098 					netdev_err(dev, "Failed to leave link aggregation\n");
3099 					return NOTIFY_BAD;
3100 				}
3101 			}
3102 		}
3103 		break;
3104 	}
3105 
3106 	return NOTIFY_DONE;
3107 }
3108 
3109 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3110 					       unsigned long event, void *ptr)
3111 {
3112 	struct netdev_notifier_changelowerstate_info *info;
3113 	struct mlxsw_sp_port *mlxsw_sp_port;
3114 	int err;
3115 
3116 	mlxsw_sp_port = netdev_priv(dev);
3117 	info = ptr;
3118 
3119 	switch (event) {
3120 	case NETDEV_CHANGELOWERSTATE:
3121 		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3122 			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3123 							info->lower_state_info);
3124 			if (err)
3125 				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3126 		}
3127 		break;
3128 	}
3129 
3130 	return NOTIFY_DONE;
3131 }
3132 
3133 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3134 					 unsigned long event, void *ptr)
3135 {
3136 	switch (event) {
3137 	case NETDEV_PRECHANGEUPPER:
3138 	case NETDEV_CHANGEUPPER:
3139 		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3140 	case NETDEV_CHANGELOWERSTATE:
3141 		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3142 	}
3143 
3144 	return NOTIFY_DONE;
3145 }
3146 
3147 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3148 					unsigned long event, void *ptr)
3149 {
3150 	struct net_device *dev;
3151 	struct list_head *iter;
3152 	int ret;
3153 
3154 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
3155 		if (mlxsw_sp_port_dev_check(dev)) {
3156 			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3157 			if (ret == NOTIFY_BAD)
3158 				return ret;
3159 		}
3160 	}
3161 
3162 	return NOTIFY_DONE;
3163 }
3164 
3165 static struct mlxsw_sp_vfid *
3166 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3167 		      const struct net_device *br_dev)
3168 {
3169 	struct mlxsw_sp_vfid *vfid;
3170 
3171 	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
3172 		if (vfid->br_dev == br_dev)
3173 			return vfid;
3174 	}
3175 
3176 	return NULL;
3177 }
3178 
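/* The vFID space is partitioned - the first MLXSW_SP_VFID_PORT_MAX vFIDs
 * back VLAN devices on ports, and the remaining MLXSW_SP_VFID_BR_MAX
 * vFIDs back VLAN devices that were put under a bridge.
 */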
3179 static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
3180 {
3181 	return vfid - MLXSW_SP_VFID_PORT_MAX;
3182 }
3183 
3184 static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
3185 {
3186 	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
3187 }
3188 
3189 static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
3190 {
3191 	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
3192 				   MLXSW_SP_VFID_BR_MAX);
3193 }
3194 
3195 static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
3196 						     struct net_device *br_dev)
3197 {
3198 	struct device *dev = mlxsw_sp->bus_info->dev;
3199 	struct mlxsw_sp_vfid *vfid;
3200 	u16 n_vfid;
3201 	int err;
3202 
3203 	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
3204 	if (n_vfid == MLXSW_SP_VFID_MAX) {
3205 		dev_err(dev, "No available vFIDs\n");
3206 		return ERR_PTR(-ERANGE);
3207 	}
3208 
3209 	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
3210 	if (err) {
3211 		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
3212 		return ERR_PTR(err);
3213 	}
3214 
3215 	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
3216 	if (!vfid)
3217 		goto err_allocate_vfid;
3218 
3219 	vfid->vfid = n_vfid;
3220 	vfid->br_dev = br_dev;
3221 
3222 	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
3223 	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
3224 
3225 	return vfid;
3226 
3227 err_allocate_vfid:
3228 	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
3229 	return ERR_PTR(-ENOMEM);
3230 }
3231 
3232 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
3233 				     struct mlxsw_sp_vfid *vfid)
3234 {
3235 	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
3236 
3237 	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
3238 	list_del(&vfid->list);
3239 
3240 	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
3241 
3242 	kfree(vfid);
3243 }
3244 
3245 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
3246 				       struct net_device *br_dev,
3247 				       bool flush_fdb)
3248 {
3249 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3250 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3251 	struct net_device *dev = mlxsw_sp_vport->dev;
3252 	struct mlxsw_sp_vfid *vfid, *new_vfid;
3253 	int err;
3254 
3255 	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3256 	if (WARN_ON(!vfid))
3257 		return -EINVAL;
3260 
3261 	/* We need a vFID to go back to after leaving the bridge's vFID. */
3262 	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
3263 	if (!new_vfid) {
3264 		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
3265 		if (IS_ERR(new_vfid)) {
3266 			netdev_err(dev, "Failed to create vFID for VID=%d\n",
3267 				   vid);
3268 			return PTR_ERR(new_vfid);
3269 		}
3270 	}
3271 
3272 	/* Invalidate existing {Port, VID} to vFID mapping and create a new
3273 	 * one for the new vFID.
3274 	 */
3275 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3276 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3277 					   false,
3278 					   mlxsw_sp_vfid_to_fid(vfid->vfid),
3279 					   vid);
3280 	if (err) {
3281 		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3282 			   vfid->vfid);
3283 		goto err_port_vid_to_fid_invalidate;
3284 	}
3285 
3286 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3287 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3288 					   true,
3289 					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
3290 					   vid);
3291 	if (err) {
3292 		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3293 			   new_vfid->vfid);
3294 		goto err_port_vid_to_fid_validate;
3295 	}
3296 
3297 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3298 	if (err) {
3299 		netdev_err(dev, "Failed to disable learning\n");
3300 		goto err_port_vid_learning_set;
3301 	}
3302 
3303 	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
3304 				       false);
3305 	if (err) {
3306 		netdev_err(dev, "Failed to clear flooding\n");
3307 		goto err_vport_flood_set;
3308 	}
3309 
3310 	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3311 					  MLXSW_REG_SPMS_STATE_FORWARDING);
3312 	if (err) {
3313 		netdev_err(dev, "Failed to set STP state\n");
3314 		goto err_port_stp_state_set;
3315 	}
3316 
3317 	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
3318 		netdev_err(dev, "Failed to flush FDB\n");
3319 
3320 	/* Switch between the vFIDs and destroy the old one if needed. */
3321 	new_vfid->nr_vports++;
3322 	mlxsw_sp_vport->vport.vfid = new_vfid;
3323 	vfid->nr_vports--;
3324 	if (!vfid->nr_vports)
3325 		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3326 
3327 	mlxsw_sp_vport->learning = 0;
3328 	mlxsw_sp_vport->learning_sync = 0;
3329 	mlxsw_sp_vport->uc_flood = 0;
3330 	mlxsw_sp_vport->bridged = 0;
3331 
3332 	return 0;
3333 
3334 err_port_stp_state_set:
3335 err_vport_flood_set:
3336 err_port_vid_learning_set:
3337 err_port_vid_to_fid_validate:
3338 err_port_vid_to_fid_invalidate:
3339 	/* Rollback vFID only if new. */
3340 	if (!new_vfid->nr_vports)
3341 		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
3342 	return err;
3343 }
3344 
3345 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3346 				      struct net_device *br_dev)
3347 {
3348 	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
3349 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3350 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3351 	struct net_device *dev = mlxsw_sp_vport->dev;
3352 	struct mlxsw_sp_vfid *vfid;
3353 	int err;
3354 
3355 	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3356 	if (!vfid) {
3357 		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
3358 		if (IS_ERR(vfid)) {
3359 			netdev_err(dev, "Failed to create bridge vFID\n");
3360 			return PTR_ERR(vfid);
3361 		}
3362 	}
3363 
3364 	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
3365 	if (err) {
3366 		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
3367 			   vfid->vfid);
3368 		goto err_port_flood_set;
3369 	}
3370 
3371 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
3372 	if (err) {
3373 		netdev_err(dev, "Failed to enable learning\n");
3374 		goto err_port_vid_learning_set;
3375 	}
3376 
3377 	/* We need to invalidate existing {Port, VID} to vFID mapping and
3378 	 * create a new one for the bridge's vFID.
3379 	 */
3380 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3381 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3382 					   false,
3383 					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
3384 					   vid);
3385 	if (err) {
3386 		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3387 			   old_vfid->vfid);
3388 		goto err_port_vid_to_fid_invalidate;
3389 	}
3390 
3391 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3392 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3393 					   true,
3394 					   mlxsw_sp_vfid_to_fid(vfid->vfid),
3395 					   vid);
3396 	if (err) {
3397 		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3398 			   vfid->vfid);
3399 		goto err_port_vid_to_fid_validate;
3400 	}
3401 
3402 	/* Switch between the vFIDs and destroy the old one if needed. */
3403 	vfid->nr_vports++;
3404 	mlxsw_sp_vport->vport.vfid = vfid;
3405 	old_vfid->nr_vports--;
3406 	if (!old_vfid->nr_vports)
3407 		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
3408 
3409 	mlxsw_sp_vport->learning = 1;
3410 	mlxsw_sp_vport->learning_sync = 1;
3411 	mlxsw_sp_vport->uc_flood = 1;
3412 	mlxsw_sp_vport->bridged = 1;
3413 
3414 	return 0;
3415 
3416 err_port_vid_to_fid_validate:
3417 	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3418 				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
3419 				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
3420 err_port_vid_to_fid_invalidate:
3421 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3422 err_port_vid_learning_set:
3423 	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
3424 err_port_flood_set:
3425 	if (!vfid->nr_vports)
3426 		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3427 	return err;
3428 }
3429 
3430 static bool
3431 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3432 				  const struct net_device *br_dev)
3433 {
3434 	struct mlxsw_sp_port *mlxsw_sp_vport;
3435 
3436 	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3437 			    vport.list) {
3438 		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
3439 			return false;
3440 	}
3441 
3442 	return true;
3443 }
3444 
3445 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
3446 					  unsigned long event, void *ptr,
3447 					  u16 vid)
3448 {
3449 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3450 	struct netdev_notifier_changeupper_info *info = ptr;
3451 	struct mlxsw_sp_port *mlxsw_sp_vport;
3452 	struct net_device *upper_dev;
3453 	int err;
3454 
3455 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3456 
3457 	switch (event) {
3458 	case NETDEV_PRECHANGEUPPER:
3459 		upper_dev = info->upper_dev;
3460 		if (!info->master || !info->linking)
3461 			break;
3462 		if (!netif_is_bridge_master(upper_dev))
3463 			return NOTIFY_BAD;
3464 		/* We can't have multiple VLAN interfaces configured on
3465 		 * the same port and being members in the same bridge.
3466 		 */
3467 		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
3468 						       upper_dev))
3469 			return NOTIFY_BAD;
3470 		break;
3471 	case NETDEV_CHANGEUPPER:
3472 		upper_dev = info->upper_dev;
3473 		if (!info->master)
3474 			break;
3475 		if (info->linking) {
3476 			if (WARN_ON(!mlxsw_sp_vport))
3477 				return NOTIFY_BAD;
3480 			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
3481 							 upper_dev);
3482 			if (err) {
3483 				netdev_err(dev, "Failed to join bridge\n");
3484 				return NOTIFY_BAD;
3485 			}
3486 		} else {
3487 			/* We ignore bridge's unlinking notifications if vPort
3488 			 * is gone, since we already left the bridge when the
3489 			 * VLAN device was unlinked from the real device.
3490 			 */
3491 			if (!mlxsw_sp_vport)
3492 				return NOTIFY_DONE;
3493 			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
3494 							  upper_dev, true);
3495 			if (err) {
3496 				netdev_err(dev, "Failed to leave bridge\n");
3497 				return NOTIFY_BAD;
3498 			}
3499 		}
3500 	}
3501 
3502 	return NOTIFY_DONE;
3503 }
3504 
3505 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3506 					      unsigned long event, void *ptr,
3507 					      u16 vid)
3508 {
3509 	struct net_device *dev;
3510 	struct list_head *iter;
3511 	int ret;
3512 
3513 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
3514 		if (mlxsw_sp_port_dev_check(dev)) {
3515 			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3516 							     vid);
3517 			if (ret == NOTIFY_BAD)
3518 				return ret;
3519 		}
3520 	}
3521 
3522 	return NOTIFY_DONE;
3523 }
3524 
3525 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3526 					 unsigned long event, void *ptr)
3527 {
3528 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3529 	u16 vid = vlan_dev_vlan_id(vlan_dev);
3530 
3531 	if (mlxsw_sp_port_dev_check(real_dev))
3532 		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3533 						      vid);
3534 	else if (netif_is_lag_master(real_dev))
3535 		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3536 							  vid);
3537 
3538 	return NOTIFY_DONE;
3539 }
3540 
3541 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3542 				    unsigned long event, void *ptr)
3543 {
3544 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3545 
3546 	if (mlxsw_sp_port_dev_check(dev))
3547 		return mlxsw_sp_netdevice_port_event(dev, event, ptr);
3548 
3549 	if (netif_is_lag_master(dev))
3550 		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3551 
3552 	if (is_vlan_dev(dev))
3553 		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3554 
3555 	return NOTIFY_DONE;
3556 }
3557 
3558 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
3559 	.notifier_call = mlxsw_sp_netdevice_event,
3560 };
3561 
3562 static int __init mlxsw_sp_module_init(void)
3563 {
3564 	int err;
3565 
3566 	err = register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	if (err)
		return err;
3567 	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3568 	if (err)
3569 		goto err_core_driver_register;
3570 	return 0;
3571 
3572 err_core_driver_register:
3573 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3574 	return err;
3575 }
3576 
3577 static void __exit mlxsw_sp_module_exit(void)
3578 {
3579 	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
3580 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3581 }
3582 
3583 module_init(mlxsw_sp_module_init);
3584 module_exit(mlxsw_sp_module_exit);
3585 
3586 MODULE_LICENSE("Dual BSD/GPL");
3587 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3588 MODULE_DESCRIPTION("Mellanox Spectrum driver");
3589 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
3590