/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
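
/* Taken together, the items above describe the Tx header that is pushed in
 * front of every transmitted frame (MLXSW_TXHDR_LEN, 16 bytes): word 0
 * (0x00) carries version, ctl, proto, rx_is_router, fid_valid, swid,
 * control_tclass and etclass; word 1 (0x04) carries port_mid in bits 31:16;
 * word 2 (0x08) carries fid in bits 15:0; word 3 (0x0C) carries type in
 * bits 3:0. Each MLXSW_ITEM32(tx, hdr, <name>, offset, shift, width) line
 * generates the mlxsw_tx_hdr_<name>_set() helper used below.
 */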

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
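
/* Everything the driver transmits from the host CPU is marked as an
 * Ethernet control packet (ctl and type above) on the control TClass, with
 * port_mid naming the egress port directly. Per the tx_hdr_port_mid
 * description, such packets bypass the switch's forwarding lookup; fid and
 * fid_valid are left at zero by the memset().
 */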

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
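
/* SPAN entries are reference counted: _get() either takes another reference
 * on an existing entry for the destination port or creates one with
 * ref_count of one, while _put() drops a reference and tears down the
 * hardware analyzer entry (MPAT) once the count reaches zero.
 */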

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
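
/* Worked example, assuming mlxsw_sp_bytes_cells() rounds up using the
 * 96-byte Spectrum cell: an MTU of 1518 gives 1518 * 5 / 2 = 3795 bytes,
 * i.e. 40 cells, plus one spare cell = 41 cells reserved for the mirrored
 * port. The 2.5x factor presumably leaves headroom for frames already in
 * flight while the mirrored copy is queued.
 */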

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
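
/* Each port derives its MAC address from the switch base MAC by adding
 * local_port to the last byte only; this assumes the base MAC is allocated
 * such that the addition never carries into the preceding byte.
 */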

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
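
/* The MTU programmed via PMTU covers the frame as seen by the ASIC, hence
 * the adjustment above: a user MTU of 1500 is written as
 * 1500 + MLXSW_TXHDR_LEN + ETH_HLEN = 1500 + 16 + 14 = 1530 bytes.
 */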

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
			      vid_end, learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						learn_enable);
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}
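
/* Worked example, again assuming a 96-byte cell: for an MTU of 1518 and a
 * PFC delay of 32768 bit times, DIV_ROUND_UP(32768, BITS_PER_BYTE) = 4096
 * bytes = 43 cells, giving 2 * 43 + 16 (the MTU in cells) = 102 cells of
 * delay allowance.
 */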

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
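
/* For each ingress PG buffer, the loop above checks whether any priority
 * maps to it via prio_tc (PFC status is taken from the first such
 * priority). A used buffer is sized to the reserved threshold (two MTUs in
 * cells) plus the delay allowance, and is lossy only when neither PFC nor
 * global pause applies to it.
 */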

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
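
/* Examples: a non-split port on module 4 is reported as "p5", while a split
 * port on module 4 with width 2 and lane 2 is reported as "p5s1"
 * (lane / width = 2 / 2 = 1).
 */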

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	to_port = mlxsw_sp->ports[mirror->to_local_port];
	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = cls->cookie;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	switch (tc->type) {
	case TC_SETUP_MATCHALL:
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
						       proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
						tc->cls_flower);
			return 0;
		case TC_CLSFLOWER_STATS:
			return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
						     tc->cls_flower);
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
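
/* With the tables above this works out to 19 IEEE 802.3 counters plus
 * (8 per-prio + 2 per-TC) counters for each of the 8 traffic classes:
 * 19 + (8 + 2) * 8 = 99 strings/values per port.
 */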

static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
1857 				  data, data_index);
1858 	data_index = MLXSW_SP_PORT_HW_STATS_LEN;
1859 
1860 	/* Per-Priority Counters */
1861 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1862 		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
1863 					  data, data_index);
1864 		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1865 	}
1866 
1867 	/* Per-TC Counters */
1868 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1869 		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
1870 					  data, data_index);
1871 		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
1872 	}
1873 }
1874 
1875 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1876 {
1877 	switch (sset) {
1878 	case ETH_SS_STATS:
1879 		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
1880 	default:
1881 		return -EOPNOTSUPP;
1882 	}
1883 }
1884 
1885 struct mlxsw_sp_port_link_mode {
1886 	enum ethtool_link_mode_bit_indices mask_ethtool;
1887 	u32 mask;
1888 	u32 speed;
1889 };
1890 
1891 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1892 	{
1893 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
1894 		.mask_ethtool	= ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1895 		.speed		= SPEED_100,
1896 	},
1897 	{
1898 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1899 				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
1900 		.mask_ethtool	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1901 		.speed		= SPEED_1000,
1902 	},
1903 	{
1904 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
1905 		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1906 		.speed		= SPEED_10000,
1907 	},
1908 	{
1909 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1910 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
1911 		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1912 		.speed		= SPEED_10000,
1913 	},
1914 	{
1915 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1916 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1917 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1918 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
1919 		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1920 		.speed		= SPEED_10000,
1921 	},
1922 	{
1923 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
1924 		.mask_ethtool	= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
1925 		.speed		= SPEED_20000,
1926 	},
1927 	{
1928 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
1929 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1930 		.speed		= SPEED_40000,
1931 	},
1932 	{
1933 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
1934 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1935 		.speed		= SPEED_40000,
1936 	},
1937 	{
1938 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
1939 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1940 		.speed		= SPEED_40000,
1941 	},
1942 	{
1943 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
1944 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1945 		.speed		= SPEED_40000,
1946 	},
1947 	{
1948 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
1949 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1950 		.speed		= SPEED_25000,
1951 	},
1952 	{
1953 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
1954 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1955 		.speed		= SPEED_25000,
1956 	},
1957 	{
1958 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1959 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1960 		.speed		= SPEED_25000,
1961 	},
1962 	{
1963 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1964 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1965 		.speed		= SPEED_25000,
1966 	},
1967 	{
1968 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
1969 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1970 		.speed		= SPEED_50000,
1971 	},
1972 	{
1973 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1974 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1975 		.speed		= SPEED_50000,
1976 	},
1977 	{
1978 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
1979 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1980 		.speed		= SPEED_50000,
1981 	},
1982 	{
1983 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1984 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
1985 		.speed		= SPEED_56000,
1986 	},
1987 	{
1988 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1989 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
1990 		.speed		= SPEED_56000,
1991 	},
1992 	{
1993 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1994 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
1995 		.speed		= SPEED_56000,
1996 	},
1997 	{
1998 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1999 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2000 		.speed		= SPEED_56000,
2001 	},
2002 	{
2003 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2004 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2005 		.speed		= SPEED_100000,
2006 	},
2007 	{
2008 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2009 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2010 		.speed		= SPEED_100000,
2011 	},
2012 	{
2013 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2014 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2015 		.speed		= SPEED_100000,
2016 	},
2017 	{
2018 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2019 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2020 		.speed		= SPEED_100000,
2021 	},
2022 };
2023 
2024 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2025 
2026 static void
2027 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2028 				  struct ethtool_link_ksettings *cmd)
2029 {
2030 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2031 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2032 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2033 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2034 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2035 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2036 		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2037 
2038 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2039 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2040 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2041 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2042 			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2043 		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2044 }
2045 
2046 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2047 {
2048 	int i;
2049 
2050 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2051 		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2052 			__set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2053 				  mode);
2054 	}
2055 }
2056 
2057 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2058 					    struct ethtool_link_ksettings *cmd)
2059 {
2060 	u32 speed = SPEED_UNKNOWN;
2061 	u8 duplex = DUPLEX_UNKNOWN;
2062 	int i;
2063 
2064 	if (!carrier_ok)
2065 		goto out;
2066 
2067 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2068 		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2069 			speed = mlxsw_sp_port_link_mode[i].speed;
2070 			duplex = DUPLEX_FULL;
2071 			break;
2072 		}
2073 	}
2074 out:
2075 	cmd->base.speed = speed;
2076 	cmd->base.duplex = duplex;
2077 }
2078 
2079 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2080 {
2081 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2082 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2083 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2084 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2085 		return PORT_FIBRE;
2086 
2087 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2088 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2089 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2090 		return PORT_DA;
2091 
2092 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2093 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2094 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2095 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2096 		return PORT_NONE;
2097 
2098 	return PORT_OTHER;
2099 }
2100 
2101 static u32
2102 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2103 {
2104 	u32 ptys_proto = 0;
2105 	int i;
2106 
2107 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2108 		if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2109 			     cmd->link_modes.advertising))
2110 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2111 	}
2112 	return ptys_proto;
2113 }
2114 
2115 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2116 {
2117 	u32 ptys_proto = 0;
2118 	int i;
2119 
2120 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2121 		if (speed == mlxsw_sp_port_link_mode[i].speed)
2122 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2123 	}
2124 	return ptys_proto;
2125 }
2126 
2127 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2128 {
2129 	u32 ptys_proto = 0;
2130 	int i;
2131 
2132 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2133 		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2134 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2135 	}
2136 	return ptys_proto;
2137 }
2138 
2139 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2140 					     struct ethtool_link_ksettings *cmd)
2141 {
2142 	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2143 	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2144 	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2145 
2146 	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2147 	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2148 }
2149 
2150 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2151 					     struct ethtool_link_ksettings *cmd)
2152 {
2153 	if (!autoneg)
2154 		return;
2155 
2156 	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2157 	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2158 }
2159 
2160 static void
2161 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2162 				    struct ethtool_link_ksettings *cmd)
2163 {
2164 	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2165 		return;
2166 
2167 	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2168 	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2169 }
2170 
2171 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2172 					    struct ethtool_link_ksettings *cmd)
2173 {
2174 	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2175 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2176 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2177 	char ptys_pl[MLXSW_REG_PTYS_LEN];
2178 	u8 autoneg_status;
2179 	bool autoneg;
2180 	int err;
2181 
2182 	autoneg = mlxsw_sp_port->link.autoneg;
2183 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2184 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2185 	if (err)
2186 		return err;
2187 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2188 				  &eth_proto_oper);
2189 
2190 	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2191 
2192 	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2193 
2194 	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2195 	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2196 	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2197 
2198 	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2199 	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2200 	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2201 					cmd);
2202 
2203 	return 0;
2204 }
2205 
2206 static int
2207 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2208 				 const struct ethtool_link_ksettings *cmd)
2209 {
2210 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2211 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2212 	char ptys_pl[MLXSW_REG_PTYS_LEN];
2213 	u32 eth_proto_cap, eth_proto_new;
2214 	bool autoneg;
2215 	int err;
2216 
2217 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2218 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2219 	if (err)
2220 		return err;
2221 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2222 
2223 	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2224 	eth_proto_new = autoneg ?
2225 		mlxsw_sp_to_ptys_advert_link(cmd) :
2226 		mlxsw_sp_to_ptys_speed(cmd->base.speed);
2227 
2228 	eth_proto_new = eth_proto_new & eth_proto_cap;
2229 	if (!eth_proto_new) {
2230 		netdev_err(dev, "No supported speed requested\n");
2231 		return -EINVAL;
2232 	}
2233 
2234 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2235 				eth_proto_new);
2236 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2237 	if (err)
2238 		return err;
2239 
2240 	if (!netif_running(dev))
2241 		return 0;
2242 
2243 	mlxsw_sp_port->link.autoneg = autoneg;
2244 
2245 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2246 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2247 
2248 	return 0;
2249 }
2250 
2251 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2252 	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
2253 	.get_link		= ethtool_op_get_link,
2254 	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
2255 	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
2256 	.get_strings		= mlxsw_sp_port_get_strings,
2257 	.set_phys_id		= mlxsw_sp_port_set_phys_id,
2258 	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
2259 	.get_sset_count		= mlxsw_sp_port_get_sset_count,
2260 	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
2261 	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
2262 };
2263 
2264 static int
2265 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2266 {
2267 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2268 	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2269 	char ptys_pl[MLXSW_REG_PTYS_LEN];
2270 	u32 eth_proto_admin;
2271 
2272 	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2273 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2274 				eth_proto_admin);
2275 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2276 }
2277 
2278 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2279 			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2280 			  bool dwrr, u8 dwrr_weight)
2281 {
2282 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2283 	char qeec_pl[MLXSW_REG_QEEC_LEN];
2284 
2285 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2286 			    next_index);
2287 	mlxsw_reg_qeec_de_set(qeec_pl, true);
2288 	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2289 	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2290 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2291 }
2292 
2293 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2294 				  enum mlxsw_reg_qeec_hr hr, u8 index,
2295 				  u8 next_index, u32 maxrate)
2296 {
2297 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2298 	char qeec_pl[MLXSW_REG_QEEC_LEN];
2299 
2300 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2301 			    next_index);
2302 	mlxsw_reg_qeec_mase_set(qeec_pl, true);
2303 	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2304 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2305 }
2306 
2307 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2308 			      u8 switch_prio, u8 tclass)
2309 {
2310 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2311 	char qtct_pl[MLXSW_REG_QTCT_LEN];
2312 
2313 	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2314 			    tclass);
2315 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2316 }
2317 
2318 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2319 {
2320 	int err, i;
2321 
2322 	/* Setup the elements hierarcy, so that each TC is linked to
2323 	 * one subgroup, which are all member in the same group.
2324 	 */
2325 	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2326 				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2327 				    0);
2328 	if (err)
2329 		return err;
2330 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2331 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2332 					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2333 					    0, false, 0);
2334 		if (err)
2335 			return err;
2336 	}
2337 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2338 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2339 					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2340 					    false, 0);
2341 		if (err)
2342 			return err;
2343 	}
2344 
2345 	/* Make sure the max shaper is disabled in all hierarcies that
2346 	 * support it.
2347 	 */
2348 	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2349 					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2350 					    MLXSW_REG_QEEC_MAS_DIS);
2351 	if (err)
2352 		return err;
2353 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2354 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2355 						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2356 						    i, 0,
2357 						    MLXSW_REG_QEEC_MAS_DIS);
2358 		if (err)
2359 			return err;
2360 	}
2361 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2362 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2363 						    MLXSW_REG_QEEC_HIERARCY_TC,
2364 						    i, i,
2365 						    MLXSW_REG_QEEC_MAS_DIS);
2366 		if (err)
2367 			return err;
2368 	}
2369 
2370 	/* Map all priorities to traffic class 0. */
2371 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2372 		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2373 		if (err)
2374 			return err;
2375 	}
2376 
2377 	return 0;
2378 }
2379 
2380 static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
2381 {
2382 	mlxsw_sp_port->pvid = 1;
2383 
2384 	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
2385 }
2386 
2387 static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
2388 {
2389 	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
2390 }
2391 
2392 static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2393 				  bool split, u8 module, u8 width, u8 lane)
2394 {
2395 	struct mlxsw_sp_port *mlxsw_sp_port;
2396 	struct net_device *dev;
2397 	size_t bytes;
2398 	int err;
2399 
2400 	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2401 	if (!dev)
2402 		return -ENOMEM;
2403 	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2404 	mlxsw_sp_port = netdev_priv(dev);
2405 	mlxsw_sp_port->dev = dev;
2406 	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2407 	mlxsw_sp_port->local_port = local_port;
2408 	mlxsw_sp_port->split = split;
2409 	mlxsw_sp_port->mapping.module = module;
2410 	mlxsw_sp_port->mapping.width = width;
2411 	mlxsw_sp_port->mapping.lane = lane;
2412 	mlxsw_sp_port->link.autoneg = 1;
2413 	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
2414 	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
2415 	if (!mlxsw_sp_port->active_vlans) {
2416 		err = -ENOMEM;
2417 		goto err_port_active_vlans_alloc;
2418 	}
2419 	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
2420 	if (!mlxsw_sp_port->untagged_vlans) {
2421 		err = -ENOMEM;
2422 		goto err_port_untagged_vlans_alloc;
2423 	}
2424 	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
2425 	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2426 
2427 	mlxsw_sp_port->pcpu_stats =
2428 		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2429 	if (!mlxsw_sp_port->pcpu_stats) {
2430 		err = -ENOMEM;
2431 		goto err_alloc_stats;
2432 	}
2433 
2434 	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2435 					GFP_KERNEL);
2436 	if (!mlxsw_sp_port->sample) {
2437 		err = -ENOMEM;
2438 		goto err_alloc_sample;
2439 	}
2440 
2441 	mlxsw_sp_port->hw_stats.cache =
2442 		kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2443 
2444 	if (!mlxsw_sp_port->hw_stats.cache) {
2445 		err = -ENOMEM;
2446 		goto err_alloc_hw_stats;
2447 	}
2448 	INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2449 			  &update_stats_cache);
2450 
2451 	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2452 	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2453 
2454 	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2455 	if (err) {
2456 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2457 			mlxsw_sp_port->local_port);
2458 		goto err_port_swid_set;
2459 	}
2460 
2461 	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2462 	if (err) {
2463 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2464 			mlxsw_sp_port->local_port);
2465 		goto err_dev_addr_init;
2466 	}
2467 
2468 	netif_carrier_off(dev);
2469 
2470 	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2471 			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2472 	dev->hw_features |= NETIF_F_HW_TC;
2473 
2474 	dev->min_mtu = 0;
2475 	dev->max_mtu = ETH_MAX_MTU;
2476 
2477 	/* Each packet needs to have a Tx header (metadata) on top all other
2478 	 * headers.
2479 	 */
2480 	dev->needed_headroom = MLXSW_TXHDR_LEN;
2481 
2482 	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2483 	if (err) {
2484 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2485 			mlxsw_sp_port->local_port);
2486 		goto err_port_system_port_mapping_set;
2487 	}
2488 
2489 	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2490 	if (err) {
2491 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2492 			mlxsw_sp_port->local_port);
2493 		goto err_port_speed_by_width_set;
2494 	}
2495 
2496 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2497 	if (err) {
2498 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2499 			mlxsw_sp_port->local_port);
2500 		goto err_port_mtu_set;
2501 	}
2502 
2503 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2504 	if (err)
2505 		goto err_port_admin_status_set;
2506 
2507 	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2508 	if (err) {
2509 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2510 			mlxsw_sp_port->local_port);
2511 		goto err_port_buffers_init;
2512 	}
2513 
2514 	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2515 	if (err) {
2516 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2517 			mlxsw_sp_port->local_port);
2518 		goto err_port_ets_init;
2519 	}
2520 
2521 	/* ETS and buffers must be initialized before DCB. */
2522 	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2523 	if (err) {
2524 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2525 			mlxsw_sp_port->local_port);
2526 		goto err_port_dcb_init;
2527 	}
2528 
2529 	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
2530 	if (err) {
2531 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
2532 			mlxsw_sp_port->local_port);
2533 		goto err_port_pvid_vport_create;
2534 	}
2535 
2536 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2537 	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2538 	err = register_netdev(dev);
2539 	if (err) {
2540 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2541 			mlxsw_sp_port->local_port);
2542 		goto err_register_netdev;
2543 	}
2544 
2545 	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2546 				mlxsw_sp_port, dev, mlxsw_sp_port->split,
2547 				module);
2548 	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
2549 	return 0;
2550 
2551 err_register_netdev:
2552 	mlxsw_sp->ports[local_port] = NULL;
2553 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2554 	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2555 err_port_pvid_vport_create:
2556 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2557 err_port_dcb_init:
2558 err_port_ets_init:
2559 err_port_buffers_init:
2560 err_port_admin_status_set:
2561 err_port_mtu_set:
2562 err_port_speed_by_width_set:
2563 err_port_system_port_mapping_set:
2564 err_dev_addr_init:
2565 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2566 err_port_swid_set:
2567 	kfree(mlxsw_sp_port->hw_stats.cache);
2568 err_alloc_hw_stats:
2569 	kfree(mlxsw_sp_port->sample);
2570 err_alloc_sample:
2571 	free_percpu(mlxsw_sp_port->pcpu_stats);
2572 err_alloc_stats:
2573 	kfree(mlxsw_sp_port->untagged_vlans);
2574 err_port_untagged_vlans_alloc:
2575 	kfree(mlxsw_sp_port->active_vlans);
2576 err_port_active_vlans_alloc:
2577 	free_netdev(dev);
2578 	return err;
2579 }
2580 
2581 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2582 				bool split, u8 module, u8 width, u8 lane)
2583 {
2584 	int err;
2585 
2586 	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2587 	if (err) {
2588 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2589 			local_port);
2590 		return err;
2591 	}
2592 	err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
2593 				     module, width, lane);
2594 	if (err)
2595 		goto err_port_create;
2596 	return 0;
2597 
2598 err_port_create:
2599 	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2600 	return err;
2601 }
2602 
2603 static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2604 {
2605 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2606 
2607 	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
2608 	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
2609 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
2610 	mlxsw_sp->ports[local_port] = NULL;
2611 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2612 	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2613 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2614 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2615 	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
2616 	kfree(mlxsw_sp_port->hw_stats.cache);
2617 	kfree(mlxsw_sp_port->sample);
2618 	free_percpu(mlxsw_sp_port->pcpu_stats);
2619 	kfree(mlxsw_sp_port->untagged_vlans);
2620 	kfree(mlxsw_sp_port->active_vlans);
2621 	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
2622 	free_netdev(mlxsw_sp_port->dev);
2623 }
2624 
2625 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2626 {
2627 	__mlxsw_sp_port_remove(mlxsw_sp, local_port);
2628 	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2629 }
2630 
2631 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2632 {
2633 	return mlxsw_sp->ports[local_port] != NULL;
2634 }
2635 
2636 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2637 {
2638 	int i;
2639 
2640 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
2641 		if (mlxsw_sp_port_created(mlxsw_sp, i))
2642 			mlxsw_sp_port_remove(mlxsw_sp, i);
2643 	kfree(mlxsw_sp->port_to_module);
2644 	kfree(mlxsw_sp->ports);
2645 }
2646 
2647 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2648 {
2649 	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2650 	u8 module, width, lane;
2651 	size_t alloc_size;
2652 	int i;
2653 	int err;
2654 
2655 	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
2656 	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2657 	if (!mlxsw_sp->ports)
2658 		return -ENOMEM;
2659 
2660 	mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
2661 	if (!mlxsw_sp->port_to_module) {
2662 		err = -ENOMEM;
2663 		goto err_port_to_module_alloc;
2664 	}
2665 
2666 	for (i = 1; i < max_ports; i++) {
2667 		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
2668 						    &width, &lane);
2669 		if (err)
2670 			goto err_port_module_info_get;
2671 		if (!width)
2672 			continue;
2673 		mlxsw_sp->port_to_module[i] = module;
2674 		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
2675 					   module, width, lane);
2676 		if (err)
2677 			goto err_port_create;
2678 	}
2679 	return 0;
2680 
2681 err_port_create:
2682 err_port_module_info_get:
2683 	for (i--; i >= 1; i--)
2684 		if (mlxsw_sp_port_created(mlxsw_sp, i))
2685 			mlxsw_sp_port_remove(mlxsw_sp, i);
2686 	kfree(mlxsw_sp->port_to_module);
2687 err_port_to_module_alloc:
2688 	kfree(mlxsw_sp->ports);
2689 	return err;
2690 }
2691 
2692 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2693 {
2694 	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2695 
2696 	return local_port - offset;
2697 }
2698 
2699 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2700 				      u8 module, unsigned int count)
2701 {
2702 	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
2703 	int err, i;
2704 
2705 	for (i = 0; i < count; i++) {
2706 		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
2707 					       width, i * width);
2708 		if (err)
2709 			goto err_port_module_map;
2710 	}
2711 
2712 	for (i = 0; i < count; i++) {
2713 		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
2714 		if (err)
2715 			goto err_port_swid_set;
2716 	}
2717 
2718 	for (i = 0; i < count; i++) {
2719 		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
2720 					   module, width, i * width);
2721 		if (err)
2722 			goto err_port_create;
2723 	}
2724 
2725 	return 0;
2726 
2727 err_port_create:
2728 	for (i--; i >= 0; i--)
2729 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2730 			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2731 	i = count;
2732 err_port_swid_set:
2733 	for (i--; i >= 0; i--)
2734 		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
2735 					 MLXSW_PORT_SWID_DISABLED_PORT);
2736 	i = count;
2737 err_port_module_map:
2738 	for (i--; i >= 0; i--)
2739 		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
2740 	return err;
2741 }
2742 
2743 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2744 					 u8 base_port, unsigned int count)
2745 {
2746 	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
2747 	int i;
2748 
2749 	/* Split by four means we need to re-create two ports, otherwise
2750 	 * only one.
2751 	 */
2752 	count = count / 2;
2753 
2754 	for (i = 0; i < count; i++) {
2755 		local_port = base_port + i * 2;
2756 		module = mlxsw_sp->port_to_module[local_port];
2757 
2758 		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
2759 					 0);
2760 	}
2761 
2762 	for (i = 0; i < count; i++)
2763 		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
2764 
2765 	for (i = 0; i < count; i++) {
2766 		local_port = base_port + i * 2;
2767 		module = mlxsw_sp->port_to_module[local_port];
2768 
2769 		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
2770 				     width, 0);
2771 	}
2772 }
2773 
2774 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2775 			       unsigned int count)
2776 {
2777 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2778 	struct mlxsw_sp_port *mlxsw_sp_port;
2779 	u8 module, cur_width, base_port;
2780 	int i;
2781 	int err;
2782 
2783 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2784 	if (!mlxsw_sp_port) {
2785 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2786 			local_port);
2787 		return -EINVAL;
2788 	}
2789 
2790 	module = mlxsw_sp_port->mapping.module;
2791 	cur_width = mlxsw_sp_port->mapping.width;
2792 
2793 	if (count != 2 && count != 4) {
2794 		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2795 		return -EINVAL;
2796 	}
2797 
2798 	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2799 		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2800 		return -EINVAL;
2801 	}
2802 
2803 	/* Make sure we have enough slave (even) ports for the split. */
2804 	if (count == 2) {
2805 		base_port = local_port;
2806 		if (mlxsw_sp->ports[base_port + 1]) {
2807 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2808 			return -EINVAL;
2809 		}
2810 	} else {
2811 		base_port = mlxsw_sp_cluster_base_port_get(local_port);
2812 		if (mlxsw_sp->ports[base_port + 1] ||
2813 		    mlxsw_sp->ports[base_port + 3]) {
2814 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2815 			return -EINVAL;
2816 		}
2817 	}
2818 
2819 	for (i = 0; i < count; i++)
2820 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2821 			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2822 
2823 	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2824 	if (err) {
2825 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2826 		goto err_port_split_create;
2827 	}
2828 
2829 	return 0;
2830 
2831 err_port_split_create:
2832 	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2833 	return err;
2834 }
2835 
2836 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2837 {
2838 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2839 	struct mlxsw_sp_port *mlxsw_sp_port;
2840 	u8 cur_width, base_port;
2841 	unsigned int count;
2842 	int i;
2843 
2844 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2845 	if (!mlxsw_sp_port) {
2846 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2847 			local_port);
2848 		return -EINVAL;
2849 	}
2850 
2851 	if (!mlxsw_sp_port->split) {
2852 		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2853 		return -EINVAL;
2854 	}
2855 
2856 	cur_width = mlxsw_sp_port->mapping.width;
2857 	count = cur_width == 1 ? 4 : 2;
2858 
2859 	base_port = mlxsw_sp_cluster_base_port_get(local_port);
2860 
2861 	/* Determine which ports to remove. */
2862 	if (count == 2 && local_port >= base_port + 2)
2863 		base_port = base_port + 2;
2864 
2865 	for (i = 0; i < count; i++)
2866 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2867 			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2868 
2869 	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2870 
2871 	return 0;
2872 }
2873 
2874 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2875 				     char *pude_pl, void *priv)
2876 {
2877 	struct mlxsw_sp *mlxsw_sp = priv;
2878 	struct mlxsw_sp_port *mlxsw_sp_port;
2879 	enum mlxsw_reg_pude_oper_status status;
2880 	u8 local_port;
2881 
2882 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2883 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2884 	if (!mlxsw_sp_port)
2885 		return;
2886 
2887 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
2888 	if (status == MLXSW_PORT_OPER_STATUS_UP) {
2889 		netdev_info(mlxsw_sp_port->dev, "link up\n");
2890 		netif_carrier_on(mlxsw_sp_port->dev);
2891 	} else {
2892 		netdev_info(mlxsw_sp_port->dev, "link down\n");
2893 		netif_carrier_off(mlxsw_sp_port->dev);
2894 	}
2895 }
2896 
2897 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2898 					      u8 local_port, void *priv)
2899 {
2900 	struct mlxsw_sp *mlxsw_sp = priv;
2901 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2902 	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2903 
2904 	if (unlikely(!mlxsw_sp_port)) {
2905 		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2906 				     local_port);
2907 		return;
2908 	}
2909 
2910 	skb->dev = mlxsw_sp_port->dev;
2911 
2912 	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2913 	u64_stats_update_begin(&pcpu_stats->syncp);
2914 	pcpu_stats->rx_packets++;
2915 	pcpu_stats->rx_bytes += skb->len;
2916 	u64_stats_update_end(&pcpu_stats->syncp);
2917 
2918 	skb->protocol = eth_type_trans(skb, skb->dev);
2919 	netif_receive_skb(skb);
2920 }
2921 
2922 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2923 					   void *priv)
2924 {
2925 	skb->offload_fwd_mark = 1;
2926 	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2927 }
2928 
2929 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
2930 					     void *priv)
2931 {
2932 	struct mlxsw_sp *mlxsw_sp = priv;
2933 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2934 	struct psample_group *psample_group;
2935 	u32 size;
2936 
2937 	if (unlikely(!mlxsw_sp_port)) {
2938 		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
2939 				     local_port);
2940 		goto out;
2941 	}
2942 	if (unlikely(!mlxsw_sp_port->sample)) {
2943 		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
2944 				     local_port);
2945 		goto out;
2946 	}
2947 
2948 	size = mlxsw_sp_port->sample->truncate ?
2949 		  mlxsw_sp_port->sample->trunc_size : skb->len;
2950 
2951 	rcu_read_lock();
2952 	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
2953 	if (!psample_group)
2954 		goto out_unlock;
2955 	psample_sample_packet(psample_group, skb, size,
2956 			      mlxsw_sp_port->dev->ifindex, 0,
2957 			      mlxsw_sp_port->sample->rate);
2958 out_unlock:
2959 	rcu_read_unlock();
2960 out:
2961 	consume_skb(skb);
2962 }
2963 
2964 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
2965 	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
2966 		  _is_ctrl, SP_##_trap_group, DISCARD)
2967 
2968 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
2969 	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
2970 		_is_ctrl, SP_##_trap_group, DISCARD)
2971 
2972 #define MLXSW_SP_EVENTL(_func, _trap_id)		\
2973 	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2974 
2975 static const struct mlxsw_listener mlxsw_sp_listener[] = {
2976 	/* Events */
2977 	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
2978 	/* L2 traps */
2979 	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
2980 	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
2981 	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
2982 	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
2983 	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
2984 	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
2985 	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
2986 	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
2987 	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
2988 	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
2989 	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
2990 	/* L3 traps */
2991 	MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2992 	MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2993 	MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2994 	MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
2995 	MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
2996 	MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
2997 	MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
2998 	MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
2999 	/* PKT Sample trap */
3000 	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
3001 		  false, SP_IP2ME, DISCARD)
3002 };
3003 
3004 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3005 {
3006 	char qpcr_pl[MLXSW_REG_QPCR_LEN];
3007 	enum mlxsw_reg_qpcr_ir_units ir_units;
3008 	int max_cpu_policers;
3009 	bool is_bytes;
3010 	u8 burst_size;
3011 	u32 rate;
3012 	int i, err;
3013 
3014 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3015 		return -EIO;
3016 
3017 	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3018 
3019 	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3020 	for (i = 0; i < max_cpu_policers; i++) {
3021 		is_bytes = false;
3022 		switch (i) {
3023 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3024 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3025 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3026 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3027 			rate = 128;
3028 			burst_size = 7;
3029 			break;
3030 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3031 			rate = 16 * 1024;
3032 			burst_size = 10;
3033 			break;
3034 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3035 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3036 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3037 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3038 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3039 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3040 			rate = 1024;
3041 			burst_size = 7;
3042 			break;
3043 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3044 			is_bytes = true;
3045 			rate = 4 * 1024;
3046 			burst_size = 4;
3047 			break;
3048 		default:
3049 			continue;
3050 		}
3051 
3052 		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3053 				    burst_size);
3054 		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3055 		if (err)
3056 			return err;
3057 	}
3058 
3059 	return 0;
3060 }
3061 
3062 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
3063 {
3064 	char htgt_pl[MLXSW_REG_HTGT_LEN];
3065 	enum mlxsw_reg_htgt_trap_group i;
3066 	int max_cpu_policers;
3067 	int max_trap_groups;
3068 	u8 priority, tc;
3069 	u16 policer_id;
3070 	int err;
3071 
3072 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3073 		return -EIO;
3074 
3075 	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
3076 	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3077 
3078 	for (i = 0; i < max_trap_groups; i++) {
3079 		policer_id = i;
3080 		switch (i) {
3081 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3082 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3083 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3084 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3085 			priority = 5;
3086 			tc = 5;
3087 			break;
3088 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3089 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3090 			priority = 4;
3091 			tc = 4;
3092 			break;
3093 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3094 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3095 			priority = 3;
3096 			tc = 3;
3097 			break;
3098 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3099 			priority = 2;
3100 			tc = 2;
3101 			break;
3102 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3103 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3104 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3105 			priority = 1;
3106 			tc = 1;
3107 			break;
3108 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
3109 			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3110 			tc = MLXSW_REG_HTGT_DEFAULT_TC;
3111 			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
3112 			break;
3113 		default:
3114 			continue;
3115 		}
3116 
3117 		if (max_cpu_policers <= policer_id &&
3118 		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3119 			return -EIO;
3120 
3121 		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
3122 		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3123 		if (err)
3124 			return err;
3125 	}
3126 
3127 	return 0;
3128 }
3129 
3130 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3131 {
3132 	int i;
3133 	int err;
3134 
3135 	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3136 	if (err)
3137 		return err;
3138 
3139 	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3140 	if (err)
3141 		return err;
3142 
3143 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3144 		err = mlxsw_core_trap_register(mlxsw_sp->core,
3145 					       &mlxsw_sp_listener[i],
3146 					       mlxsw_sp);
3147 		if (err)
3148 			goto err_listener_register;
3149 
3150 	}
3151 	return 0;
3152 
3153 err_listener_register:
3154 	for (i--; i >= 0; i--) {
3155 		mlxsw_core_trap_unregister(mlxsw_sp->core,
3156 					   &mlxsw_sp_listener[i],
3157 					   mlxsw_sp);
3158 	}
3159 	return err;
3160 }
3161 
3162 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3163 {
3164 	int i;
3165 
3166 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3167 		mlxsw_core_trap_unregister(mlxsw_sp->core,
3168 					   &mlxsw_sp_listener[i],
3169 					   mlxsw_sp);
3170 	}
3171 }
3172 
3173 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
3174 				 enum mlxsw_reg_sfgc_type type,
3175 				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
3176 {
3177 	enum mlxsw_flood_table_type table_type;
3178 	enum mlxsw_sp_flood_table flood_table;
3179 	char sfgc_pl[MLXSW_REG_SFGC_LEN];
3180 
3181 	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
3182 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
3183 	else
3184 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3185 
3186 	switch (type) {
3187 	case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
3188 		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
3189 		break;
3190 	case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
3191 		flood_table = MLXSW_SP_FLOOD_TABLE_MC;
3192 		break;
3193 	default:
3194 		flood_table = MLXSW_SP_FLOOD_TABLE_BC;
3195 	}
3196 
3197 	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
3198 			    flood_table);
3199 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
3200 }
3201 
3202 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
3203 {
3204 	int type, err;
3205 
3206 	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
3207 		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
3208 			continue;
3209 
3210 		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3211 					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
3212 		if (err)
3213 			return err;
3214 
3215 		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3216 					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
3217 		if (err)
3218 			return err;
3219 	}
3220 
3221 	return 0;
3222 }
3223 
3224 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3225 {
3226 	char slcr_pl[MLXSW_REG_SLCR_LEN];
3227 	int err;
3228 
3229 	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3230 				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
3231 				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3232 				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
3233 				     MLXSW_REG_SLCR_LAG_HASH_SIP |
3234 				     MLXSW_REG_SLCR_LAG_HASH_DIP |
3235 				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
3236 				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
3237 				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
3238 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3239 	if (err)
3240 		return err;
3241 
3242 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3243 	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3244 		return -EIO;
3245 
3246 	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3247 				 sizeof(struct mlxsw_sp_upper),
3248 				 GFP_KERNEL);
3249 	if (!mlxsw_sp->lags)
3250 		return -ENOMEM;
3251 
3252 	return 0;
3253 }
3254 
3255 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3256 {
3257 	kfree(mlxsw_sp->lags);
3258 }
3259 
3260 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3261 {
3262 	char htgt_pl[MLXSW_REG_HTGT_LEN];
3263 
3264 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3265 			    MLXSW_REG_HTGT_INVALID_POLICER,
3266 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3267 			    MLXSW_REG_HTGT_DEFAULT_TC);
3268 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3269 }
3270 
3271 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3272 			 const struct mlxsw_bus_info *mlxsw_bus_info)
3273 {
3274 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3275 	int err;
3276 
3277 	mlxsw_sp->core = mlxsw_core;
3278 	mlxsw_sp->bus_info = mlxsw_bus_info;
3279 	INIT_LIST_HEAD(&mlxsw_sp->fids);
3280 	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
3281 	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
3282 
3283 	err = mlxsw_sp_base_mac_get(mlxsw_sp);
3284 	if (err) {
3285 		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3286 		return err;
3287 	}
3288 
3289 	err = mlxsw_sp_traps_init(mlxsw_sp);
3290 	if (err) {
3291 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3292 		return err;
3293 	}
3294 
3295 	err = mlxsw_sp_flood_init(mlxsw_sp);
3296 	if (err) {
3297 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
3298 		goto err_flood_init;
3299 	}
3300 
3301 	err = mlxsw_sp_buffers_init(mlxsw_sp);
3302 	if (err) {
3303 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3304 		goto err_buffers_init;
3305 	}
3306 
3307 	err = mlxsw_sp_lag_init(mlxsw_sp);
3308 	if (err) {
3309 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3310 		goto err_lag_init;
3311 	}
3312 
3313 	err = mlxsw_sp_switchdev_init(mlxsw_sp);
3314 	if (err) {
3315 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3316 		goto err_switchdev_init;
3317 	}
3318 
3319 	err = mlxsw_sp_router_init(mlxsw_sp);
3320 	if (err) {
3321 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3322 		goto err_router_init;
3323 	}
3324 
3325 	err = mlxsw_sp_span_init(mlxsw_sp);
3326 	if (err) {
3327 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3328 		goto err_span_init;
3329 	}
3330 
3331 	err = mlxsw_sp_acl_init(mlxsw_sp);
3332 	if (err) {
3333 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3334 		goto err_acl_init;
3335 	}
3336 
3337 	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3338 	if (err) {
3339 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3340 		goto err_counter_pool_init;
3341 	}
3342 
3343 	err = mlxsw_sp_dpipe_init(mlxsw_sp);
3344 	if (err) {
3345 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3346 		goto err_dpipe_init;
3347 	}
3348 
3349 	err = mlxsw_sp_ports_create(mlxsw_sp);
3350 	if (err) {
3351 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3352 		goto err_ports_create;
3353 	}
3354 
3355 	return 0;
3356 
3357 err_ports_create:
3358 	mlxsw_sp_dpipe_fini(mlxsw_sp);
3359 err_dpipe_init:
3360 	mlxsw_sp_counter_pool_fini(mlxsw_sp);
3361 err_counter_pool_init:
3362 	mlxsw_sp_acl_fini(mlxsw_sp);
3363 err_acl_init:
3364 	mlxsw_sp_span_fini(mlxsw_sp);
3365 err_span_init:
3366 	mlxsw_sp_router_fini(mlxsw_sp);
3367 err_router_init:
3368 	mlxsw_sp_switchdev_fini(mlxsw_sp);
3369 err_switchdev_init:
3370 	mlxsw_sp_lag_fini(mlxsw_sp);
3371 err_lag_init:
3372 	mlxsw_sp_buffers_fini(mlxsw_sp);
3373 err_buffers_init:
3374 err_flood_init:
3375 	mlxsw_sp_traps_fini(mlxsw_sp);
3376 	return err;
3377 }
3378 
3379 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3380 {
3381 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3382 
3383 	mlxsw_sp_ports_remove(mlxsw_sp);
3384 	mlxsw_sp_dpipe_fini(mlxsw_sp);
3385 	mlxsw_sp_counter_pool_fini(mlxsw_sp);
3386 	mlxsw_sp_acl_fini(mlxsw_sp);
3387 	mlxsw_sp_span_fini(mlxsw_sp);
3388 	mlxsw_sp_router_fini(mlxsw_sp);
3389 	mlxsw_sp_switchdev_fini(mlxsw_sp);
3390 	mlxsw_sp_lag_fini(mlxsw_sp);
3391 	mlxsw_sp_buffers_fini(mlxsw_sp);
3392 	mlxsw_sp_traps_fini(mlxsw_sp);
3393 	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
3394 	WARN_ON(!list_empty(&mlxsw_sp->fids));
3395 }
3396 
3397 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
3398 	.used_max_vepa_channels		= 1,
3399 	.max_vepa_channels		= 0,
3400 	.used_max_mid			= 1,
3401 	.max_mid			= MLXSW_SP_MID_MAX,
3402 	.used_max_pgt			= 1,
3403 	.max_pgt			= 0,
3404 	.used_flood_tables		= 1,
3405 	.used_flood_mode		= 1,
3406 	.flood_mode			= 3,
3407 	.max_fid_offset_flood_tables	= 3,
3408 	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
3409 	.max_fid_flood_tables		= 3,
3410 	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
3411 	.used_max_ib_mc			= 1,
3412 	.max_ib_mc			= 0,
3413 	.used_max_pkey			= 1,
3414 	.max_pkey			= 0,
3415 	.used_kvd_split_data		= 1,
3416 	.kvd_hash_granularity		= MLXSW_SP_KVD_GRANULARITY,
3417 	.kvd_hash_single_parts		= 2,
3418 	.kvd_hash_double_parts		= 1,
3419 	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
3420 	.swid_config			= {
3421 		{
3422 			.used_type	= 1,
3423 			.type		= MLXSW_PORT_SWID_TYPE_ETH,
3424 		}
3425 	},
3426 	.resource_query_enable		= 1,
3427 };
3428 
3429 static struct mlxsw_driver mlxsw_sp_driver = {
3430 	.kind				= mlxsw_sp_driver_name,
3431 	.priv_size			= sizeof(struct mlxsw_sp),
3432 	.init				= mlxsw_sp_init,
3433 	.fini				= mlxsw_sp_fini,
3434 	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
3435 	.port_split			= mlxsw_sp_port_split,
3436 	.port_unsplit			= mlxsw_sp_port_unsplit,
3437 	.sb_pool_get			= mlxsw_sp_sb_pool_get,
3438 	.sb_pool_set			= mlxsw_sp_sb_pool_set,
3439 	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
3440 	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
3441 	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
3442 	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
3443 	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
3444 	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
3445 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
3446 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
3447 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
3448 	.txhdr_len			= MLXSW_TXHDR_LEN,
3449 	.profile			= &mlxsw_sp_config_profile,
3450 };
3451 
3452 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3453 {
3454 	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3455 }
3456 
3457 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3458 {
3459 	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3460 	int ret = 0;
3461 
3462 	if (mlxsw_sp_port_dev_check(lower_dev)) {
3463 		*p_mlxsw_sp_port = netdev_priv(lower_dev);
3464 		ret = 1;
3465 	}
3466 
3467 	return ret;
3468 }
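
/*
 * netdev_walk_all_lower_dev() stops walking as soon as the callback
 * returns a non-zero value, so the walk terminates at the first lower
 * device that is an mlxsw_sp port and hands it back through the data
 * pointer.
 */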
3469 
3470 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3471 {
3472 	struct mlxsw_sp_port *mlxsw_sp_port;
3473 
3474 	if (mlxsw_sp_port_dev_check(dev))
3475 		return netdev_priv(dev);
3476 
3477 	mlxsw_sp_port = NULL;
3478 	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3479 
3480 	return mlxsw_sp_port;
3481 }
3482 
3483 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3484 {
3485 	struct mlxsw_sp_port *mlxsw_sp_port;
3486 
3487 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3488 	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3489 }
3490 
3491 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3492 {
3493 	struct mlxsw_sp_port *mlxsw_sp_port;
3494 
3495 	if (mlxsw_sp_port_dev_check(dev))
3496 		return netdev_priv(dev);
3497 
3498 	mlxsw_sp_port = NULL;
3499 	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3500 				      &mlxsw_sp_port);
3501 
3502 	return mlxsw_sp_port;
3503 }
3504 
3505 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3506 {
3507 	struct mlxsw_sp_port *mlxsw_sp_port;
3508 
3509 	rcu_read_lock();
3510 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3511 	if (mlxsw_sp_port)
3512 		dev_hold(mlxsw_sp_port->dev);
3513 	rcu_read_unlock();
3514 	return mlxsw_sp_port;
3515 }
3516 
3517 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3518 {
3519 	dev_put(mlxsw_sp_port->dev);
3520 }
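
/*
 * mlxsw_sp_port_lower_dev_hold() and mlxsw_sp_port_dev_put() form a
 * hold/put pair: the lookup runs under rcu_read_lock() and the returned
 * port is pinned through its netdevice's reference count. A minimal
 * usage sketch (the caller's surrounding logic is hypothetical):
 *
 *	struct mlxsw_sp_port *mlxsw_sp_port;
 *
 *	mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
 *	if (mlxsw_sp_port) {
 *		... use the port; its netdevice cannot go away here ...
 *		mlxsw_sp_port_dev_put(mlxsw_sp_port);
 *	}
 */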
3521 
3522 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3523 					 u16 fid)
3524 {
3525 	if (mlxsw_sp_fid_is_vfid(fid))
3526 		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3527 	else
3528 		return test_bit(fid, lag_port->active_vlans);
3529 }
3530 
3531 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3532 					   u16 fid)
3533 {
3534 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3535 	u8 local_port = mlxsw_sp_port->local_port;
3536 	u16 lag_id = mlxsw_sp_port->lag_id;
3537 	u64 max_lag_members;
3538 	int i, count = 0;
3539 
3540 	if (!mlxsw_sp_port->lagged)
3541 		return true;
3542 
3543 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3544 					     MAX_LAG_MEMBERS);
3545 	for (i = 0; i < max_lag_members; i++) {
3546 		struct mlxsw_sp_port *lag_port;
3547 
3548 		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3549 		if (!lag_port || lag_port->local_port == local_port)
3550 			continue;
3551 		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
3552 			count++;
3553 	}
3554 
3555 	return !count;
3556 }
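
/*
 * FDB records learned on a LAG point at the LAG ID rather than at any
 * single member port, so they may only be flushed when the last member
 * using the FID leaves it. For example, with ports A and B in the same
 * LAG and both members of some FID, A leaving the FID must not trigger
 * a flush, while B leaving it afterwards must.
 */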
3557 
3558 static int
3559 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3560 				    u16 fid)
3561 {
3562 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3563 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
3564 
3565 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
3566 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3567 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
3568 						mlxsw_sp_port->local_port);
3569 
3570 	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
3571 		   mlxsw_sp_port->local_port, fid);
3572 
3573 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3574 }
3575 
3576 static int
3577 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3578 				      u16 fid)
3579 {
3580 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3581 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
3582 
3583 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
3584 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3585 	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
3586 
3587 	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
3588 		   mlxsw_sp_port->lag_id, fid);
3589 
3590 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3591 }
3592 
3593 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
3594 {
3595 	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3596 		return 0;
3597 
3598 	if (mlxsw_sp_port->lagged)
3599 		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
3600 							     fid);
3601 	else
3602 		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
3603 }
3604 
3605 static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
3606 {
3607 	struct mlxsw_sp_fid *f, *tmp;
3608 
3609 	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
3610 		if (--f->ref_count == 0)
3611 			mlxsw_sp_fid_destroy(mlxsw_sp, f);
3612 		else
3613 			WARN_ON_ONCE(1);
3614 }
3615 
3616 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3617 					 struct net_device *br_dev)
3618 {
3619 	return !mlxsw_sp->master_bridge.dev ||
3620 	       mlxsw_sp->master_bridge.dev == br_dev;
3621 }
3622 
3623 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
3624 				       struct net_device *br_dev)
3625 {
3626 	mlxsw_sp->master_bridge.dev = br_dev;
3627 	mlxsw_sp->master_bridge.ref_count++;
3628 }
3629 
3630 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
3631 {
3632 	if (--mlxsw_sp->master_bridge.ref_count == 0) {
3633 		mlxsw_sp->master_bridge.dev = NULL;
3634 		/* It's possible upper VLAN devices are still holding
3635 		 * references to underlying FIDs. Drop the reference
3636 		 * and release the resources if it was the last one.
3637 		 * If it wasn't, then something bad happened.
3638 		 */
3639 		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
3640 	}
3641 }
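
/*
 * The master bridge is reference counted per enslaved port:
 * mlxsw_sp_master_bridge_inc() is called from the bridge join path and
 * mlxsw_sp_master_bridge_dec() from the leave path, so the device
 * pointer is only cleared, and any leftover FIDs reclaimed, when the
 * last port leaves the bridge.
 */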
3642 
3643 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
3644 				     struct net_device *br_dev)
3645 {
3646 	struct net_device *dev = mlxsw_sp_port->dev;
3647 	int err;
3648 
3649 	/* When the port is not bridged, untagged packets are tagged with
3650 	 * PVID=VID=1, thereby creating an implicit VLAN interface in
3651 	 * the device. Remove it and let the bridge code take care of
3652 	 * its own VLANs.
3653 	 */
3654 	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
3655 	if (err)
3656 		return err;
3657 
3658 	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
3659 
3660 	mlxsw_sp_port->learning = 1;
3661 	mlxsw_sp_port->learning_sync = 1;
3662 	mlxsw_sp_port->uc_flood = 1;
3663 	mlxsw_sp_port->mc_flood = 1;
3664 	mlxsw_sp_port->mc_router = 0;
3665 	mlxsw_sp_port->mc_disabled = 1;
3666 	mlxsw_sp_port->bridged = 1;
3667 
3668 	return 0;
3669 }
3670 
3671 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3672 {
3673 	struct net_device *dev = mlxsw_sp_port->dev;
3674 
3675 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
3676 
3677 	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
3678 
3679 	mlxsw_sp_port->learning = 0;
3680 	mlxsw_sp_port->learning_sync = 0;
3681 	mlxsw_sp_port->uc_flood = 0;
3682 	mlxsw_sp_port->mc_flood = 0;
3683 	mlxsw_sp_port->mc_router = 0;
3684 	mlxsw_sp_port->bridged = 0;
3685 
3686 	/* Add an implicit VLAN interface to the device, so that untagged
3687 	 * packets are classified to the default vFID.
3688 	 */
3689 	mlxsw_sp_port_add_vid(dev, 0, 1);
3690 }
3691 
3692 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3693 {
3694 	char sldr_pl[MLXSW_REG_SLDR_LEN];
3695 
3696 	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3697 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3698 }
3699 
3700 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3701 {
3702 	char sldr_pl[MLXSW_REG_SLDR_LEN];
3703 
3704 	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3705 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3706 }
3707 
3708 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3709 				     u16 lag_id, u8 port_index)
3710 {
3711 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3712 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3713 
3714 	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3715 				      lag_id, port_index);
3716 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3717 }
3718 
3719 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3720 					u16 lag_id)
3721 {
3722 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3723 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3724 
3725 	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3726 					 lag_id);
3727 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3728 }
3729 
3730 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3731 					u16 lag_id)
3732 {
3733 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3734 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3735 
3736 	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3737 					lag_id);
3738 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3739 }
3740 
3741 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3742 					 u16 lag_id)
3743 {
3744 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3745 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3746 
3747 	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3748 					 lag_id);
3749 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3750 }
3751 
3752 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3753 				  struct net_device *lag_dev,
3754 				  u16 *p_lag_id)
3755 {
3756 	struct mlxsw_sp_upper *lag;
3757 	int free_lag_id = -1;
3758 	u64 max_lag;
3759 	int i;
3760 
3761 	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3762 	for (i = 0; i < max_lag; i++) {
3763 		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3764 		if (lag->ref_count) {
3765 			if (lag->dev == lag_dev) {
3766 				*p_lag_id = i;
3767 				return 0;
3768 			}
3769 		} else if (free_lag_id < 0) {
3770 			free_lag_id = i;
3771 		}
3772 	}
3773 	if (free_lag_id < 0)
3774 		return -EBUSY;
3775 	*p_lag_id = free_lag_id;
3776 	return 0;
3777 }
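
/*
 * LAG IDs are thus allocated first-fit: an existing entry for lag_dev
 * wins, otherwise the lowest-numbered slot with a zero reference count
 * is picked, and -EBUSY is only returned once all MAX_LAG slots are in
 * use.
 */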
3778 
3779 static bool
3780 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3781 			  struct net_device *lag_dev,
3782 			  struct netdev_lag_upper_info *lag_upper_info)
3783 {
3784 	u16 lag_id;
3785 
3786 	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3787 		return false;
3788 	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3789 		return false;
3790 	return true;
3791 }
3792 
3793 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3794 				       u16 lag_id, u8 *p_port_index)
3795 {
3796 	u64 max_lag_members;
3797 	int i;
3798 
3799 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3800 					     MAX_LAG_MEMBERS);
3801 	for (i = 0; i < max_lag_members; i++) {
3802 		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3803 			*p_port_index = i;
3804 			return 0;
3805 		}
3806 	}
3807 	return -EBUSY;
3808 }
3809 
3810 static void
3811 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3812 				  struct net_device *lag_dev, u16 lag_id)
3813 {
3814 	struct mlxsw_sp_port *mlxsw_sp_vport;
3815 	struct mlxsw_sp_fid *f;
3816 
3817 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3818 	if (WARN_ON(!mlxsw_sp_vport))
3819 		return;
3820 
3821 	/* If the vPort is assigned a RIF, make it leave its FID, since
3822 	 * the RIF is no longer valid once the port is a LAG member.
3823 	 */
3824 	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3825 	if (f)
3826 		f->leave(mlxsw_sp_vport);
3827 
3828 	mlxsw_sp_vport->lag_id = lag_id;
3829 	mlxsw_sp_vport->lagged = 1;
3830 	mlxsw_sp_vport->dev = lag_dev;
3831 }
3832 
3833 static void
3834 mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3835 {
3836 	struct mlxsw_sp_port *mlxsw_sp_vport;
3837 	struct mlxsw_sp_fid *f;
3838 
3839 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3840 	if (WARN_ON(!mlxsw_sp_vport))
3841 		return;
3842 
3843 	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3844 	if (f)
3845 		f->leave(mlxsw_sp_vport);
3846 
3847 	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3848 	mlxsw_sp_vport->lagged = 0;
3849 }
3850 
3851 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3852 				  struct net_device *lag_dev)
3853 {
3854 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3855 	struct mlxsw_sp_upper *lag;
3856 	u16 lag_id;
3857 	u8 port_index;
3858 	int err;
3859 
3860 	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3861 	if (err)
3862 		return err;
3863 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3864 	if (!lag->ref_count) {
3865 		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3866 		if (err)
3867 			return err;
3868 		lag->dev = lag_dev;
3869 	}
3870 
3871 	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3872 	if (err)
3873 		return err;
3874 	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3875 	if (err)
3876 		goto err_col_port_add;
3877 	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3878 	if (err)
3879 		goto err_col_port_enable;
3880 
3881 	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3882 				   mlxsw_sp_port->local_port);
3883 	mlxsw_sp_port->lag_id = lag_id;
3884 	mlxsw_sp_port->lagged = 1;
3885 	lag->ref_count++;
3886 
3887 	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
3888 
3889 	return 0;
3890 
3891 err_col_port_enable:
3892 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3893 err_col_port_add:
3894 	if (!lag->ref_count)
3895 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3896 	return err;
3897 }
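
/*
 * Note that the error path above unwinds in reverse order of the setup:
 * collector membership is removed before the LAG is considered for
 * destruction, and the LAG itself is only destroyed if this call was
 * the one that created it (its reference count is still zero).
 */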
3898 
3899 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3900 				    struct net_device *lag_dev)
3901 {
3902 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3903 	u16 lag_id = mlxsw_sp_port->lag_id;
3904 	struct mlxsw_sp_upper *lag;
3905 
3906 	if (!mlxsw_sp_port->lagged)
3907 		return;
3908 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3909 	WARN_ON(lag->ref_count == 0);
3910 
3911 	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
3912 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3913 
3914 	if (mlxsw_sp_port->bridged) {
3915 		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
3916 		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
3917 	}
3918 
3919 	if (lag->ref_count == 1)
3920 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3921 
3922 	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3923 				     mlxsw_sp_port->local_port);
3924 	mlxsw_sp_port->lagged = 0;
3925 	lag->ref_count--;
3926 
3927 	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
3928 }
3929 
3930 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3931 				      u16 lag_id)
3932 {
3933 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3934 	char sldr_pl[MLXSW_REG_SLDR_LEN];
3935 
3936 	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3937 					 mlxsw_sp_port->local_port);
3938 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3939 }
3940 
3941 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3942 					 u16 lag_id)
3943 {
3944 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3945 	char sldr_pl[MLXSW_REG_SLDR_LEN];
3946 
3947 	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3948 					    mlxsw_sp_port->local_port);
3949 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3950 }
3951 
3952 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3953 				       bool lag_tx_enabled)
3954 {
3955 	if (lag_tx_enabled)
3956 		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3957 						  mlxsw_sp_port->lag_id);
3958 	else
3959 		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3960 						     mlxsw_sp_port->lag_id);
3961 }
3962 
3963 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3964 				     struct netdev_lag_lower_state_info *info)
3965 {
3966 	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3967 }
3968 
3969 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3970 				   struct net_device *vlan_dev)
3971 {
3972 	struct mlxsw_sp_port *mlxsw_sp_vport;
3973 	u16 vid = vlan_dev_vlan_id(vlan_dev);
3974 
3975 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3976 	if (WARN_ON(!mlxsw_sp_vport))
3977 		return -EINVAL;
3978 
3979 	mlxsw_sp_vport->dev = vlan_dev;
3980 
3981 	return 0;
3982 }
3983 
3984 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3985 				      struct net_device *vlan_dev)
3986 {
3987 	struct mlxsw_sp_port *mlxsw_sp_vport;
3988 	u16 vid = vlan_dev_vlan_id(vlan_dev);
3989 
3990 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3991 	if (WARN_ON(!mlxsw_sp_vport))
3992 		return;
3993 
3994 	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3995 }
3996 
3997 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3998 					       unsigned long event, void *ptr)
3999 {
4000 	struct netdev_notifier_changeupper_info *info;
4001 	struct mlxsw_sp_port *mlxsw_sp_port;
4002 	struct net_device *upper_dev;
4003 	struct mlxsw_sp *mlxsw_sp;
4004 	int err = 0;
4005 
4006 	mlxsw_sp_port = netdev_priv(dev);
4007 	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4008 	info = ptr;
4009 
4010 	switch (event) {
4011 	case NETDEV_PRECHANGEUPPER:
4012 		upper_dev = info->upper_dev;
4013 		if (!is_vlan_dev(upper_dev) &&
4014 		    !netif_is_lag_master(upper_dev) &&
4015 		    !netif_is_bridge_master(upper_dev) &&
4016 		    !netif_is_l3_master(upper_dev))
4017 			return -EINVAL;
4018 		if (!info->linking)
4019 			break;
4020 		/* A HW limitation forbids putting a port in multiple bridges. */
4021 		if (netif_is_bridge_master(upper_dev) &&
4022 		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
4023 			return -EINVAL;
4024 		if (netif_is_lag_master(upper_dev) &&
4025 		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4026 					       info->upper_info))
4027 			return -EINVAL;
4028 		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
4029 			return -EINVAL;
4030 		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4031 		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
4032 			return -EINVAL;
4033 		break;
4034 	case NETDEV_CHANGEUPPER:
4035 		upper_dev = info->upper_dev;
4036 		if (is_vlan_dev(upper_dev)) {
4037 			if (info->linking)
4038 				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
4039 							      upper_dev);
4040 			else
4041 				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
4042 							  upper_dev);
4043 		} else if (netif_is_bridge_master(upper_dev)) {
4044 			if (info->linking)
4045 				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4046 								upper_dev);
4047 			else
4048 				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
4049 		} else if (netif_is_lag_master(upper_dev)) {
4050 			if (info->linking)
4051 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4052 							     upper_dev);
4053 			else
4054 				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4055 							upper_dev);
4056 		} else if (netif_is_l3_master(upper_dev)) {
4057 			if (info->linking)
4058 				err = mlxsw_sp_port_vrf_join(mlxsw_sp_port);
4059 			else
4060 				mlxsw_sp_port_vrf_leave(mlxsw_sp_port);
4061 		} else {
4062 			err = -EINVAL;
4063 			WARN_ON(1);
4064 		}
4065 		break;
4066 	}
4067 
4068 	return err;
4069 }
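
/*
 * The handler above follows the two-phase contract of the netdev
 * notifier chain: NETDEV_PRECHANGEUPPER may veto a topology change by
 * returning an error before anything is committed, whereas
 * NETDEV_CHANGEUPPER reflects a change the networking core has already
 * made. A listener built on the same pattern looks roughly like this
 * (topology_supported() and commit_change() are placeholders, not
 * driver functions):
 *
 *	switch (event) {
 *	case NETDEV_PRECHANGEUPPER:
 *		if (!topology_supported(info->upper_dev))
 *			return -EINVAL;		// veto, nothing to undo
 *		break;
 *	case NETDEV_CHANGEUPPER:
 *		err = commit_change(info);	// reflect what happened
 *		break;
 *	}
 */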
4070 
4071 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4072 					       unsigned long event, void *ptr)
4073 {
4074 	struct netdev_notifier_changelowerstate_info *info;
4075 	struct mlxsw_sp_port *mlxsw_sp_port;
4076 	int err;
4077 
4078 	mlxsw_sp_port = netdev_priv(dev);
4079 	info = ptr;
4080 
4081 	switch (event) {
4082 	case NETDEV_CHANGELOWERSTATE:
4083 		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4084 			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4085 							info->lower_state_info);
4086 			if (err)
4087 				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4088 		}
4089 		break;
4090 	}
4091 
4092 	return 0;
4093 }
4094 
4095 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
4096 					 unsigned long event, void *ptr)
4097 {
4098 	switch (event) {
4099 	case NETDEV_PRECHANGEUPPER:
4100 	case NETDEV_CHANGEUPPER:
4101 		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
4102 	case NETDEV_CHANGELOWERSTATE:
4103 		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
4104 	}
4105 
4106 	return 0;
4107 }
4108 
4109 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4110 					unsigned long event, void *ptr)
4111 {
4112 	struct net_device *dev;
4113 	struct list_head *iter;
4114 	int ret;
4115 
4116 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
4117 		if (mlxsw_sp_port_dev_check(dev)) {
4118 			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
4119 			if (ret)
4120 				return ret;
4121 		}
4122 	}
4123 
4124 	return 0;
4125 }
4126 
4127 static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
4128 					    struct net_device *vlan_dev)
4129 {
4130 	u16 fid = vlan_dev_vlan_id(vlan_dev);
4131 	struct mlxsw_sp_fid *f;
4132 
4133 	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4134 	if (!f) {
4135 		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
4136 		if (IS_ERR(f))
4137 			return PTR_ERR(f);
4138 	}
4139 
4140 	f->ref_count++;
4141 
4142 	return 0;
4143 }
4144 
4145 static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
4146 					       struct net_device *vlan_dev)
4147 {
4148 	u16 fid = vlan_dev_vlan_id(vlan_dev);
4149 	struct mlxsw_sp_fid *f;
4150 
4151 	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4152 	if (f && f->rif)
4153 		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
4154 	if (f && --f->ref_count == 0)
4155 		mlxsw_sp_fid_destroy(mlxsw_sp, f);
4156 }
4157 
4158 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4159 					   unsigned long event, void *ptr)
4160 {
4161 	struct netdev_notifier_changeupper_info *info;
4162 	struct net_device *upper_dev;
4163 	struct mlxsw_sp *mlxsw_sp;
4164 	int err = 0;
4165 
4166 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4167 	if (!mlxsw_sp)
4168 		return 0;
4169 
4170 	info = ptr;
4171 
4172 	switch (event) {
4173 	case NETDEV_PRECHANGEUPPER:
4174 		upper_dev = info->upper_dev;
4175 		if (!is_vlan_dev(upper_dev) && !netif_is_l3_master(upper_dev))
4176 			return -EINVAL;
4177 		if (is_vlan_dev(upper_dev) &&
4178 		    br_dev != mlxsw_sp->master_bridge.dev)
4179 			return -EINVAL;
4180 		break;
4181 	case NETDEV_CHANGEUPPER:
4182 		upper_dev = info->upper_dev;
4183 		if (is_vlan_dev(upper_dev)) {
4184 			if (info->linking)
4185 				err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
4186 								       upper_dev);
4187 			else
4188 				mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
4189 								   upper_dev);
4190 		} else if (netif_is_l3_master(upper_dev)) {
4191 			if (info->linking)
4192 				err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
4193 							       br_dev);
4194 			else
4195 				mlxsw_sp_bridge_vrf_leave(mlxsw_sp, br_dev);
4196 		} else {
4197 			err = -EINVAL;
4198 			WARN_ON(1);
4199 		}
4200 		break;
4201 	}
4202 
4203 	return err;
4204 }
4205 
4206 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
4207 {
4208 	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
4209 				   MLXSW_SP_VFID_MAX);
4210 }
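
/*
 * vFIDs are accounted for in a plain bitmap: find_first_zero_bit()
 * returns a candidate here, mlxsw_sp_vfid_create() claims it with
 * set_bit() and mlxsw_sp_vfid_destroy() releases it with clear_bit().
 * When no bit is clear, find_first_zero_bit() returns the bitmap size,
 * MLXSW_SP_VFID_MAX, which callers treat as allocation failure.
 */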
4211 
4212 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
4213 {
4214 	char sfmr_pl[MLXSW_REG_SFMR_LEN];
4215 
4216 	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
4217 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
4218 }
4219 
4220 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
4221 
4222 static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
4223 						 struct net_device *br_dev)
4224 {
4225 	struct device *dev = mlxsw_sp->bus_info->dev;
4226 	struct mlxsw_sp_fid *f;
4227 	u16 vfid, fid;
4228 	int err;
4229 
4230 	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
4231 	if (vfid == MLXSW_SP_VFID_MAX) {
4232 		dev_err(dev, "No available vFIDs\n");
4233 		return ERR_PTR(-ERANGE);
4234 	}
4235 
4236 	fid = mlxsw_sp_vfid_to_fid(vfid);
4237 	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
4238 	if (err) {
4239 		dev_err(dev, "Failed to create FID=%d\n", fid);
4240 		return ERR_PTR(err);
4241 	}
4242 
4243 	f = kzalloc(sizeof(*f), GFP_KERNEL);
4244 	if (!f)
4245 		goto err_allocate_vfid;
4246 
4247 	f->leave = mlxsw_sp_vport_vfid_leave;
4248 	f->fid = fid;
4249 	f->dev = br_dev;
4250 
4251 	list_add(&f->list, &mlxsw_sp->vfids.list);
4252 	set_bit(vfid, mlxsw_sp->vfids.mapped);
4253 
4254 	return f;
4255 
4256 err_allocate_vfid:
4257 	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
4258 	return ERR_PTR(-ENOMEM);
4259 }
4260 
4261 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
4262 				  struct mlxsw_sp_fid *f)
4263 {
4264 	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
4265 	u16 fid = f->fid;
4266 
4267 	clear_bit(vfid, mlxsw_sp->vfids.mapped);
4268 	list_del(&f->list);
4269 
4270 	if (f->rif)
4271 		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
4272 
4273 	kfree(f);
4274 
4275 	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
4276 }
4277 
4278 static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
4279 				  bool valid)
4280 {
4281 	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
4282 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4283 
4284 	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
4285 					    vid);
4286 }
4287 
4288 static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4289 				    struct net_device *br_dev)
4290 {
4291 	struct mlxsw_sp_fid *f;
4292 	int err;
4293 
4294 	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
4295 	if (!f) {
4296 		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
4297 		if (IS_ERR(f))
4298 			return PTR_ERR(f);
4299 	}
4300 
4301 	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
4302 	if (err)
4303 		goto err_vport_flood_set;
4304 
4305 	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
4306 	if (err)
4307 		goto err_vport_fid_map;
4308 
4309 	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
4310 	f->ref_count++;
4311 
4312 	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
4313 
4314 	return 0;
4315 
4316 err_vport_fid_map:
4317 	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4318 err_vport_flood_set:
4319 	if (!f->ref_count)
4320 		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
4321 	return err;
4322 }
4323 
4324 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
4325 {
4326 	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
4327 
4328 	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
4329 
4330 	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
4331 
4332 	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4333 
4334 	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
4335 
4336 	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
4337 	if (--f->ref_count == 0)
4338 		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
4339 }
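
/*
 * mlxsw_sp_vport_vfid_join() and mlxsw_sp_vport_vfid_leave() keep the
 * vFID reference count balanced: the join path takes a reference only
 * after the flood and {Port, VID} to FID mappings succeed, and the
 * leave path drops it after undoing them, destroying the vFID once the
 * last vPort is gone.
 */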
4340 
4341 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4342 				      struct net_device *br_dev)
4343 {
4344 	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
4345 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4346 	struct net_device *dev = mlxsw_sp_vport->dev;
4347 	int err;
4348 
4349 	if (f && !WARN_ON(!f->leave))
4350 		f->leave(mlxsw_sp_vport);
4351 
4352 	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
4353 	if (err) {
4354 		netdev_err(dev, "Failed to join vFID\n");
4355 		return err;
4356 	}
4357 
4358 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
4359 	if (err) {
4360 		netdev_err(dev, "Failed to enable learning\n");
4361 		goto err_port_vid_learning_set;
4362 	}
4363 
4364 	mlxsw_sp_vport->learning = 1;
4365 	mlxsw_sp_vport->learning_sync = 1;
4366 	mlxsw_sp_vport->uc_flood = 1;
4367 	mlxsw_sp_vport->mc_flood = 1;
4368 	mlxsw_sp_vport->mc_router = 0;
4369 	mlxsw_sp_vport->mc_disabled = 1;
4370 	mlxsw_sp_vport->bridged = 1;
4371 
4372 	return 0;
4373 
4374 err_port_vid_learning_set:
4375 	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
4376 	return err;
4377 }
4378 
4379 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
4380 {
4381 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4382 
4383 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
4384 
4385 	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
4386 
4387 	mlxsw_sp_vport->learning = 0;
4388 	mlxsw_sp_vport->learning_sync = 0;
4389 	mlxsw_sp_vport->uc_flood = 0;
4390 	mlxsw_sp_vport->mc_flood = 0;
4391 	mlxsw_sp_vport->mc_router = 0;
4392 	mlxsw_sp_vport->bridged = 0;
4393 }
4394 
4395 static bool
4396 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
4397 				  const struct net_device *br_dev)
4398 {
4399 	struct mlxsw_sp_port *mlxsw_sp_vport;
4400 
4401 	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
4402 			    vport.list) {
4403 		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
4404 
4405 		if (dev && dev == br_dev)
4406 			return false;
4407 	}
4408 
4409 	return true;
4410 }
4411 
4412 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
4413 					  unsigned long event, void *ptr,
4414 					  u16 vid)
4415 {
4416 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4417 	struct netdev_notifier_changeupper_info *info = ptr;
4418 	struct mlxsw_sp_port *mlxsw_sp_vport;
4419 	struct net_device *upper_dev;
4420 	int err = 0;
4421 
4422 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
4423 	if (!mlxsw_sp_vport)
4424 		return 0;
4425 
4426 	switch (event) {
4427 	case NETDEV_PRECHANGEUPPER:
4428 		upper_dev = info->upper_dev;
4429 		if (!netif_is_bridge_master(upper_dev) &&
4430 		    !netif_is_l3_master(upper_dev))
4431 			return -EINVAL;
4432 		if (!info->linking)
4433 			break;
4434 		/* We can't have multiple VLAN interfaces configured on
4435 		 * the same port that are members of the same bridge.
4436 		 */
4437 		if (netif_is_bridge_master(upper_dev) &&
4438 		    !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
4439 						       upper_dev))
4440 			return -EINVAL;
4441 		break;
4442 	case NETDEV_CHANGEUPPER:
4443 		upper_dev = info->upper_dev;
4444 		if (netif_is_bridge_master(upper_dev)) {
4445 			if (info->linking)
4446 				err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
4447 								 upper_dev);
4448 			else
4449 				mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
4450 		} else if (netif_is_l3_master(upper_dev)) {
4451 			if (info->linking)
4452 				err = mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
4453 			else
4454 				mlxsw_sp_vport_vrf_leave(mlxsw_sp_vport);
4455 		} else {
4456 			err = -EINVAL;
4457 			WARN_ON(1);
4458 		}
4459 		break;
4460 	}
4461 
4462 	return err;
4463 }
4464 
4465 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
4466 					      unsigned long event, void *ptr,
4467 					      u16 vid)
4468 {
4469 	struct net_device *dev;
4470 	struct list_head *iter;
4471 	int ret;
4472 
4473 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
4474 		if (mlxsw_sp_port_dev_check(dev)) {
4475 			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
4476 							     vid);
4477 			if (ret)
4478 				return ret;
4479 		}
4480 	}
4481 
4482 	return 0;
4483 }
4484 
4485 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
4486 						unsigned long event, void *ptr)
4487 {
4488 	struct netdev_notifier_changeupper_info *info;
4489 	struct mlxsw_sp *mlxsw_sp;
4490 	int err = 0;
4491 
4492 	mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
4493 	if (!mlxsw_sp)
4494 		return 0;
4495 
4496 	info = ptr;
4497 
4498 	switch (event) {
4499 	case NETDEV_PRECHANGEUPPER:
4500 		/* VLAN devices are only allowed on top of the
4501 		 * VLAN-aware bridge.
4502 		 */
4503 		if (WARN_ON(vlan_dev_real_dev(vlan_dev) !=
4504 			    mlxsw_sp->master_bridge.dev))
4505 			return -EINVAL;
4506 		if (!netif_is_l3_master(info->upper_dev))
4507 			return -EINVAL;
4508 		break;
4509 	case NETDEV_CHANGEUPPER:
4510 		if (netif_is_l3_master(info->upper_dev)) {
4511 			if (info->linking)
4512 				err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
4513 							       vlan_dev);
4514 			else
4515 				mlxsw_sp_bridge_vrf_leave(mlxsw_sp, vlan_dev);
4516 		} else {
4517 			err = -EINVAL;
4518 			WARN_ON(1);
4519 		}
4520 		break;
4521 	}
4522 
4523 	return err;
4524 }
4525 
4526 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4527 					 unsigned long event, void *ptr)
4528 {
4529 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4530 	u16 vid = vlan_dev_vlan_id(vlan_dev);
4531 
4532 	if (mlxsw_sp_port_dev_check(real_dev))
4533 		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
4534 						      vid);
4535 	else if (netif_is_lag_master(real_dev))
4536 		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
4537 							  vid);
4538 	else if (netif_is_bridge_master(real_dev))
4539 		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, event,
4540 							    ptr);
4541 
4542 	return 0;
4543 }
4544 
4545 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4546 				    unsigned long event, void *ptr)
4547 {
4548 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4549 	int err = 0;
4550 
4551 	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
4552 		err = mlxsw_sp_netdevice_router_port_event(dev);
4553 	else if (mlxsw_sp_port_dev_check(dev))
4554 		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
4555 	else if (netif_is_lag_master(dev))
4556 		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4557 	else if (netif_is_bridge_master(dev))
4558 		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
4559 	else if (is_vlan_dev(dev))
4560 		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
4561 
4562 	return notifier_from_errno(err);
4563 }
4564 
4565 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
4566 	.notifier_call = mlxsw_sp_netdevice_event,
4567 };
4568 
4569 static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
4570 	.notifier_call = mlxsw_sp_inetaddr_event,
4571 	.priority = 10,	/* Must be called before FIB notifier block */
4572 };
4573 
4574 static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
4575 	.notifier_call = mlxsw_sp_router_netevent_event,
4576 };
4577 
4578 static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
4579 	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
4580 	{0, },
4581 };
4582 
4583 static struct pci_driver mlxsw_sp_pci_driver = {
4584 	.name = mlxsw_sp_driver_name,
4585 	.id_table = mlxsw_sp_pci_id_table,
4586 };
4587 
4588 static int __init mlxsw_sp_module_init(void)
4589 {
4590 	int err;
4591 
4592 	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4593 	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4594 	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4595 
4596 	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4597 	if (err)
4598 		goto err_core_driver_register;
4599 
4600 	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
4601 	if (err)
4602 		goto err_pci_driver_register;
4603 
4604 	return 0;
4605 
4606 err_pci_driver_register:
4607 	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4608 err_core_driver_register:
4609 	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4610 	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4611 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4612 	return err;
4613 }
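
/*
 * The notifier blocks are registered before the core and PCI drivers so
 * that no netdevice, inetaddr or netevent notification is missed while
 * ports are being created; mlxsw_sp_module_exit() below unregisters
 * everything in the opposite order.
 */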
4614 
4615 static void __exit mlxsw_sp_module_exit(void)
4616 {
4617 	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
4618 	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4619 	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4620 	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4621 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4622 }
4623 
4624 module_init(mlxsw_sp_module_init);
4625 module_exit(mlxsw_sp_module_exit);
4626 
4627 MODULE_LICENSE("Dual BSD/GPL");
4628 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4629 MODULE_DESCRIPTION("Mellanox Spectrum driver");
4630 MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
4631