xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c (revision a5d9265e017f081f0dc133c0e2f45103d027b874)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/random.h>
25 #include <net/switchdev.h>
26 #include <net/pkt_cls.h>
27 #include <net/tc_act/tc_mirred.h>
28 #include <net/netevent.h>
29 #include <net/tc_act/tc_sample.h>
30 #include <net/addrconf.h>
31 
32 #include "spectrum.h"
33 #include "pci.h"
34 #include "core.h"
35 #include "reg.h"
36 #include "port.h"
37 #include "trap.h"
38 #include "txheader.h"
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_acl_flex_actions.h"
42 #include "spectrum_span.h"
43 #include "../mlxfw/mlxfw.h"
44 
45 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
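/* E.g. minor 1910 maps to branch 19, so 13.1910.x and 13.1912.x firmware
 * releases share a branch while 13.2000.x does not.
 */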
46 
47 #define MLXSW_SP1_FWREV_MAJOR 13
48 #define MLXSW_SP1_FWREV_MINOR 1910
49 #define MLXSW_SP1_FWREV_SUBMINOR 622
50 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
51 
52 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
53 	.major = MLXSW_SP1_FWREV_MAJOR,
54 	.minor = MLXSW_SP1_FWREV_MINOR,
55 	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
56 	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
57 };
58 
59 #define MLXSW_SP1_FW_FILENAME \
60 	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
61 	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
62 	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
63 
64 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
65 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
66 static const char mlxsw_sp_driver_version[] = "1.0";
67 
68 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
69 	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
70 };
71 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
72 	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
73 };
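
/* The masks cover the stable prefix of the base MAC: Spectrum-1 leaves the
 * low 10 bits free to vary, Spectrum-2 the low 12 bits (used e.g. when
 * validating that interface MACs share the switch's base MAC prefix).
 */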
74 
75 /* tx_hdr_version
76  * Tx header version.
77  * Must be set to 1.
78  */
79 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
80 
81 /* tx_hdr_ctl
82  * Packet control type.
83  * 0 - Ethernet control (e.g. EMADs, LACP)
84  * 1 - Ethernet data
85  */
86 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
87 
88 /* tx_hdr_proto
89  * Packet protocol type. Must be set to 1 (Ethernet).
90  */
91 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
92 
93 /* tx_hdr_rx_is_router
94  * Packet is sent from the router. Valid for data packets only.
95  */
96 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
97 
98 /* tx_hdr_fid_valid
99  * Indicates if the 'fid' field is valid and should be used for
100  * forwarding lookup. Valid for data packets only.
101  */
102 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
103 
104 /* tx_hdr_swid
105  * Switch partition ID. Must be set to 0.
106  */
107 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
108 
109 /* tx_hdr_control_tclass
110  * Indicates if the packet should use the control TClass and not one
111  * of the data TClasses.
112  */
113 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
114 
115 /* tx_hdr_etclass
116  * Egress TClass to be used on the egress port of the egress device.
117  */
118 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
119 
120 /* tx_hdr_port_mid
121  * Destination local port for unicast packets.
122  * Destination multicast ID for multicast packets.
123  *
124  * Control packets are directed to a specific egress port, while data
125  * packets are transmitted through the CPU port (0) into the switch partition,
126  * where forwarding rules are applied.
127  */
128 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
129 
130 /* tx_hdr_fid
131  * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
132  * set, otherwise calculated based on the packet's VID using VID to FID mapping.
133  * Valid for data packets only.
134  */
135 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
136 
137 /* tx_hdr_type
138  * 0 - Data packets
139  * 6 - Control packets
140  */
141 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
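
/* The MLXSW_ITEM32() definitions above generate get/set accessors for each
 * Tx header field. A minimal sketch of their use (mirroring
 * mlxsw_sp_txhdr_construct() below) for a control packet:
 *
 *	char txhdr[MLXSW_TXHDR_LEN] = {0};
 *
 *	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
 *	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
 *	mlxsw_tx_hdr_port_mid_set(txhdr, local_port);
 */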
142 
143 struct mlxsw_sp_mlxfw_dev {
144 	struct mlxfw_dev mlxfw_dev;
145 	struct mlxsw_sp *mlxsw_sp;
146 };
147 
148 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
149 				    u16 component_index, u32 *p_max_size,
150 				    u8 *p_align_bits, u16 *p_max_write_size)
151 {
152 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
153 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
154 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
155 	char mcqi_pl[MLXSW_REG_MCQI_LEN];
156 	int err;
157 
158 	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
159 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
160 	if (err)
161 		return err;
162 	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
163 			      p_max_write_size);
164 
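	/* Clamp what the firmware reports: require at least 4-byte alignment
	 * (align_bits >= 2) and never write more per MCDA transaction than
	 * the register's data payload can carry.
	 */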
165 	*p_align_bits = max_t(u8, *p_align_bits, 2);
166 	*p_max_write_size = min_t(u16, *p_max_write_size,
167 				  MLXSW_REG_MCDA_MAX_DATA_LEN);
168 	return 0;
169 }
170 
171 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
172 {
173 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
174 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
175 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
176 	char mcc_pl[MLXSW_REG_MCC_LEN];
177 	u8 control_state;
178 	int err;
179 
180 	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
181 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
182 	if (err)
183 		return err;
184 
185 	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
186 	if (control_state != MLXFW_FSM_STATE_IDLE)
187 		return -EBUSY;
188 
189 	mlxsw_reg_mcc_pack(mcc_pl,
190 			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
191 			   0, *fwhandle, 0);
192 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
193 }
194 
195 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
196 					 u32 fwhandle, u16 component_index,
197 					 u32 component_size)
198 {
199 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
200 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
201 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
202 	char mcc_pl[MLXSW_REG_MCC_LEN];
203 
204 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
205 			   component_index, fwhandle, component_size);
206 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
207 }
208 
209 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
210 				       u32 fwhandle, u8 *data, u16 size,
211 				       u32 offset)
212 {
213 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
214 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
215 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
216 	char mcda_pl[MLXSW_REG_MCDA_LEN];
217 
218 	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
219 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
220 }
221 
222 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
223 					 u32 fwhandle, u16 component_index)
224 {
225 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
226 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
227 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
228 	char mcc_pl[MLXSW_REG_MCC_LEN];
229 
230 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
231 			   component_index, fwhandle, 0);
232 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
233 }
234 
235 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
236 {
237 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
238 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
239 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
240 	char mcc_pl[MLXSW_REG_MCC_LEN];
241 
242 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
243 			   fwhandle, 0);
244 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
245 }
246 
247 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
248 				    enum mlxfw_fsm_state *fsm_state,
249 				    enum mlxfw_fsm_state_err *fsm_state_err)
250 {
251 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
252 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
253 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
254 	char mcc_pl[MLXSW_REG_MCC_LEN];
255 	u8 control_state;
256 	u8 error_code;
257 	int err;
258 
259 	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
260 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
261 	if (err)
262 		return err;
263 
264 	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
265 	*fsm_state = control_state;
266 	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
267 			       MLXFW_FSM_STATE_ERR_MAX);
268 	return 0;
269 }
270 
271 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
272 {
273 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
274 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
275 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
276 	char mcc_pl[MLXSW_REG_MCC_LEN];
277 
278 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
279 			   fwhandle, 0);
280 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
281 }
282 
283 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
284 {
285 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
286 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
287 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
288 	char mcc_pl[MLXSW_REG_MCC_LEN];
289 
290 	mlxsw_reg_mcc_pack(mcc_pl,
291 			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
292 			   fwhandle, 0);
293 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
294 }
295 
296 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
297 	.component_query	= mlxsw_sp_component_query,
298 	.fsm_lock		= mlxsw_sp_fsm_lock,
299 	.fsm_component_update	= mlxsw_sp_fsm_component_update,
300 	.fsm_block_download	= mlxsw_sp_fsm_block_download,
301 	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
302 	.fsm_activate		= mlxsw_sp_fsm_activate,
303 	.fsm_query_state	= mlxsw_sp_fsm_query_state,
304 	.fsm_cancel		= mlxsw_sp_fsm_cancel,
305 	.fsm_release		= mlxsw_sp_fsm_release
306 };
307 
308 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
309 				   const struct firmware *firmware)
310 {
311 	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
312 		.mlxfw_dev = {
313 			.ops = &mlxsw_sp_mlxfw_dev_ops,
314 			.psid = mlxsw_sp->bus_info->psid,
315 			.psid_size = strlen(mlxsw_sp->bus_info->psid),
316 		},
317 		.mlxsw_sp = mlxsw_sp
318 	};
319 	int err;
320 
321 	mlxsw_core_fw_flash_start(mlxsw_sp->core);
322 	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
323 	mlxsw_core_fw_flash_end(mlxsw_sp->core);
324 
325 	return err;
326 }
327 
328 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
329 {
330 	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
331 	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
332 	const char *fw_filename = mlxsw_sp->fw_filename;
333 	union devlink_param_value value;
334 	const struct firmware *firmware;
335 	int err;
336 
337 	/* Nothing to validate if the driver does not require a specific FW version */
338 	if (!req_rev || !fw_filename)
339 		return 0;
340 
341 	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
342 	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
343 						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
344 						 &value);
345 	if (err)
346 		return err;
347 	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
348 		return 0;
349 
350 	/* Validate driver & FW are compatible */
351 	if (rev->major != req_rev->major) {
352 		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
353 		     rev->major, req_rev->major);
354 		return -EINVAL;
355 	}
356 	if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
357 	    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
358 	    (rev->minor > req_rev->minor ||
359 	     (rev->minor == req_rev->minor &&
360 	      rev->subminor >= req_rev->subminor)))
361 		return 0;
362 
363 	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
364 		 rev->major, rev->minor, rev->subminor);
365 	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
366 		 fw_filename);
367 
368 	err = request_firmware_direct(&firmware, fw_filename,
369 				      mlxsw_sp->bus_info->dev);
370 	if (err) {
371 		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
372 			fw_filename);
373 		return err;
374 	}
375 
376 	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
377 	release_firmware(firmware);
378 	if (err)
379 		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
380 
381 	/* On FW flash success, tell the caller FW reset is needed
382 	 * if current FW supports it.
383 	 */
384 	if (rev->minor >= req_rev->can_reset_minor)
385 		return err ? err : -EAGAIN;
386 	else
387 		return 0;
388 }
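
/* A worked example of the rule above, with a required 13.1910.622: a running
 * 13.1910.700 or 13.1912.0 firmware is accepted (same branch 19, newer),
 * while e.g. 13.1802.x is on an older branch and triggers a flash of the
 * bundled image.
 */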
389 
390 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
391 			      unsigned int counter_index, u64 *packets,
392 			      u64 *bytes)
393 {
394 	char mgpc_pl[MLXSW_REG_MGPC_LEN];
395 	int err;
396 
397 	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
398 			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
399 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
400 	if (err)
401 		return err;
402 	if (packets)
403 		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
404 	if (bytes)
405 		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
406 	return 0;
407 }
408 
409 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
410 				       unsigned int counter_index)
411 {
412 	char mgpc_pl[MLXSW_REG_MGPC_LEN];
413 
414 	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
415 			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
416 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
417 }
418 
419 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
420 				unsigned int *p_counter_index)
421 {
422 	int err;
423 
424 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
425 				     p_counter_index);
426 	if (err)
427 		return err;
428 	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
429 	if (err)
430 		goto err_counter_clear;
431 	return 0;
432 
433 err_counter_clear:
434 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
435 			      *p_counter_index);
436 	return err;
437 }
438 
439 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
440 				unsigned int counter_index)
441 {
442 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
443 			      counter_index);
444 }
445 
446 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
447 				     const struct mlxsw_tx_info *tx_info)
448 {
449 	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
450 
451 	memset(txhdr, 0, MLXSW_TXHDR_LEN);
452 
453 	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
454 	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
455 	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
456 	mlxsw_tx_hdr_swid_set(txhdr, 0);
457 	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
458 	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
459 	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
460 }
461 
462 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
463 {
464 	switch (state) {
465 	case BR_STATE_FORWARDING:
466 		return MLXSW_REG_SPMS_STATE_FORWARDING;
467 	case BR_STATE_LEARNING:
468 		return MLXSW_REG_SPMS_STATE_LEARNING;
469 	case BR_STATE_LISTENING: /* fall-through */
470 	case BR_STATE_DISABLED: /* fall-through */
471 	case BR_STATE_BLOCKING:
472 		return MLXSW_REG_SPMS_STATE_DISCARDING;
473 	default:
474 		BUG();
475 	}
476 }
477 
478 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
479 			      u8 state)
480 {
481 	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
482 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
483 	char *spms_pl;
484 	int err;
485 
486 	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
487 	if (!spms_pl)
488 		return -ENOMEM;
489 	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
490 	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
491 
492 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
493 	kfree(spms_pl);
494 	return err;
495 }
496 
497 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
498 {
499 	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
500 	int err;
501 
502 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
503 	if (err)
504 		return err;
505 	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
506 	return 0;
507 }
508 
509 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
510 				    bool enable, u32 rate)
511 {
512 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
513 	char mpsc_pl[MLXSW_REG_MPSC_LEN];
514 
515 	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
516 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
517 }
518 
519 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
520 					  bool is_up)
521 {
522 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
523 	char paos_pl[MLXSW_REG_PAOS_LEN];
524 
525 	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
526 			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
527 			    MLXSW_PORT_ADMIN_STATUS_DOWN);
528 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
529 }
530 
531 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
532 				      unsigned char *addr)
533 {
534 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
535 	char ppad_pl[MLXSW_REG_PPAD_LEN];
536 
537 	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
538 	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
539 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
540 }
541 
542 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
543 {
544 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
545 	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
546 
547 	ether_addr_copy(addr, mlxsw_sp->base_mac);
548 	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
549 	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
550 }
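
/* Port MACs are derived from the switch base MAC by offsetting the last
 * byte with the local port number, e.g. a (hypothetical) base MAC of
 * e4:1d:2d:a0:00:00 yields e4:1d:2d:a0:00:05 for local port 5.
 */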
551 
552 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
553 {
554 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
555 	char pmtu_pl[MLXSW_REG_PMTU_LEN];
556 	int max_mtu;
557 	int err;
558 
559 	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
560 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
561 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
562 	if (err)
563 		return err;
564 	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
565 
566 	if (mtu > max_mtu)
567 		return -EINVAL;
568 
569 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
570 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
571 }
572 
573 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
574 {
575 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
576 	char pspa_pl[MLXSW_REG_PSPA_LEN];
577 
578 	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
579 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
580 }
581 
582 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
583 {
584 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
585 	char svpe_pl[MLXSW_REG_SVPE_LEN];
586 
587 	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
588 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
589 }
590 
591 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
592 				   bool learn_enable)
593 {
594 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
595 	char *spvmlr_pl;
596 	int err;
597 
598 	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
599 	if (!spvmlr_pl)
600 		return -ENOMEM;
601 	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
602 			      learn_enable);
603 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
604 	kfree(spvmlr_pl);
605 	return err;
606 }
607 
608 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
609 				    u16 vid)
610 {
611 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
612 	char spvid_pl[MLXSW_REG_SPVID_LEN];
613 
614 	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
615 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
616 }
617 
618 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
619 					    bool allow)
620 {
621 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
622 	char spaft_pl[MLXSW_REG_SPAFT_LEN];
623 
624 	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
625 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
626 }
627 
628 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
629 {
630 	int err;
631 
632 	if (!vid) {
633 		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
634 		if (err)
635 			return err;
636 	} else {
637 		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
638 		if (err)
639 			return err;
640 		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
641 		if (err)
642 			goto err_port_allow_untagged_set;
643 	}
644 
645 	mlxsw_sp_port->pvid = vid;
646 	return 0;
647 
648 err_port_allow_untagged_set:
649 	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
650 	return err;
651 }
652 
653 static int
654 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
655 {
656 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
657 	char sspr_pl[MLXSW_REG_SSPR_LEN];
658 
659 	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
660 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
661 }
662 
663 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
664 					 u8 local_port, u8 *p_module,
665 					 u8 *p_width, u8 *p_lane)
666 {
667 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
668 	int err;
669 
670 	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
671 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
672 	if (err)
673 		return err;
674 	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
675 	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
676 	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
677 	return 0;
678 }
679 
680 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
681 				    u8 module, u8 width, u8 lane)
682 {
683 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
684 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
685 	int i;
686 
687 	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
688 	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
689 	for (i = 0; i < width; i++) {
690 		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
691 		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
692 	}
693 
694 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
695 }
696 
697 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
698 {
699 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
700 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
701 
702 	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
703 	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
704 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
705 }
706 
707 static int mlxsw_sp_port_open(struct net_device *dev)
708 {
709 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
710 	int err;
711 
712 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
713 	if (err)
714 		return err;
715 	netif_start_queue(dev);
716 	return 0;
717 }
718 
719 static int mlxsw_sp_port_stop(struct net_device *dev)
720 {
721 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
722 
723 	netif_stop_queue(dev);
724 	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
725 }
726 
727 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
728 				      struct net_device *dev)
729 {
730 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
731 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
732 	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
733 	const struct mlxsw_tx_info tx_info = {
734 		.local_port = mlxsw_sp_port->local_port,
735 		.is_emad = false,
736 	};
737 	u64 len;
738 	int err;
739 
740 	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
741 		return NETDEV_TX_BUSY;
742 
743 	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
744 		struct sk_buff *skb_orig = skb;
745 
746 		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
747 		if (!skb) {
748 			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
749 			dev_kfree_skb_any(skb_orig);
750 			return NETDEV_TX_OK;
751 		}
752 		dev_consume_skb_any(skb_orig);
753 	}
754 
755 	if (eth_skb_pad(skb)) {
756 		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
757 		return NETDEV_TX_OK;
758 	}
759 
760 	mlxsw_sp_txhdr_construct(skb, &tx_info);
761 	/* TX header is consumed by HW on the way so we shouldn't count its
762 	 * bytes as being sent.
763 	 */
764 	len = skb->len - MLXSW_TXHDR_LEN;
765 
766 	/* Due to a race we might fail here because of a full queue. In that
767 	 * unlikely case we simply drop the packet.
768 	 */
769 	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
770 
771 	if (!err) {
772 		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
773 		u64_stats_update_begin(&pcpu_stats->syncp);
774 		pcpu_stats->tx_packets++;
775 		pcpu_stats->tx_bytes += len;
776 		u64_stats_update_end(&pcpu_stats->syncp);
777 	} else {
778 		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
779 		dev_kfree_skb_any(skb);
780 	}
781 	return NETDEV_TX_OK;
782 }
783 
784 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
785 {
786 }
787 
788 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
789 {
790 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
791 	struct sockaddr *addr = p;
792 	int err;
793 
794 	if (!is_valid_ether_addr(addr->sa_data))
795 		return -EADDRNOTAVAIL;
796 
797 	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
798 	if (err)
799 		return err;
800 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
801 	return 0;
802 }
803 
804 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
805 					 int mtu)
806 {
807 	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
808 }
809 
810 #define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */
811 
812 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
813 				  u16 delay)
814 {
815 	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
816 							    BITS_PER_BYTE));
817 	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
818 								   mtu);
819 }
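
/* Worked example for the conversion above, assuming a hypothetical 96-byte
 * cell: a PFC delay of 32768 bit times is 4096 bytes, i.e. 43 cells rounded
 * up, giving MLXSW_SP_CELL_FACTOR * 43 + cells(mtu) cells of headroom.
 */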
820 
821 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
822  * Assumes 100m cable and maximum MTU.
823  */
824 #define MLXSW_SP_PAUSE_DELAY 58752
825 
826 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
827 				     u16 delay, bool pfc, bool pause)
828 {
829 	if (pfc)
830 		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
831 	else if (pause)
832 		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
833 	else
834 		return 0;
835 }
836 
837 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
838 				 bool lossy)
839 {
840 	if (lossy)
841 		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
842 	else
843 		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
844 						    thres);
845 }
846 
847 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
848 				 u8 *prio_tc, bool pause_en,
849 				 struct ieee_pfc *my_pfc)
850 {
851 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
851 	u8 pfc_en = my_pfc ? my_pfc->pfc_en : 0;
852 	u16 delay = my_pfc ? my_pfc->delay : 0;
854 	char pbmc_pl[MLXSW_REG_PBMC_LEN];
855 	int i, j, err;
856 
857 	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
858 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
859 	if (err)
860 		return err;
861 
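	/* Configure a priority group (PG) buffer only if at least one ingress
	 * priority is mapped to it via prio_tc[]; unused PGs are skipped.
	 */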
862 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
863 		bool configure = false;
864 		bool pfc = false;
865 		bool lossy;
866 		u16 thres;
867 
868 		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
869 			if (prio_tc[j] == i) {
870 				pfc = pfc_en & BIT(j);
871 				configure = true;
872 				break;
873 			}
874 		}
875 
876 		if (!configure)
877 			continue;
878 
879 		lossy = !(pfc || pause_en);
880 		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
881 		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
882 						  pause_en);
883 		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
884 	}
885 
886 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
887 }
888 
889 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
890 				      int mtu, bool pause_en)
891 {
892 	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
893 	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
894 	struct ieee_pfc *my_pfc;
895 	u8 *prio_tc;
896 
897 	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
898 	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
899 
900 	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
901 					    pause_en, my_pfc);
902 }
903 
904 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
905 {
906 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
907 	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
908 	int err;
909 
910 	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
911 	if (err)
912 		return err;
913 	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
914 	if (err)
915 		goto err_span_port_mtu_update;
916 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
917 	if (err)
918 		goto err_port_mtu_set;
919 	dev->mtu = mtu;
920 	return 0;
921 
922 err_port_mtu_set:
923 	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
924 err_span_port_mtu_update:
925 	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
926 	return err;
927 }
928 
929 static int
930 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
931 			     struct rtnl_link_stats64 *stats)
932 {
933 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
934 	struct mlxsw_sp_port_pcpu_stats *p;
935 	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
936 	u32 tx_dropped = 0;
937 	unsigned int start;
938 	int i;
939 
940 	for_each_possible_cpu(i) {
941 		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
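		/* Re-read the per-CPU counters until the sequence counter is
		 * stable, so the 64-bit values are consistent even on 32-bit
		 * hosts racing with the Tx path above.
		 */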
942 		do {
943 			start = u64_stats_fetch_begin_irq(&p->syncp);
944 			rx_packets	= p->rx_packets;
945 			rx_bytes	= p->rx_bytes;
946 			tx_packets	= p->tx_packets;
947 			tx_bytes	= p->tx_bytes;
948 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
949 
950 		stats->rx_packets	+= rx_packets;
951 		stats->rx_bytes		+= rx_bytes;
952 		stats->tx_packets	+= tx_packets;
953 		stats->tx_bytes		+= tx_bytes;
954 		/* tx_dropped is u32, updated without syncp protection. */
955 		tx_dropped	+= p->tx_dropped;
956 	}
957 	stats->tx_dropped	= tx_dropped;
958 	return 0;
959 }
960 
961 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
962 {
963 	switch (attr_id) {
964 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
965 		return true;
966 	}
967 
968 	return false;
969 }
970 
971 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
972 					   void *sp)
973 {
974 	switch (attr_id) {
975 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
976 		return mlxsw_sp_port_get_sw_stats64(dev, sp);
977 	}
978 
979 	return -EINVAL;
980 }
981 
982 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
983 				       int prio, char *ppcnt_pl)
984 {
985 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
986 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
987 
988 	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
989 	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
990 }
991 
992 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
993 				      struct rtnl_link_stats64 *stats)
994 {
995 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
996 	int err;
997 
998 	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
999 					  0, ppcnt_pl);
1000 	if (err)
1001 		goto out;
1002 
1003 	stats->tx_packets =
1004 		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1005 	stats->rx_packets =
1006 		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1007 	stats->tx_bytes =
1008 		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1009 	stats->rx_bytes =
1010 		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1011 	stats->multicast =
1012 		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1013 
1014 	stats->rx_crc_errors =
1015 		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1016 	stats->rx_frame_errors =
1017 		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1018 
1019 	stats->rx_length_errors = (
1020 		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1021 		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1022 		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1023 
1024 	stats->rx_errors = (stats->rx_crc_errors +
1025 		stats->rx_frame_errors + stats->rx_length_errors);
1026 
1027 out:
1028 	return err;
1029 }
1030 
1031 static void
1032 mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
1033 			    struct mlxsw_sp_port_xstats *xstats)
1034 {
1035 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1036 	int err, i;
1037 
1038 	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
1039 					  ppcnt_pl);
1040 	if (!err)
1041 		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
1042 
1043 	for (i = 0; i < TC_MAX_QUEUE; i++) {
1044 		err = mlxsw_sp_port_get_stats_raw(dev,
1045 						  MLXSW_REG_PPCNT_TC_CONG_TC,
1046 						  i, ppcnt_pl);
1047 		if (!err)
1048 			xstats->wred_drop[i] =
1049 				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
1050 
1051 		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
1052 						  i, ppcnt_pl);
1053 		if (err)
1054 			continue;
1055 
1056 		xstats->backlog[i] =
1057 			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1058 		xstats->tail_drop[i] =
1059 			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
1060 	}
1061 
1062 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1063 		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
1064 						  i, ppcnt_pl);
1065 		if (err)
1066 			continue;
1067 
1068 		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
1069 		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
1070 	}
1071 }
1072 
1073 static void update_stats_cache(struct work_struct *work)
1074 {
1075 	struct mlxsw_sp_port *mlxsw_sp_port =
1076 		container_of(work, struct mlxsw_sp_port,
1077 			     periodic_hw_stats.update_dw.work);
1078 
1079 	if (!netif_carrier_ok(mlxsw_sp_port->dev))
1080 		goto out;
1081 
1082 	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1083 				   &mlxsw_sp_port->periodic_hw_stats.stats);
1084 	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
1085 				    &mlxsw_sp_port->periodic_hw_stats.xstats);
1086 
1087 out:
1088 	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1089 			       MLXSW_HW_STATS_UPDATE_TIME);
1090 }
1091 
1092 /* Return the stats from a cache that is updated periodically,
1093  * as this function might get called in an atomic context.
1094  */
1095 static void
1096 mlxsw_sp_port_get_stats64(struct net_device *dev,
1097 			  struct rtnl_link_stats64 *stats)
1098 {
1099 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1100 
1101 	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1102 }
1103 
1104 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1105 				    u16 vid_begin, u16 vid_end,
1106 				    bool is_member, bool untagged)
1107 {
1108 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1109 	char *spvm_pl;
1110 	int err;
1111 
1112 	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1113 	if (!spvm_pl)
1114 		return -ENOMEM;
1115 
1116 	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1117 			    vid_end, is_member, untagged);
1118 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1119 	kfree(spvm_pl);
1120 	return err;
1121 }
1122 
1123 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1124 			   u16 vid_end, bool is_member, bool untagged)
1125 {
1126 	u16 vid, vid_e;
1127 	int err;
1128 
1129 	for (vid = vid_begin; vid <= vid_end;
1130 	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1131 		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1132 			    vid_end);
1133 
1134 		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1135 					       is_member, untagged);
1136 		if (err)
1137 			return err;
1138 	}
1139 
1140 	return 0;
1141 }
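
/* SPVM takes at most MLXSW_REG_SPVM_REC_MAX_COUNT VID records per write, so
 * e.g. a request for VIDs 1..4094 is split by the loop above into multiple
 * register transactions.
 */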
1142 
1143 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1144 				     bool flush_default)
1145 {
1146 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1147 
1148 	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1149 				 &mlxsw_sp_port->vlans_list, list) {
1150 		if (!flush_default &&
1151 		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1152 			continue;
1153 		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1154 	}
1155 }
1156 
1157 static void
1158 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1159 {
1160 	if (mlxsw_sp_port_vlan->bridge_port)
1161 		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1162 	else if (mlxsw_sp_port_vlan->fid)
1163 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1164 }
1165 
1166 struct mlxsw_sp_port_vlan *
1167 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1168 {
1169 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1170 	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
1171 	int err;
1172 
1173 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1174 	if (mlxsw_sp_port_vlan)
1175 		return ERR_PTR(-EEXIST);
1176 
1177 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1178 	if (err)
1179 		return ERR_PTR(err);
1180 
1181 	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1182 	if (!mlxsw_sp_port_vlan) {
1183 		err = -ENOMEM;
1184 		goto err_port_vlan_alloc;
1185 	}
1186 
1187 	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1188 	mlxsw_sp_port_vlan->vid = vid;
1189 	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1190 
1191 	return mlxsw_sp_port_vlan;
1192 
1193 err_port_vlan_alloc:
1194 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1195 	return ERR_PTR(err);
1196 }
1197 
1198 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1199 {
1200 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1201 	u16 vid = mlxsw_sp_port_vlan->vid;
1202 
1203 	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
1204 	list_del(&mlxsw_sp_port_vlan->list);
1205 	kfree(mlxsw_sp_port_vlan);
1206 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1207 }
1208 
1209 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1210 				 __be16 __always_unused proto, u16 vid)
1211 {
1212 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1213 
1214 	/* VLAN 0 is added to HW filter when device goes up, but it is
1215 	 * reserved in our case, so simply return.
1216 	 */
1217 	if (!vid)
1218 		return 0;
1219 
1220 	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1221 }
1222 
1223 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1224 				  __be16 __always_unused proto, u16 vid)
1225 {
1226 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1227 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1228 
1229 	/* VLAN 0 is removed from HW filter when device goes down, but
1230 	 * it is reserved in our case, so simply return.
1231 	 */
1232 	if (!vid)
1233 		return 0;
1234 
1235 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1236 	if (!mlxsw_sp_port_vlan)
1237 		return 0;
1238 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1239 
1240 	return 0;
1241 }
1242 
1243 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1244 					    size_t len)
1245 {
1246 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1247 
1248 	return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
1249 						  mlxsw_sp_port->local_port,
1250 						  name, len);
1251 }
1252 
1253 static struct mlxsw_sp_port_mall_tc_entry *
1254 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1255 				 unsigned long cookie)
{
1256 	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1257 
1258 	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1259 		if (mall_tc_entry->cookie == cookie)
1260 			return mall_tc_entry;
1261 
1262 	return NULL;
1263 }
1264 
1265 static int
1266 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1267 				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1268 				      const struct tc_action *a,
1269 				      bool ingress)
1270 {
1271 	enum mlxsw_sp_span_type span_type;
1272 	struct net_device *to_dev;
1273 
1274 	to_dev = tcf_mirred_dev(a);
1275 	if (!to_dev) {
1276 		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1277 		return -EINVAL;
1278 	}
1279 
1280 	mirror->ingress = ingress;
1281 	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1282 	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
1283 					true, &mirror->span_id);
1284 }
1285 
1286 static void
1287 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1288 				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1289 {
1290 	enum mlxsw_sp_span_type span_type;
1291 
1292 	span_type = mirror->ingress ?
1293 			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1294 	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
1295 				 span_type, true);
1296 }
1297 
1298 static int
1299 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1300 				      struct tc_cls_matchall_offload *cls,
1301 				      const struct tc_action *a,
1302 				      bool ingress)
1303 {
1304 	int err;
1305 
1306 	if (!mlxsw_sp_port->sample)
1307 		return -EOPNOTSUPP;
1308 	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1309 		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1310 		return -EEXIST;
1311 	}
1312 	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1313 		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1314 		return -EOPNOTSUPP;
1315 	}
1316 
1317 	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1318 			   tcf_sample_psample_group(a));
1319 	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1320 	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1321 	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1322 
1323 	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1324 	if (err)
1325 		goto err_port_sample_set;
1326 	return 0;
1327 
1328 err_port_sample_set:
1329 	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1330 	return err;
1331 }
1332 
1333 static void
1334 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1335 {
1336 	if (!mlxsw_sp_port->sample)
1337 		return;
1338 
1339 	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1340 	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1341 }
1342 
1343 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1344 					  struct tc_cls_matchall_offload *f,
1345 					  bool ingress)
1346 {
1347 	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1348 	__be16 protocol = f->common.protocol;
1349 	const struct tc_action *a;
1350 	int err;
1351 
1352 	if (!tcf_exts_has_one_action(f->exts)) {
1353 		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
1354 		return -EOPNOTSUPP;
1355 	}
1356 
1357 	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1358 	if (!mall_tc_entry)
1359 		return -ENOMEM;
1360 	mall_tc_entry->cookie = f->cookie;
1361 
1362 	a = tcf_exts_first_action(f->exts);
1363 
1364 	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1365 		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1366 
1367 		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1368 		mirror = &mall_tc_entry->mirror;
1369 		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1370 							    mirror, a, ingress);
1371 	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1372 		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1373 		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
1374 							    a, ingress);
1375 	} else {
1376 		err = -EOPNOTSUPP;
1377 	}
1378 
1379 	if (err)
1380 		goto err_add_action;
1381 
1382 	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
1383 	return 0;
1384 
1385 err_add_action:
1386 	kfree(mall_tc_entry);
1387 	return err;
1388 }
1389 
1390 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1391 					   struct tc_cls_matchall_offload *f)
1392 {
1393 	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1394 
1395 	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1396 							 f->cookie);
1397 	if (!mall_tc_entry) {
1398 		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1399 		return;
1400 	}
1401 	list_del(&mall_tc_entry->list);
1402 
1403 	switch (mall_tc_entry->type) {
1404 	case MLXSW_SP_PORT_MALL_MIRROR:
1405 		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1406 						      &mall_tc_entry->mirror);
1407 		break;
1408 	case MLXSW_SP_PORT_MALL_SAMPLE:
1409 		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1410 		break;
1411 	default:
1412 		WARN_ON(1);
1413 	}
1414 
1415 	kfree(mall_tc_entry);
1416 }
1417 
1418 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1419 					  struct tc_cls_matchall_offload *f,
1420 					  bool ingress)
1421 {
1422 	switch (f->command) {
1423 	case TC_CLSMATCHALL_REPLACE:
1424 		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
1425 						      ingress);
1426 	case TC_CLSMATCHALL_DESTROY:
1427 		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
1428 		return 0;
1429 	default:
1430 		return -EOPNOTSUPP;
1431 	}
1432 }
1433 
1434 static int
1435 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
1436 			     struct tc_cls_flower_offload *f)
1437 {
1438 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
1439 
1440 	switch (f->command) {
1441 	case TC_CLSFLOWER_REPLACE:
1442 		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
1443 	case TC_CLSFLOWER_DESTROY:
1444 		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
1445 		return 0;
1446 	case TC_CLSFLOWER_STATS:
1447 		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
1448 	case TC_CLSFLOWER_TMPLT_CREATE:
1449 		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
1450 	case TC_CLSFLOWER_TMPLT_DESTROY:
1451 		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
1452 		return 0;
1453 	default:
1454 		return -EOPNOTSUPP;
1455 	}
1456 }
1457 
1458 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
1459 					       void *type_data,
1460 					       void *cb_priv, bool ingress)
1461 {
1462 	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
1463 
1464 	switch (type) {
1465 	case TC_SETUP_CLSMATCHALL:
1466 		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
1467 						   type_data))
1468 			return -EOPNOTSUPP;
1469 
1470 		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
1471 						      ingress);
1472 	case TC_SETUP_CLSFLOWER:
1473 		return 0;
1474 	default:
1475 		return -EOPNOTSUPP;
1476 	}
1477 }
1478 
1479 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
1480 						  void *type_data,
1481 						  void *cb_priv)
1482 {
1483 	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
1484 						   cb_priv, true);
1485 }
1486 
1487 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
1488 						  void *type_data,
1489 						  void *cb_priv)
1490 {
1491 	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
1492 						   cb_priv, false);
1493 }
1494 
1495 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
1496 					     void *type_data, void *cb_priv)
1497 {
1498 	struct mlxsw_sp_acl_block *acl_block = cb_priv;
1499 
1500 	switch (type) {
1501 	case TC_SETUP_CLSMATCHALL:
1502 		return 0;
1503 	case TC_SETUP_CLSFLOWER:
1504 		if (mlxsw_sp_acl_block_disabled(acl_block))
1505 			return -EOPNOTSUPP;
1506 
1507 		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
1508 	default:
1509 		return -EOPNOTSUPP;
1510 	}
1511 }
1512 
1513 static int
1514 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
1515 				    struct tcf_block *block, bool ingress,
1516 				    struct netlink_ext_ack *extack)
1517 {
1518 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1519 	struct mlxsw_sp_acl_block *acl_block;
1520 	struct tcf_block_cb *block_cb;
1521 	int err;
1522 
1523 	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
1524 				       mlxsw_sp);
1525 	if (!block_cb) {
1526 		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
1527 		if (!acl_block)
1528 			return -ENOMEM;
1529 		block_cb = __tcf_block_cb_register(block,
1530 						   mlxsw_sp_setup_tc_block_cb_flower,
1531 						   mlxsw_sp, acl_block, extack);
1532 		if (IS_ERR(block_cb)) {
1533 			err = PTR_ERR(block_cb);
1534 			goto err_cb_register;
1535 		}
1536 	} else {
1537 		acl_block = tcf_block_cb_priv(block_cb);
1538 	}
1539 	tcf_block_cb_incref(block_cb);
1540 	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
1541 				      mlxsw_sp_port, ingress);
1542 	if (err)
1543 		goto err_block_bind;
1544 
1545 	if (ingress)
1546 		mlxsw_sp_port->ing_acl_block = acl_block;
1547 	else
1548 		mlxsw_sp_port->eg_acl_block = acl_block;
1549 
1550 	return 0;
1551 
1552 err_block_bind:
1553 	if (!tcf_block_cb_decref(block_cb)) {
1554 		__tcf_block_cb_unregister(block, block_cb);
1555 err_cb_register:
1556 		mlxsw_sp_acl_block_destroy(acl_block);
1557 	}
1558 	return err;
1559 }
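
/* A tcf_block may be shared by several binding points; the ACL block and
 * its callback are created once and refcounted in the bind function above,
 * so later binds of the same block reuse the existing mlxsw_sp_acl_block.
 */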
1560 
1561 static void
1562 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
1563 				      struct tcf_block *block, bool ingress)
1564 {
1565 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1566 	struct mlxsw_sp_acl_block *acl_block;
1567 	struct tcf_block_cb *block_cb;
1568 	int err;
1569 
1570 	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
1571 				       mlxsw_sp);
1572 	if (!block_cb)
1573 		return;
1574 
1575 	if (ingress)
1576 		mlxsw_sp_port->ing_acl_block = NULL;
1577 	else
1578 		mlxsw_sp_port->eg_acl_block = NULL;
1579 
1580 	acl_block = tcf_block_cb_priv(block_cb);
1581 	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
1582 					mlxsw_sp_port, ingress);
1583 	if (!err && !tcf_block_cb_decref(block_cb)) {
1584 		__tcf_block_cb_unregister(block, block_cb);
1585 		mlxsw_sp_acl_block_destroy(acl_block);
1586 	}
1587 }
1588 
1589 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1590 				   struct tc_block_offload *f)
1591 {
1592 	tc_setup_cb_t *cb;
1593 	bool ingress;
1594 	int err;
1595 
1596 	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
1597 		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
1598 		ingress = true;
1599 	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
1600 		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
1601 		ingress = false;
1602 	} else {
1603 		return -EOPNOTSUPP;
1604 	}
1605 
1606 	switch (f->command) {
1607 	case TC_BLOCK_BIND:
1608 		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
1609 					    mlxsw_sp_port, f->extack);
1610 		if (err)
1611 			return err;
1612 		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
1613 							  f->block, ingress,
1614 							  f->extack);
1615 		if (err) {
1616 			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
1617 			return err;
1618 		}
1619 		return 0;
1620 	case TC_BLOCK_UNBIND:
1621 		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
1622 						      f->block, ingress);
1623 		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
1624 		return 0;
1625 	default:
1626 		return -EOPNOTSUPP;
1627 	}
1628 }
1629 
1630 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1631 			     void *type_data)
1632 {
1633 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1634 
1635 	switch (type) {
1636 	case TC_SETUP_BLOCK:
1637 		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1638 	case TC_SETUP_QDISC_RED:
1639 		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1640 	case TC_SETUP_QDISC_PRIO:
1641 		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1642 	default:
1643 		return -EOPNOTSUPP;
1644 	}
1645 }
1646 
1648 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1649 {
1650 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1651 
1652 	if (!enable) {
1653 		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
1654 		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
1655 		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
1656 			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1657 			return -EINVAL;
1658 		}
1659 		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
1660 		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
1661 	} else {
1662 		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
1663 		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
1664 	}
1665 	return 0;
1666 }
1667 
1668 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1669 
1670 static int mlxsw_sp_handle_feature(struct net_device *dev,
1671 				   netdev_features_t wanted_features,
1672 				   netdev_features_t feature,
1673 				   mlxsw_sp_feature_handler feature_handler)
1674 {
1675 	netdev_features_t changes = wanted_features ^ dev->features;
1676 	bool enable = !!(wanted_features & feature);
1677 	int err;
1678 
1679 	if (!(changes & feature))
1680 		return 0;
1681 
1682 	err = feature_handler(dev, enable);
1683 	if (err) {
1684 		netdev_err(dev, "%s feature %pNF failed, err %d\n",
1685 			   enable ? "Enable" : "Disable", &feature, err);
1686 		return err;
1687 	}
1688 
1689 	if (enable)
1690 		dev->features |= feature;
1691 	else
1692 		dev->features &= ~feature;
1693 
1694 	return 0;
1695 }

1696 static int mlxsw_sp_set_features(struct net_device *dev,
1697 				 netdev_features_t features)
1698 {
1699 	return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1700 				       mlxsw_sp_feature_hw_tc);
1701 }
1702 
1703 static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev,
1704 					    struct netdev_phys_item_id *ppid)
1705 {
1706 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1707 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1708 
1709 	ppid->id_len = sizeof(mlxsw_sp->base_mac);
1710 	memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len);
1711 
1712 	return 0;
1713 }
1714 
1715 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1716 	.ndo_open		= mlxsw_sp_port_open,
1717 	.ndo_stop		= mlxsw_sp_port_stop,
1718 	.ndo_start_xmit		= mlxsw_sp_port_xmit,
1719 	.ndo_setup_tc           = mlxsw_sp_setup_tc,
1720 	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
1721 	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
1722 	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
1723 	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
1724 	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
1725 	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
1726 	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
1727 	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
1728 	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
1729 	.ndo_set_features	= mlxsw_sp_set_features,
1730 	.ndo_get_port_parent_id	= mlxsw_sp_port_get_port_parent_id,
1731 };
1732 
1733 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1734 				      struct ethtool_drvinfo *drvinfo)
1735 {
1736 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1737 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1738 
1739 	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
1740 		sizeof(drvinfo->driver));
1741 	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1742 		sizeof(drvinfo->version));
1743 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1744 		 "%d.%d.%d",
1745 		 mlxsw_sp->bus_info->fw_rev.major,
1746 		 mlxsw_sp->bus_info->fw_rev.minor,
1747 		 mlxsw_sp->bus_info->fw_rev.subminor);
1748 	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1749 		sizeof(drvinfo->bus_info));
1750 }
1751 
1752 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1753 					 struct ethtool_pauseparam *pause)
1754 {
1755 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1756 
1757 	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1758 	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1759 }
1760 
1761 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1762 				   struct ethtool_pauseparam *pause)
1763 {
1764 	char pfcc_pl[MLXSW_REG_PFCC_LEN];
1765 
1766 	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1767 	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1768 	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1769 
1770 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1771 			       pfcc_pl);
1772 }
1773 
1774 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1775 					struct ethtool_pauseparam *pause)
1776 {
1777 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1778 	bool pause_en = pause->tx_pause || pause->rx_pause;
1779 	int err;
1780 
1781 	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1782 		netdev_err(dev, "PFC already enabled on port\n");
1783 		return -EINVAL;
1784 	}
1785 
1786 	if (pause->autoneg) {
1787 		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1788 		return -EINVAL;
1789 	}
1790 
1791 	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1792 	if (err) {
1793 		netdev_err(dev, "Failed to configure port's headroom\n");
1794 		return err;
1795 	}
1796 
1797 	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1798 	if (err) {
1799 		netdev_err(dev, "Failed to set PAUSE parameters\n");
1800 		goto err_port_pause_configure;
1801 	}
1802 
1803 	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1804 	mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1805 
1806 	return 0;
1807 
1808 err_port_pause_configure:
1809 	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1810 	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1811 	return err;
1812 }
1813 
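/* Descriptor for a single ethtool counter: the string exposed to user
 * space and a getter that extracts the value from a PPCNT register
 * payload. Counters with cells_bytes set are reported by the device in
 * buffer cells and are converted to bytes before being returned.
 */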
1814 struct mlxsw_sp_port_hw_stats {
1815 	char str[ETH_GSTRING_LEN];
1816 	u64 (*getter)(const char *payload);
1817 	bool cells_bytes;
1818 };
1819 
1820 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1821 	{
1822 		.str = "a_frames_transmitted_ok",
1823 		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1824 	},
1825 	{
1826 		.str = "a_frames_received_ok",
1827 		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1828 	},
1829 	{
1830 		.str = "a_frame_check_sequence_errors",
1831 		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1832 	},
1833 	{
1834 		.str = "a_alignment_errors",
1835 		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1836 	},
1837 	{
1838 		.str = "a_octets_transmitted_ok",
1839 		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1840 	},
1841 	{
1842 		.str = "a_octets_received_ok",
1843 		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1844 	},
1845 	{
1846 		.str = "a_multicast_frames_xmitted_ok",
1847 		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1848 	},
1849 	{
1850 		.str = "a_broadcast_frames_xmitted_ok",
1851 		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1852 	},
1853 	{
1854 		.str = "a_multicast_frames_received_ok",
1855 		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1856 	},
1857 	{
1858 		.str = "a_broadcast_frames_received_ok",
1859 		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1860 	},
1861 	{
1862 		.str = "a_in_range_length_errors",
1863 		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1864 	},
1865 	{
1866 		.str = "a_out_of_range_length_field",
1867 		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1868 	},
1869 	{
1870 		.str = "a_frame_too_long_errors",
1871 		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1872 	},
1873 	{
1874 		.str = "a_symbol_error_during_carrier",
1875 		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1876 	},
1877 	{
1878 		.str = "a_mac_control_frames_transmitted",
1879 		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1880 	},
1881 	{
1882 		.str = "a_mac_control_frames_received",
1883 		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1884 	},
1885 	{
1886 		.str = "a_unsupported_opcodes_received",
1887 		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1888 	},
1889 	{
1890 		.str = "a_pause_mac_ctrl_frames_received",
1891 		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1892 	},
1893 	{
1894 		.str = "a_pause_mac_ctrl_frames_xmitted",
1895 		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1896 	},
1897 };
1898 
1899 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1900 
1901 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
1902 	{
1903 		.str = "if_in_discards",
1904 		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
1905 	},
1906 	{
1907 		.str = "if_out_discards",
1908 		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
1909 	},
1910 	{
1911 		.str = "if_out_errors",
1912 		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
1913 	},
1914 };
1915 
1916 #define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
1917 	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
1918 
1919 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
1920 	{
1921 		.str = "ether_stats_undersize_pkts",
1922 		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
1923 	},
1924 	{
1925 		.str = "ether_stats_oversize_pkts",
1926 		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
1927 	},
1928 	{
1929 		.str = "ether_stats_fragments",
1930 		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
1931 	},
1932 	{
1933 		.str = "ether_pkts64octets",
1934 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
1935 	},
1936 	{
1937 		.str = "ether_pkts65to127octets",
1938 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
1939 	},
1940 	{
1941 		.str = "ether_pkts128to255octets",
1942 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
1943 	},
1944 	{
1945 		.str = "ether_pkts256to511octets",
1946 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
1947 	},
1948 	{
1949 		.str = "ether_pkts512to1023octets",
1950 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
1951 	},
1952 	{
1953 		.str = "ether_pkts1024to1518octets",
1954 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
1955 	},
1956 	{
1957 		.str = "ether_pkts1519to2047octets",
1958 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
1959 	},
1960 	{
1961 		.str = "ether_pkts2048to4095octets",
1962 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
1963 	},
1964 	{
1965 		.str = "ether_pkts4096to8191octets",
1966 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
1967 	},
1968 	{
1969 		.str = "ether_pkts8192to10239octets",
1970 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
1971 	},
1972 };
1973 
1974 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
1975 	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
1976 
1977 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
1978 	{
1979 		.str = "dot3stats_fcs_errors",
1980 		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
1981 	},
1982 	{
1983 		.str = "dot3stats_symbol_errors",
1984 		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
1985 	},
1986 	{
1987 		.str = "dot3control_in_unknown_opcodes",
1988 		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
1989 	},
1990 	{
1991 		.str = "dot3in_pause_frames",
1992 		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
1993 	},
1994 };
1995 
1996 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
1997 	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
1998 
1999 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
2000 	{
2001 		.str = "discard_ingress_general",
2002 		.getter = mlxsw_reg_ppcnt_ingress_general_get,
2003 	},
2004 	{
2005 		.str = "discard_ingress_policy_engine",
2006 		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
2007 	},
2008 	{
2009 		.str = "discard_ingress_vlan_membership",
2010 		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
2011 	},
2012 	{
2013 		.str = "discard_ingress_tag_frame_type",
2014 		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
2015 	},
2016 	{
2017 		.str = "discard_egress_vlan_membership",
2018 		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
2019 	},
2020 	{
2021 		.str = "discard_loopback_filter",
2022 		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
2023 	},
2024 	{
2025 		.str = "discard_egress_general",
2026 		.getter = mlxsw_reg_ppcnt_egress_general_get,
2027 	},
2028 	{
2029 		.str = "discard_egress_hoq",
2030 		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
2031 	},
2032 	{
2033 		.str = "discard_egress_policy_engine",
2034 		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
2035 	},
2036 	{
2037 		.str = "discard_ingress_tx_link_down",
2038 		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
2039 	},
2040 	{
2041 		.str = "discard_egress_stp_filter",
2042 		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
2043 	},
2044 	{
2045 		.str = "discard_egress_sll",
2046 		.getter = mlxsw_reg_ppcnt_egress_sll_get,
2047 	},
2048 };
2049 
2050 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
2051 	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
2052 
2053 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
2054 	{
2055 		.str = "rx_octets_prio",
2056 		.getter = mlxsw_reg_ppcnt_rx_octets_get,
2057 	},
2058 	{
2059 		.str = "rx_frames_prio",
2060 		.getter = mlxsw_reg_ppcnt_rx_frames_get,
2061 	},
2062 	{
2063 		.str = "tx_octets_prio",
2064 		.getter = mlxsw_reg_ppcnt_tx_octets_get,
2065 	},
2066 	{
2067 		.str = "tx_frames_prio",
2068 		.getter = mlxsw_reg_ppcnt_tx_frames_get,
2069 	},
2070 	{
2071 		.str = "rx_pause_prio",
2072 		.getter = mlxsw_reg_ppcnt_rx_pause_get,
2073 	},
2074 	{
2075 		.str = "rx_pause_duration_prio",
2076 		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
2077 	},
2078 	{
2079 		.str = "tx_pause_prio",
2080 		.getter = mlxsw_reg_ppcnt_tx_pause_get,
2081 	},
2082 	{
2083 		.str = "tx_pause_duration_prio",
2084 		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
2085 	},
2086 };
2087 
2088 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
2089 
2090 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
2091 	{
2092 		.str = "tc_transmit_queue_tc",
2093 		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
2094 		.cells_bytes = true,
2095 	},
2096 	{
2097 		.str = "tc_no_buffer_discard_uc_tc",
2098 		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
2099 	},
2100 };
2101 
2102 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
2103 
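/* Total number of ethtool counters: the five flat counter groups, plus
 * the per-priority group repeated for each of the 8 IEEE priorities and
 * the per-TC group repeated for each of the TC_MAX_QUEUE queues.
 */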
2104 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
2105 					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
2106 					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
2107 					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
2108 					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
2109 					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
2110 					  IEEE_8021QAZ_MAX_TCS) + \
2111 					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
2112 					  TC_MAX_QUEUE))
2113 
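/* Per-priority and per-TC counter names are generated by suffixing the
 * base string with the priority/TC number, e.g. "rx_octets_prio_3".
 */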
2114 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2115 {
2116 	int i;
2117 
2118 	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2119 		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2120 			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
2121 		*p += ETH_GSTRING_LEN;
2122 	}
2123 }
2124 
2125 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2126 {
2127 	int i;
2128 
2129 	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2130 		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2131 			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
2132 		*p += ETH_GSTRING_LEN;
2133 	}
2134 }
2135 
2136 static void mlxsw_sp_port_get_strings(struct net_device *dev,
2137 				      u32 stringset, u8 *data)
2138 {
2139 	u8 *p = data;
2140 	int i;
2141 
2142 	switch (stringset) {
2143 	case ETH_SS_STATS:
2144 		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2145 			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2146 			       ETH_GSTRING_LEN);
2147 			p += ETH_GSTRING_LEN;
2148 		}
2149 
2150 		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
2151 			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
2152 			       ETH_GSTRING_LEN);
2153 			p += ETH_GSTRING_LEN;
2154 		}
2155 
2156 		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
2157 			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
2158 			       ETH_GSTRING_LEN);
2159 			p += ETH_GSTRING_LEN;
2160 		}
2161 
2162 		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
2163 			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
2164 			       ETH_GSTRING_LEN);
2165 			p += ETH_GSTRING_LEN;
2166 		}
2167 
2168 		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
2169 			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
2170 			       ETH_GSTRING_LEN);
2171 			p += ETH_GSTRING_LEN;
2172 		}
2173 
2174 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2175 			mlxsw_sp_port_get_prio_strings(&p, i);
2176 
2177 		for (i = 0; i < TC_MAX_QUEUE; i++)
2178 			mlxsw_sp_port_get_tc_strings(&p, i);
2179 
2180 		break;
2181 	}
2182 }
2183 
2184 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2185 				     enum ethtool_phys_id_state state)
2186 {
2187 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2188 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2189 	char mlcr_pl[MLXSW_REG_MLCR_LEN];
2190 	bool active;
2191 
2192 	switch (state) {
2193 	case ETHTOOL_ID_ACTIVE:
2194 		active = true;
2195 		break;
2196 	case ETHTOOL_ID_INACTIVE:
2197 		active = false;
2198 		break;
2199 	default:
2200 		return -EOPNOTSUPP;
2201 	}
2202 
2203 	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2204 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2205 }
2206 
2207 static int
2208 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2209 			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2210 {
2211 	switch (grp) {
2212 	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2213 		*p_hw_stats = mlxsw_sp_port_hw_stats;
2214 		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2215 		break;
2216 	case MLXSW_REG_PPCNT_RFC_2863_CNT:
2217 		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
2218 		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
2219 		break;
2220 	case MLXSW_REG_PPCNT_RFC_2819_CNT:
2221 		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
2222 		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
2223 		break;
2224 	case MLXSW_REG_PPCNT_RFC_3635_CNT:
2225 		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
2226 		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
2227 		break;
2228 	case MLXSW_REG_PPCNT_DISCARD_CNT:
2229 		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
2230 		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
2231 		break;
2232 	case MLXSW_REG_PPCNT_PRIO_CNT:
2233 		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2234 		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2235 		break;
2236 	case MLXSW_REG_PPCNT_TC_CNT:
2237 		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2238 		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2239 		break;
2240 	default:
2241 		WARN_ON(1);
2242 		return -EOPNOTSUPP;
2243 	}
2244 	return 0;
2245 }
2246 
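/* Fetch one PPCNT counter group from the device and run each counter's
 * getter over the returned payload, converting cell-based counters to
 * bytes on the way out.
 */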
2247 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2248 				      enum mlxsw_reg_ppcnt_grp grp, int prio,
2249 				      u64 *data, int data_index)
2250 {
2251 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2252 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2253 	struct mlxsw_sp_port_hw_stats *hw_stats;
2254 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
2255 	int i, len;
2256 	int err;
2257 
2258 	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2259 	if (err)
2260 		return;
2261 	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
2262 	for (i = 0; i < len; i++) {
2263 		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
2264 		if (!hw_stats[i].cells_bytes)
2265 			continue;
2266 		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2267 							    data[data_index + i]);
2268 	}
2269 }
2270 
2271 static void mlxsw_sp_port_get_stats(struct net_device *dev,
2272 				    struct ethtool_stats *stats, u64 *data)
2273 {
2274 	int i, data_index = 0;
2275 
2276 	/* IEEE 802.3 Counters */
2277 	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2278 				  data, data_index);
2279 	data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2280 
2281 	/* RFC 2863 Counters */
2282 	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
2283 				  data, data_index);
2284 	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
2285 
2286 	/* RFC 2819 Counters */
2287 	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
2288 				  data, data_index);
2289 	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
2290 
2291 	/* RFC 3635 Counters */
2292 	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
2293 				  data, data_index);
2294 	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
2295 
2296 	/* Discard Counters */
2297 	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
2298 				  data, data_index);
2299 	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
2300 
2301 	/* Per-Priority Counters */
2302 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2303 		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2304 					  data, data_index);
2305 		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2306 	}
2307 
2308 	/* Per-TC Counters */
2309 	for (i = 0; i < TC_MAX_QUEUE; i++) {
2310 		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2311 					  data, data_index);
2312 		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2313 	}
2314 }
2315 
2316 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2317 {
2318 	switch (sset) {
2319 	case ETH_SS_STATS:
2320 		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2321 	default:
2322 		return -EOPNOTSUPP;
2323 	}
2324 }
2325 
2326 struct mlxsw_sp_port_link_mode {
2327 	enum ethtool_link_mode_bit_indices mask_ethtool;
2328 	u32 mask;
2329 	u32 speed;
2330 };
2331 
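/* Mapping between PTYS register protocol bits and ethtool link modes.
 * Several PTYS bits may map to the same ethtool bit (e.g. SGMII and
 * 1000BASE-KX); when reporting speed, the first matching entry wins.
 */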
2332 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2333 	{
2334 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2335 		.mask_ethtool	= ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2336 		.speed		= SPEED_100,
2337 	},
2338 	{
2339 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2340 				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2341 		.mask_ethtool	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2342 		.speed		= SPEED_1000,
2343 	},
2344 	{
2345 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2346 		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2347 		.speed		= SPEED_10000,
2348 	},
2349 	{
2350 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2351 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2352 		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2353 		.speed		= SPEED_10000,
2354 	},
2355 	{
2356 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2357 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2358 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2359 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2360 		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2361 		.speed		= SPEED_10000,
2362 	},
2363 	{
2364 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2365 		.mask_ethtool	= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2366 		.speed		= SPEED_20000,
2367 	},
2368 	{
2369 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2370 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2371 		.speed		= SPEED_40000,
2372 	},
2373 	{
2374 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2375 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2376 		.speed		= SPEED_40000,
2377 	},
2378 	{
2379 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2380 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2381 		.speed		= SPEED_40000,
2382 	},
2383 	{
2384 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2385 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2386 		.speed		= SPEED_40000,
2387 	},
2388 	{
2389 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2390 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2391 		.speed		= SPEED_25000,
2392 	},
2393 	{
2394 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2395 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2396 		.speed		= SPEED_25000,
2397 	},
2398 	{
2399 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2400 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2401 		.speed		= SPEED_25000,
2402 	},
2408 	{
2409 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2410 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2411 		.speed		= SPEED_50000,
2412 	},
2413 	{
2414 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2415 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2416 		.speed		= SPEED_50000,
2417 	},
2418 	{
2419 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2420 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2421 		.speed		= SPEED_50000,
2422 	},
2423 	{
2424 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2425 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2426 		.speed		= SPEED_56000,
2427 	},
2428 	{
2429 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2430 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2431 		.speed		= SPEED_56000,
2432 	},
2433 	{
2434 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2435 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2436 		.speed		= SPEED_56000,
2437 	},
2438 	{
2439 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2440 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2441 		.speed		= SPEED_56000,
2442 	},
2443 	{
2444 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2445 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2446 		.speed		= SPEED_100000,
2447 	},
2448 	{
2449 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2450 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2451 		.speed		= SPEED_100000,
2452 	},
2453 	{
2454 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2455 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2456 		.speed		= SPEED_100000,
2457 	},
2458 	{
2459 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2460 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2461 		.speed		= SPEED_100000,
2462 	},
2463 };
2464 
2465 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2466 
2467 static void
2468 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2469 				  struct ethtool_link_ksettings *cmd)
2470 {
2471 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2472 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2473 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2474 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2475 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2476 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2477 		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2478 
2479 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2480 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2481 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2482 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2483 			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2484 		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2485 }
2486 
2487 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2488 {
2489 	int i;
2490 
2491 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2492 		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2493 			__set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2494 				  mode);
2495 	}
2496 }
2497 
2498 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2499 					    struct ethtool_link_ksettings *cmd)
2500 {
2501 	u32 speed = SPEED_UNKNOWN;
2502 	u8 duplex = DUPLEX_UNKNOWN;
2503 	int i;
2504 
2505 	if (!carrier_ok)
2506 		goto out;
2507 
2508 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2509 		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2510 			speed = mlxsw_sp_port_link_mode[i].speed;
2511 			duplex = DUPLEX_FULL;
2512 			break;
2513 		}
2514 	}
2515 out:
2516 	cmd->base.speed = speed;
2517 	cmd->base.duplex = duplex;
2518 }
2519 
2520 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2521 {
2522 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2523 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2524 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2525 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2526 		return PORT_FIBRE;
2527 
2528 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2529 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2530 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2531 		return PORT_DA;
2532 
2533 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2534 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2535 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2536 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2537 		return PORT_NONE;
2538 
2539 	return PORT_OTHER;
2540 }
2541 
2542 static u32
2543 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2544 {
2545 	u32 ptys_proto = 0;
2546 	int i;
2547 
2548 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2549 		if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2550 			     cmd->link_modes.advertising))
2551 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2552 	}
2553 	return ptys_proto;
2554 }
2555 
2556 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2557 {
2558 	u32 ptys_proto = 0;
2559 	int i;
2560 
2561 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2562 		if (speed == mlxsw_sp_port_link_mode[i].speed)
2563 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2564 	}
2565 	return ptys_proto;
2566 }
2567 
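/* Build a PTYS mask of all link modes whose speed does not exceed
 * upper_speed. Used when enabling a port by width: e.g. an upper speed
 * of 50000 selects every mode from 100 Mb/s up to and including 50G.
 */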
2568 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2569 {
2570 	u32 ptys_proto = 0;
2571 	int i;
2572 
2573 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2574 		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2575 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2576 	}
2577 	return ptys_proto;
2578 }
2579 
2580 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2581 					     struct ethtool_link_ksettings *cmd)
2582 {
2583 	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2584 	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2585 	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2586 
2587 	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2588 	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2589 }
2590 
2591 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2592 					     struct ethtool_link_ksettings *cmd)
2593 {
2594 	if (!autoneg)
2595 		return;
2596 
2597 	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2598 	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2599 }
2600 
2601 static void
2602 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2603 				    struct ethtool_link_ksettings *cmd)
2604 {
2605 	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2606 		return;
2607 
2608 	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2609 	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2610 }
2611 
2612 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2613 					    struct ethtool_link_ksettings *cmd)
2614 {
2615 	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2616 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2617 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2618 	char ptys_pl[MLXSW_REG_PTYS_LEN];
2619 	u8 autoneg_status;
2620 	bool autoneg;
2621 	int err;
2622 
2623 	autoneg = mlxsw_sp_port->link.autoneg;
2624 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
2625 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2626 	if (err)
2627 		return err;
2628 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2629 				  &eth_proto_oper);
2630 
2631 	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2632 
2633 	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2634 
2635 	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2636 	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2637 	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2638 
2639 	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2640 	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2641 	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2642 					cmd);
2643 
2644 	return 0;
2645 }
2646 
2647 static int
2648 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2649 				 const struct ethtool_link_ksettings *cmd)
2650 {
2651 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2652 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2653 	char ptys_pl[MLXSW_REG_PTYS_LEN];
2654 	u32 eth_proto_cap, eth_proto_new;
2655 	bool autoneg;
2656 	int err;
2657 
2658 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
2659 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2660 	if (err)
2661 		return err;
2662 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2663 
2664 	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2665 	eth_proto_new = autoneg ?
2666 		mlxsw_sp_to_ptys_advert_link(cmd) :
2667 		mlxsw_sp_to_ptys_speed(cmd->base.speed);
2668 
2669 	eth_proto_new = eth_proto_new & eth_proto_cap;
2670 	if (!eth_proto_new) {
2671 		netdev_err(dev, "No supported speed requested\n");
2672 		return -EINVAL;
2673 	}
2674 
2675 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2676 				eth_proto_new, autoneg);
2677 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2678 	if (err)
2679 		return err;
2680 
2681 	if (!netif_running(dev))
2682 		return 0;
2683 
2684 	mlxsw_sp_port->link.autoneg = autoneg;
2685 
2686 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2687 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2688 
2689 	return 0;
2690 }
2691 
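/* Firmware flashing can take a long time, so RTNL (held by the ethtool
 * core when this is entered) is dropped for the duration of the flash,
 * and a reference is taken on the netdev to keep it alive meanwhile.
 */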
2692 static int mlxsw_sp_flash_device(struct net_device *dev,
2693 				 struct ethtool_flash *flash)
2694 {
2695 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2696 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2697 	const struct firmware *firmware;
2698 	int err;
2699 
2700 	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2701 		return -EOPNOTSUPP;
2702 
2703 	dev_hold(dev);
2704 	rtnl_unlock();
2705 
2706 	err = request_firmware_direct(&firmware, flash->data, &dev->dev);
2707 	if (err)
2708 		goto out;
2709 	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
2710 	release_firmware(firmware);
2711 out:
2712 	rtnl_lock();
2713 	dev_put(dev);
2714 	return err;
2715 }
2716 
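/* SFP/QSFP module EEPROMs are accessed in 256-byte pages: the lower page
 * sits at I2C address 0x50 and the upper page at 0x51. Reads that would
 * cross the page boundary are clamped to the end of the lower page and
 * completed by the caller's read loop.
 */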
2717 #define MLXSW_SP_I2C_ADDR_LOW 0x50
2718 #define MLXSW_SP_I2C_ADDR_HIGH 0x51
2719 #define MLXSW_SP_EEPROM_PAGE_LENGTH 256
2720 
2721 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
2722 					u16 offset, u16 size, void *data,
2723 					unsigned int *p_read_size)
2724 {
2725 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2726 	char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
2727 	char mcia_pl[MLXSW_REG_MCIA_LEN];
2728 	u16 i2c_addr;
2729 	int status;
2730 	int err;
2731 
2732 	size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE);
2733 
2734 	if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH &&
2735 	    offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
2736 		/* Cross-page read; read only up to the end of the low page (offset 256) */
2737 		size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
2738 
2739 	i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW;
2740 	if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) {
2741 		i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
2742 		offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH;
2743 	}
2744 
2745 	mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
2746 			    0, 0, offset, size, i2c_addr);
2747 
2748 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
2749 	if (err)
2750 		return err;
2751 
2752 	status = mlxsw_reg_mcia_status_get(mcia_pl);
2753 	if (status)
2754 		return -EIO;
2755 
2756 	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
2757 	memcpy(data, eeprom_tmp, size);
2758 	*p_read_size = size;
2759 
2760 	return 0;
2761 }
2762 
2763 static int mlxsw_sp_get_module_info(struct net_device *netdev,
2764 				    struct ethtool_modinfo *modinfo)
2765 {
2766 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2767 	u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE];
2768 	u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE;
2769 	u8 module_rev_id, module_id;
2770 	unsigned int read_size;
2771 	int err;
2772 
2773 	err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, offset,
2774 					   module_info, &read_size);
2775 	if (err)
2776 		return err;
2777 
2778 	if (read_size < offset)
2779 		return -EIO;
2780 
2781 	module_rev_id = module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID];
2782 	module_id = module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID];
2783 
2784 	switch (module_id) {
2785 	case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP:
2786 		modinfo->type       = ETH_MODULE_SFF_8436;
2787 		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2788 		break;
2789 	case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
2790 	case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
2791 		if (module_id == MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 ||
2792 		    module_rev_id >=
2793 		    MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636) {
2794 			modinfo->type       = ETH_MODULE_SFF_8636;
2795 			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2796 		} else {
2797 			modinfo->type       = ETH_MODULE_SFF_8436;
2798 			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2799 		}
2800 		break;
2801 	case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
2802 		modinfo->type       = ETH_MODULE_SFF_8472;
2803 		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2804 		break;
2805 	default:
2806 		return -EINVAL;
2807 	}
2808 
2809 	return 0;
2810 }
2811 
2812 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
2813 				      struct ethtool_eeprom *ee,
2814 				      u8 *data)
2815 {
2816 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2817 	int offset = ee->offset;
2818 	unsigned int read_size;
2819 	int i = 0;
2820 	int err;
2821 
2822 	if (!ee->len)
2823 		return -EINVAL;
2824 
2825 	memset(data, 0, ee->len);
2826 
2827 	while (i < ee->len) {
2828 		err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
2829 						   ee->len - i, data + i,
2830 						   &read_size);
2831 		if (err) {
2832 			netdev_err(mlxsw_sp_port->dev, "EEPROM query failed\n");
2833 			return err;
2834 		}
2835 
2836 		i += read_size;
2837 		offset += read_size;
2838 	}
2839 
2840 	return 0;
2841 }
2842 
2843 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2844 	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
2845 	.get_link		= ethtool_op_get_link,
2846 	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
2847 	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
2848 	.get_strings		= mlxsw_sp_port_get_strings,
2849 	.set_phys_id		= mlxsw_sp_port_set_phys_id,
2850 	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
2851 	.get_sset_count		= mlxsw_sp_port_get_sset_count,
2852 	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
2853 	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
2854 	.flash_device		= mlxsw_sp_flash_device,
2855 	.get_module_info	= mlxsw_sp_get_module_info,
2856 	.get_module_eeprom	= mlxsw_sp_get_module_eeprom,
2857 };
2858 
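/* Advertise every speed the port can support given its lane width. A
 * sketch of the arithmetic, assuming MLXSW_SP_PORT_BASE_SPEED is the
 * per-lane speed (25G on Spectrum): a 4-lane port gets an upper speed
 * of 100G and thus all link modes up to 100GBASE enabled.
 */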
2859 static int
2860 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2861 {
2862 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2863 	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2864 	char ptys_pl[MLXSW_REG_PTYS_LEN];
2865 	u32 eth_proto_admin;
2866 
2867 	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2868 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2869 				eth_proto_admin, mlxsw_sp_port->link.autoneg);
2870 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2871 }
2872 
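/* Configure one ETS element in the QEEC scheduling hierarchy: link it
 * to its parent (next_index) and choose between strict priority and
 * DWRR arbitration with the given weight.
 */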
2873 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2874 			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2875 			  bool dwrr, u8 dwrr_weight)
2876 {
2877 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2878 	char qeec_pl[MLXSW_REG_QEEC_LEN];
2879 
2880 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2881 			    next_index);
2882 	mlxsw_reg_qeec_de_set(qeec_pl, true);
2883 	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2884 	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2885 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2886 }
2887 
2888 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2889 				  enum mlxsw_reg_qeec_hr hr, u8 index,
2890 				  u8 next_index, u32 maxrate)
2891 {
2892 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2893 	char qeec_pl[MLXSW_REG_QEEC_LEN];
2894 
2895 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2896 			    next_index);
2897 	mlxsw_reg_qeec_mase_set(qeec_pl, true);
2898 	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2899 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2900 }
2901 
2902 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
2903 				    enum mlxsw_reg_qeec_hr hr, u8 index,
2904 				    u8 next_index, u32 minrate)
2905 {
2906 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2907 	char qeec_pl[MLXSW_REG_QEEC_LEN];
2908 
2909 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2910 			    next_index);
2911 	mlxsw_reg_qeec_mise_set(qeec_pl, true);
2912 	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
2913 
2914 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2915 }
2916 
2917 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2918 			      u8 switch_prio, u8 tclass)
2919 {
2920 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2921 	char qtct_pl[MLXSW_REG_QTCT_LEN];
2922 
2923 	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2924 			    tclass);
2925 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2926 }
2927 
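/* Default egress scheduling: one group feeds 8 subgroups, and each
 * subgroup feeds two traffic classes - a unicast TC (0-7) and the
 * matching multicast TC (8-15, at index i + 8). Max shapers are
 * disabled everywhere, a minimal min shaper is set on the multicast
 * TCs, and all priorities start out mapped to TC 0.
 */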
2928 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2929 {
2930 	int err, i;
2931 
2932 	/* Set up the elements hierarchy, so that each TC is linked to
2933 	 * one subgroup, and all subgroups are members of the same group.
2934 	 */
2935 	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2936 				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2937 				    0);
2938 	if (err)
2939 		return err;
2940 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2941 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2942 					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2943 					    0, false, 0);
2944 		if (err)
2945 			return err;
2946 	}
2947 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2948 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2949 					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2950 					    false, 0);
2951 		if (err)
2952 			return err;
2953 
2954 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2955 					    MLXSW_REG_QEEC_HIERARCY_TC,
2956 					    i + 8, i,
2957 					    false, 0);
2958 		if (err)
2959 			return err;
2960 	}
2961 
2962 	/* Make sure the max shaper is disabled in all hierarchies that
2963 	 * support it.
2964 	 */
2965 	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2966 					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2967 					    MLXSW_REG_QEEC_MAS_DIS);
2968 	if (err)
2969 		return err;
2970 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2971 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2972 						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2973 						    i, 0,
2974 						    MLXSW_REG_QEEC_MAS_DIS);
2975 		if (err)
2976 			return err;
2977 	}
2978 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2979 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2980 						    MLXSW_REG_QEEC_HIERARCY_TC,
2981 						    i, i,
2982 						    MLXSW_REG_QEEC_MAS_DIS);
2983 		if (err)
2984 			return err;
2985 
2986 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2987 						    MLXSW_REG_QEEC_HIERARCY_TC,
2988 						    i + 8, i,
2989 						    MLXSW_REG_QEEC_MAS_DIS);
2990 		if (err)
2991 			return err;
2992 	}
2993 
2994 	/* Configure the min shaper for multicast TCs. */
2995 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2996 		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
2997 					       MLXSW_REG_QEEC_HIERARCY_TC,
2998 					       i + 8, i,
2999 					       MLXSW_REG_QEEC_MIS_MIN);
3000 		if (err)
3001 			return err;
3002 	}
3003 
3004 	/* Map all priorities to traffic class 0. */
3005 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3006 		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
3007 		if (err)
3008 			return err;
3009 	}
3010 
3011 	return 0;
3012 }
3013 
3014 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
3015 					bool enable)
3016 {
3017 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3018 	char qtctm_pl[MLXSW_REG_QTCTM_LEN];
3019 
3020 	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
3021 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
3022 }
3023 
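/* Create and register one port netdev: map the module, set the SWID and
 * MAC address, then bring up buffers, ETS, DCB, FIDs, qdiscs, NVE and
 * the default VLAN before registering the netdev. The error path below
 * unwinds these steps in reverse order.
 */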
3024 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
3025 				bool split, u8 module, u8 width, u8 lane)
3026 {
3027 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
3028 	struct mlxsw_sp_port *mlxsw_sp_port;
3029 	struct net_device *dev;
3030 	int err;
3031 
3032 	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
3033 	if (err) {
3034 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
3035 			local_port);
3036 		return err;
3037 	}
3038 
3039 	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
3040 	if (!dev) {
3041 		err = -ENOMEM;
3042 		goto err_alloc_etherdev;
3043 	}
3044 	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
3045 	mlxsw_sp_port = netdev_priv(dev);
3046 	mlxsw_sp_port->dev = dev;
3047 	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
3048 	mlxsw_sp_port->local_port = local_port;
3049 	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
3050 	mlxsw_sp_port->split = split;
3051 	mlxsw_sp_port->mapping.module = module;
3052 	mlxsw_sp_port->mapping.width = width;
3053 	mlxsw_sp_port->mapping.lane = lane;
3054 	mlxsw_sp_port->link.autoneg = 1;
3055 	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
3056 	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
3057 
3058 	mlxsw_sp_port->pcpu_stats =
3059 		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
3060 	if (!mlxsw_sp_port->pcpu_stats) {
3061 		err = -ENOMEM;
3062 		goto err_alloc_stats;
3063 	}
3064 
3065 	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
3066 					GFP_KERNEL);
3067 	if (!mlxsw_sp_port->sample) {
3068 		err = -ENOMEM;
3069 		goto err_alloc_sample;
3070 	}
3071 
3072 	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
3073 			  &update_stats_cache);
3074 
3075 	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
3076 	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
3077 
3078 	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
3079 	if (err) {
3080 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
3081 			mlxsw_sp_port->local_port);
3082 		goto err_port_module_map;
3083 	}
3084 
3085 	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
3086 	if (err) {
3087 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
3088 			mlxsw_sp_port->local_port);
3089 		goto err_port_swid_set;
3090 	}
3091 
3092 	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
3093 	if (err) {
3094 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
3095 			mlxsw_sp_port->local_port);
3096 		goto err_dev_addr_init;
3097 	}
3098 
3099 	netif_carrier_off(dev);
3100 
3101 	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
3102 			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
3103 	dev->hw_features |= NETIF_F_HW_TC;
3104 
3105 	dev->min_mtu = 0;
3106 	dev->max_mtu = ETH_MAX_MTU;
3107 
3108 	/* Each packet needs to have a Tx header (metadata) on top of all
3109 	 * other headers.
3110 	 */
3111 	dev->needed_headroom = MLXSW_TXHDR_LEN;
3112 
3113 	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
3114 	if (err) {
3115 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
3116 			mlxsw_sp_port->local_port);
3117 		goto err_port_system_port_mapping_set;
3118 	}
3119 
3120 	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
3121 	if (err) {
3122 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
3123 			mlxsw_sp_port->local_port);
3124 		goto err_port_speed_by_width_set;
3125 	}
3126 
3127 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
3128 	if (err) {
3129 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
3130 			mlxsw_sp_port->local_port);
3131 		goto err_port_mtu_set;
3132 	}
3133 
3134 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
3135 	if (err)
3136 		goto err_port_admin_status_set;
3137 
3138 	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
3139 	if (err) {
3140 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
3141 			mlxsw_sp_port->local_port);
3142 		goto err_port_buffers_init;
3143 	}
3144 
3145 	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
3146 	if (err) {
3147 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
3148 			mlxsw_sp_port->local_port);
3149 		goto err_port_ets_init;
3150 	}
3151 
3152 	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
3153 	if (err) {
3154 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
3155 			mlxsw_sp_port->local_port);
3156 		goto err_port_tc_mc_mode;
3157 	}
3158 
3159 	/* ETS and buffers must be initialized before DCB. */
3160 	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
3161 	if (err) {
3162 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
3163 			mlxsw_sp_port->local_port);
3164 		goto err_port_dcb_init;
3165 	}
3166 
3167 	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
3168 	if (err) {
3169 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
3170 			mlxsw_sp_port->local_port);
3171 		goto err_port_fids_init;
3172 	}
3173 
3174 	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
3175 	if (err) {
3176 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
3177 			mlxsw_sp_port->local_port);
3178 		goto err_port_qdiscs_init;
3179 	}
3180 
3181 	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
3182 	if (err) {
3183 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
3184 			mlxsw_sp_port->local_port);
3185 		goto err_port_nve_init;
3186 	}
3187 
3188 	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
3189 	if (err) {
3190 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
3191 			mlxsw_sp_port->local_port);
3192 		goto err_port_pvid_set;
3193 	}
3194 
3195 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
3196 						       MLXSW_SP_DEFAULT_VID);
3197 	if (IS_ERR(mlxsw_sp_port_vlan)) {
3198 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create default VID\n",
3199 			mlxsw_sp_port->local_port);
3200 		err = PTR_ERR(mlxsw_sp_port_vlan);
3201 		goto err_port_vlan_create;
3202 	}
3203 	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
3204 
3205 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
3206 	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
3207 	err = register_netdev(dev);
3208 	if (err) {
3209 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
3210 			mlxsw_sp_port->local_port);
3211 		goto err_register_netdev;
3212 	}
3213 
3214 	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
3215 				mlxsw_sp_port, dev, module + 1,
3216 				mlxsw_sp_port->split, lane / width);
3217 	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
3218 	return 0;
3219 
3220 err_register_netdev:
3221 	mlxsw_sp->ports[local_port] = NULL;
3222 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
3223 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
3224 err_port_vlan_create:
3225 err_port_pvid_set:
3226 	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
3227 err_port_nve_init:
3228 	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
3229 err_port_qdiscs_init:
3230 	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3231 err_port_fids_init:
3232 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3233 err_port_dcb_init:
3234 	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
3235 err_port_tc_mc_mode:
3236 err_port_ets_init:
3237 err_port_buffers_init:
3238 err_port_admin_status_set:
3239 err_port_mtu_set:
3240 err_port_speed_by_width_set:
3241 err_port_system_port_mapping_set:
3242 err_dev_addr_init:
3243 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3244 err_port_swid_set:
3245 	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3246 err_port_module_map:
3247 	kfree(mlxsw_sp_port->sample);
3248 err_alloc_sample:
3249 	free_percpu(mlxsw_sp_port->pcpu_stats);
3250 err_alloc_stats:
3251 	free_netdev(dev);
3252 err_alloc_etherdev:
3253 	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3254 	return err;
3255 }
3256 
3257 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3258 {
3259 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3260 
3261 	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
3262 	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
3263 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
3264 	mlxsw_sp->ports[local_port] = NULL;
3265 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
3266 	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
3267 	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
3268 	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
3269 	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3270 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3271 	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
3272 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3273 	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3274 	kfree(mlxsw_sp_port->sample);
3275 	free_percpu(mlxsw_sp_port->pcpu_stats);
3276 	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
3277 	free_netdev(mlxsw_sp_port->dev);
3278 	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3279 }
3280 
3281 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3282 {
3283 	return mlxsw_sp->ports[local_port] != NULL;
3284 }
3285 
3286 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3287 {
3288 	int i;
3289 
3290 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
3291 		if (mlxsw_sp_port_created(mlxsw_sp, i))
3292 			mlxsw_sp_port_remove(mlxsw_sp, i);
3293 	kfree(mlxsw_sp->port_to_module);
3294 	kfree(mlxsw_sp->ports);
3295 }
3296 
3297 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
3298 {
3299 	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
3300 	u8 module, width, lane;
3301 	size_t alloc_size;
3302 	int i;
3303 	int err;
3304 
3305 	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
3306 	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
3307 	if (!mlxsw_sp->ports)
3308 		return -ENOMEM;
3309 
3310 	mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
3311 						 GFP_KERNEL);
3312 	if (!mlxsw_sp->port_to_module) {
3313 		err = -ENOMEM;
3314 		goto err_port_to_module_alloc;
3315 	}
3316 
3317 	for (i = 1; i < max_ports; i++) {
3318 		/* Mark as invalid */
3319 		mlxsw_sp->port_to_module[i] = -1;
3320 
3321 		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
3322 						    &width, &lane);
3323 		if (err)
3324 			goto err_port_module_info_get;
3325 		if (!width)
3326 			continue;
3327 		mlxsw_sp->port_to_module[i] = module;
3328 		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
3329 					   module, width, lane);
3330 		if (err)
3331 			goto err_port_create;
3332 	}
3333 	return 0;
3334 
3335 err_port_create:
3336 err_port_module_info_get:
3337 	for (i--; i >= 1; i--)
3338 		if (mlxsw_sp_port_created(mlxsw_sp, i))
3339 			mlxsw_sp_port_remove(mlxsw_sp, i);
3340 	kfree(mlxsw_sp->port_to_module);
3341 err_port_to_module_alloc:
3342 	kfree(mlxsw_sp->ports);
3343 	return err;
3344 }
3345 
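/* Ports are allocated in clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * (4 at the time of writing). For example, local port 7 has offset
 * (7 - 1) % 4 = 2 and therefore base port 5.
 */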
3346 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3347 {
3348 	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3349 
3350 	return local_port - offset;
3351 }
3352 
3353 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3354 				      u8 module, unsigned int count)
3355 {
3356 	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3357 	int err, i;
3358 
3359 	for (i = 0; i < count; i++) {
3360 		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
3361 					   module, width, i * width);
3362 		if (err)
3363 			goto err_port_create;
3364 	}
3365 
3366 	return 0;
3367 
3368 err_port_create:
3369 	for (i--; i >= 0; i--)
3370 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3371 			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3372 	return err;
3373 }
3374 
3375 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
3376 					 u8 base_port, unsigned int count)
3377 {
3378 	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
3379 	int i;
3380 
3381 	/* A split by four means we need to re-create two ports; a split
3382 	 * by two, only one.
3383 	 */
3384 	count = count / 2;
3385 
3386 	for (i = 0; i < count; i++) {
3387 		local_port = base_port + i * 2;
3388 		if (mlxsw_sp->port_to_module[local_port] < 0)
3389 			continue;
3390 		module = mlxsw_sp->port_to_module[local_port];
3391 
3392 		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
3393 				     width, 0);
3394 	}
3395 }
3396 
3397 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
3398 			       unsigned int count,
3399 			       struct netlink_ext_ack *extack)
3400 {
3401 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3402 	struct mlxsw_sp_port *mlxsw_sp_port;
3403 	u8 module, cur_width, base_port;
3404 	int i;
3405 	int err;
3406 
3407 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
3408 	if (!mlxsw_sp_port) {
3409 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3410 			local_port);
3411 		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
3412 		return -EINVAL;
3413 	}
3414 
3415 	module = mlxsw_sp_port->mapping.module;
3416 	cur_width = mlxsw_sp_port->mapping.width;
3417 
3418 	if (count != 2 && count != 4) {
3419 		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3420 		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
3421 		return -EINVAL;
3422 	}
3423 
3424 	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3425 		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3426 		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
3427 		return -EINVAL;
3428 	}
3429 
3430 	/* Make sure we have enough slave (even) ports for the split. */
3431 	if (count == 2) {
3432 		base_port = local_port;
3433 		if (mlxsw_sp->ports[base_port + 1]) {
3434 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3435 			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
3436 			return -EINVAL;
3437 		}
3438 	} else {
3439 		base_port = mlxsw_sp_cluster_base_port_get(local_port);
3440 		if (mlxsw_sp->ports[base_port + 1] ||
3441 		    mlxsw_sp->ports[base_port + 3]) {
3442 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3443 			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
3444 			return -EINVAL;
3445 		}
3446 	}
3447 
3448 	for (i = 0; i < count; i++)
3449 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3450 			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3451 
3452 	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
3453 	if (err) {
3454 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3455 		goto err_port_split_create;
3456 	}
3457 
3458 	return 0;
3459 
3460 err_port_split_create:
3461 	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3462 	return err;
3463 }
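
/* This callback implements the devlink port split API. An illustrative
 * invocation from user space (the PCI address is hypothetical):
 *
 *	devlink port split pci/0000:03:00.0/1 count 4
 *
 * which exercises the validity checks and mlxsw_sp_port_split_create()
 * above for front panel port 1.
 */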
3464 
3465 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
3466 				 struct netlink_ext_ack *extack)
3467 {
3468 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3469 	struct mlxsw_sp_port *mlxsw_sp_port;
3470 	u8 cur_width, base_port;
3471 	unsigned int count;
3472 	int i;
3473 
3474 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
3475 	if (!mlxsw_sp_port) {
3476 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3477 			local_port);
3478 		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
3479 		return -EINVAL;
3480 	}
3481 
3482 	if (!mlxsw_sp_port->split) {
3483 		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
3484 		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
3485 		return -EINVAL;
3486 	}
3487 
3488 	cur_width = mlxsw_sp_port->mapping.width;
3489 	count = cur_width == 1 ? 4 : 2;
3490 
3491 	base_port = mlxsw_sp_cluster_base_port_get(local_port);
3492 
3493 	/* Determine which ports to remove. */
3494 	if (count == 2 && local_port >= base_port + 2)
3495 		base_port = base_port + 2;
3496 
3497 	for (i = 0; i < count; i++)
3498 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3499 			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3500 
3501 	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3502 
3503 	return 0;
3504 }
3505 
3506 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3507 				     char *pude_pl, void *priv)
3508 {
3509 	struct mlxsw_sp *mlxsw_sp = priv;
3510 	struct mlxsw_sp_port *mlxsw_sp_port;
3511 	enum mlxsw_reg_pude_oper_status status;
3512 	u8 local_port;
3513 
3514 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3515 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
3516 	if (!mlxsw_sp_port)
3517 		return;
3518 
3519 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
3520 	if (status == MLXSW_PORT_OPER_STATUS_UP) {
3521 		netdev_info(mlxsw_sp_port->dev, "link up\n");
3522 		netif_carrier_on(mlxsw_sp_port->dev);
3523 	} else {
3524 		netdev_info(mlxsw_sp_port->dev, "link down\n");
3525 		netif_carrier_off(mlxsw_sp_port->dev);
3526 	}
3527 }
3528 
3529 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
3530 					      u8 local_port, void *priv)
3531 {
3532 	struct mlxsw_sp *mlxsw_sp = priv;
3533 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3534 	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
3535 
3536 	if (unlikely(!mlxsw_sp_port)) {
3537 		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
3538 				     local_port);
3539 		return;
3540 	}
3541 
3542 	skb->dev = mlxsw_sp_port->dev;
3543 
3544 	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
3545 	u64_stats_update_begin(&pcpu_stats->syncp);
3546 	pcpu_stats->rx_packets++;
3547 	pcpu_stats->rx_bytes += skb->len;
3548 	u64_stats_update_end(&pcpu_stats->syncp);
3549 
3550 	skb->protocol = eth_type_trans(skb, skb->dev);
3551 	netif_receive_skb(skb);
3552 }
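
/* The u64_stats_update_begin()/u64_stats_update_end() pair above lets
 * readers fetch the 64-bit packet and byte counters without tearing on
 * 32-bit SMP systems; on 64-bit kernels it is essentially free.
 */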
3553 
3554 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3555 					   void *priv)
3556 {
3557 	skb->offload_fwd_mark = 1;
3558 	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3559 }
3560 
3561 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
3562 					      u8 local_port, void *priv)
3563 {
3564 	skb->offload_l3_fwd_mark = 1;
3565 	skb->offload_fwd_mark = 1;
3566 	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3567 }
3568 
3569 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
3570 					     void *priv)
3571 {
3572 	struct mlxsw_sp *mlxsw_sp = priv;
3573 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3574 	struct psample_group *psample_group;
3575 	u32 size;
3576 
3577 	if (unlikely(!mlxsw_sp_port)) {
3578 		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
3579 				     local_port);
3580 		goto out;
3581 	}
3582 	if (unlikely(!mlxsw_sp_port->sample)) {
3583 		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
3584 				     local_port);
3585 		goto out;
3586 	}
3587 
3588 	size = mlxsw_sp_port->sample->truncate ?
3589 		  mlxsw_sp_port->sample->trunc_size : skb->len;
3590 
3591 	rcu_read_lock();
3592 	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
3593 	if (!psample_group)
3594 		goto out_unlock;
3595 	psample_sample_packet(psample_group, skb, size,
3596 			      mlxsw_sp_port->dev->ifindex, 0,
3597 			      mlxsw_sp_port->sample->rate);
3598 out_unlock:
3599 	rcu_read_unlock();
3600 out:
3601 	consume_skb(skb);
3602 }
3603 
3604 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
3605 	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
3606 		  _is_ctrl, SP_##_trap_group, DISCARD)
3607 
3608 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
3609 	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
3610 		_is_ctrl, SP_##_trap_group, DISCARD)
3611 
3612 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
3613 	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
3614 		_is_ctrl, SP_##_trap_group, DISCARD)
3615 
3616 #define MLXSW_SP_EVENTL(_func, _trap_id)		\
3617 	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
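
/* For example, the first L2 trap below,
 *
 *	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true)
 *
 * expands to
 *
 *	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, STP, TRAP_TO_CPU,
 *		  true, SP_STP, DISCARD)
 *
 * i.e. STP PDUs are trapped to the CPU as control packets in the SP_STP
 * trap group, and the listener delivers them without setting the
 * offload forwarding mark.
 */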
3618 
3619 static const struct mlxsw_listener mlxsw_sp_listener[] = {
3620 	/* Events */
3621 	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
3622 	/* L2 traps */
3623 	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
3624 	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
3625 	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
3626 	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
3627 	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
3628 	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
3629 	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
3630 	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
3631 	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
3632 	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
3633 	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
3634 	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
3635 	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
3636 			  false),
3637 	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3638 			     false),
3639 	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
3640 			     false),
3641 	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3642 			     false),
3643 	/* L3 traps */
3644 	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3645 	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3646 	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
3647 	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
3648 	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
3649 			  false),
3650 	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
3651 	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
3652 	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
3653 	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
3654 			  false),
3655 	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
3656 	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
3657 	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
3658 	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
3659 	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
3660 	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
3661 	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3662 			  false),
3663 	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3664 			  false),
3665 	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3666 			  false),
3667 	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3668 			  false),
3669 	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
3670 	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
3671 			  false),
3672 	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
3673 	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
3674 	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
3675 	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
3676 	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3677 	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
3678 	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
3679 	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
3680 	/* PKT Sample trap */
3681 	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
3682 		  false, SP_IP2ME, DISCARD),
3683 	/* ACL trap */
3684 	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
3685 	/* Multicast Router Traps */
3686 	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
3687 	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
3688 	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
3689 	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
3690 	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
3691 	/* NVE traps */
3692 	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
3693 	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
3694 };
3695 
3696 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3697 {
3698 	char qpcr_pl[MLXSW_REG_QPCR_LEN];
3699 	enum mlxsw_reg_qpcr_ir_units ir_units;
3700 	int max_cpu_policers;
3701 	bool is_bytes;
3702 	u8 burst_size;
3703 	u32 rate;
3704 	int i, err;
3705 
3706 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3707 		return -EIO;
3708 
3709 	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3710 
3711 	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3712 	for (i = 0; i < max_cpu_policers; i++) {
3713 		is_bytes = false;
3714 		switch (i) {
3715 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3716 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3717 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3718 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3719 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
3720 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
3721 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
3722 			rate = 128;
3723 			burst_size = 7;
3724 			break;
3725 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3726 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
3727 			rate = 16 * 1024;
3728 			burst_size = 10;
3729 			break;
3730 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
3731 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3732 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3733 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
3734 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3735 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3736 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
3737 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
3738 			rate = 1024;
3739 			burst_size = 7;
3740 			break;
3741 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3742 			rate = 1024;
3743 			burst_size = 7;
3744 			break;
3745 		default:
3746 			continue;
3747 		}
3748 
3749 		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3750 				    burst_size);
3751 		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3752 		if (err)
3753 			return err;
3754 	}
3755 
3756 	return 0;
3757 }
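
/* Summarizing the switch statement above: the STP, LACP, LLDP, OSPF,
 * PIM, RPF and LBERROR groups are policed at rate 128 with burst_size 7,
 * IGMP and IPv6 MLD at 16K with burst_size 10, and the remaining handled
 * groups at 1K with burst_size 7 - all in packet units, since is_bytes
 * stays false in every case.
 */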
3758 
3759 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
3760 {
3761 	char htgt_pl[MLXSW_REG_HTGT_LEN];
3762 	enum mlxsw_reg_htgt_trap_group i;
3763 	int max_cpu_policers;
3764 	int max_trap_groups;
3765 	u8 priority, tc;
3766 	u16 policer_id;
3767 	int err;
3768 
3769 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3770 		return -EIO;
3771 
3772 	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
3773 	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3774 
3775 	for (i = 0; i < max_trap_groups; i++) {
3776 		policer_id = i;
3777 		switch (i) {
3778 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3779 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3780 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3781 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3782 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
3783 			priority = 5;
3784 			tc = 5;
3785 			break;
3786 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
3787 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3788 			priority = 4;
3789 			tc = 4;
3790 			break;
3791 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3792 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3793 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
3794 			priority = 3;
3795 			tc = 3;
3796 			break;
3797 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3798 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
3799 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
3800 			priority = 2;
3801 			tc = 2;
3802 			break;
3803 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
3804 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3805 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3806 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
3807 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
3808 			priority = 1;
3809 			tc = 1;
3810 			break;
3811 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
3812 			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3813 			tc = MLXSW_REG_HTGT_DEFAULT_TC;
3814 			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
3815 			break;
3816 		default:
3817 			continue;
3818 		}
3819 
3820 		if (max_cpu_policers <= policer_id &&
3821 		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3822 			return -EIO;
3823 
3824 		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
3825 		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3826 		if (err)
3827 			return err;
3828 	}
3829 
3830 	return 0;
3831 }
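
/* Note that policer_id is simply the trap group number, except for the
 * event group, which gets MLXSW_REG_HTGT_INVALID_POLICER. Each group is
 * therefore bound to the policer that mlxsw_sp_cpu_policers_set()
 * configured at the same index.
 */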
3832 
3833 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3834 {
3835 	int i;
3836 	int err;
3837 
3838 	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3839 	if (err)
3840 		return err;
3841 
3842 	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3843 	if (err)
3844 		return err;
3845 
3846 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3847 		err = mlxsw_core_trap_register(mlxsw_sp->core,
3848 					       &mlxsw_sp_listener[i],
3849 					       mlxsw_sp);
3850 		if (err)
3851 			goto err_listener_register;
3852 
3853 	}
3854 	return 0;
3855 
3856 err_listener_register:
3857 	for (i--; i >= 0; i--) {
3858 		mlxsw_core_trap_unregister(mlxsw_sp->core,
3859 					   &mlxsw_sp_listener[i],
3860 					   mlxsw_sp);
3861 	}
3862 	return err;
3863 }
3864 
3865 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3866 {
3867 	int i;
3868 
3869 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3870 		mlxsw_core_trap_unregister(mlxsw_sp->core,
3871 					   &mlxsw_sp_listener[i],
3872 					   mlxsw_sp);
3873 	}
3874 }
3875 
3876 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3877 {
3878 	char slcr_pl[MLXSW_REG_SLCR_LEN];
3879 	u32 seed;
3880 	int err;
3881 
3882 	get_random_bytes(&seed, sizeof(seed));
3883 	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3884 				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
3885 				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3886 				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
3887 				     MLXSW_REG_SLCR_LAG_HASH_SIP |
3888 				     MLXSW_REG_SLCR_LAG_HASH_DIP |
3889 				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
3890 				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
3891 				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
3892 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3893 	if (err)
3894 		return err;
3895 
3896 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3897 	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3898 		return -EIO;
3899 
3900 	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3901 				 sizeof(struct mlxsw_sp_upper),
3902 				 GFP_KERNEL);
3903 	if (!mlxsw_sp->lags)
3904 		return -ENOMEM;
3905 
3906 	return 0;
3907 }
3908 
3909 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3910 {
3911 	kfree(mlxsw_sp->lags);
3912 }
3913 
3914 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3915 {
3916 	char htgt_pl[MLXSW_REG_HTGT_LEN];
3917 
3918 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3919 			    MLXSW_REG_HTGT_INVALID_POLICER,
3920 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3921 			    MLXSW_REG_HTGT_DEFAULT_TC);
3922 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3923 }
3924 
3925 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3926 				    unsigned long event, void *ptr);
3927 
3928 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3929 			 const struct mlxsw_bus_info *mlxsw_bus_info)
3930 {
3931 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3932 	int err;
3933 
3934 	mlxsw_sp->core = mlxsw_core;
3935 	mlxsw_sp->bus_info = mlxsw_bus_info;
3936 
3937 	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
3938 	if (err)
3939 		return err;
3940 
3941 	err = mlxsw_sp_base_mac_get(mlxsw_sp);
3942 	if (err) {
3943 		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3944 		return err;
3945 	}
3946 
3947 	err = mlxsw_sp_kvdl_init(mlxsw_sp);
3948 	if (err) {
3949 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
3950 		return err;
3951 	}
3952 
3953 	err = mlxsw_sp_fids_init(mlxsw_sp);
3954 	if (err) {
3955 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
3956 		goto err_fids_init;
3957 	}
3958 
3959 	err = mlxsw_sp_traps_init(mlxsw_sp);
3960 	if (err) {
3961 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3962 		goto err_traps_init;
3963 	}
3964 
3965 	err = mlxsw_sp_buffers_init(mlxsw_sp);
3966 	if (err) {
3967 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3968 		goto err_buffers_init;
3969 	}
3970 
3971 	err = mlxsw_sp_lag_init(mlxsw_sp);
3972 	if (err) {
3973 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3974 		goto err_lag_init;
3975 	}
3976 
3977 	/* Initialize SPAN before router and switchdev, so that those components
3978 	 * can call mlxsw_sp_span_respin().
3979 	 */
3980 	err = mlxsw_sp_span_init(mlxsw_sp);
3981 	if (err) {
3982 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3983 		goto err_span_init;
3984 	}
3985 
3986 	err = mlxsw_sp_switchdev_init(mlxsw_sp);
3987 	if (err) {
3988 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3989 		goto err_switchdev_init;
3990 	}
3991 
3992 	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3993 	if (err) {
3994 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3995 		goto err_counter_pool_init;
3996 	}
3997 
3998 	err = mlxsw_sp_afa_init(mlxsw_sp);
3999 	if (err) {
4000 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
4001 		goto err_afa_init;
4002 	}
4003 
4004 	err = mlxsw_sp_nve_init(mlxsw_sp);
4005 	if (err) {
4006 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
4007 		goto err_nve_init;
4008 	}
4009 
4010 	err = mlxsw_sp_acl_init(mlxsw_sp);
4011 	if (err) {
4012 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
4013 		goto err_acl_init;
4014 	}
4015 
4016 	err = mlxsw_sp_router_init(mlxsw_sp);
4017 	if (err) {
4018 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
4019 		goto err_router_init;
4020 	}
4021 
4022 	/* Initialize netdevice notifier after router and SPAN are initialized,
4023 	 * so that the event handler can use router structures and call SPAN
4024 	 * respin.
4025 	 */
4026 	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
4027 	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
4028 	if (err) {
4029 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
4030 		goto err_netdev_notifier;
4031 	}
4032 
4033 	err = mlxsw_sp_dpipe_init(mlxsw_sp);
4034 	if (err) {
4035 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
4036 		goto err_dpipe_init;
4037 	}
4038 
4039 	err = mlxsw_sp_ports_create(mlxsw_sp);
4040 	if (err) {
4041 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
4042 		goto err_ports_create;
4043 	}
4044 
4045 	return 0;
4046 
4047 err_ports_create:
4048 	mlxsw_sp_dpipe_fini(mlxsw_sp);
4049 err_dpipe_init:
4050 	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
4051 err_netdev_notifier:
4052 	mlxsw_sp_router_fini(mlxsw_sp);
4053 err_router_init:
4054 	mlxsw_sp_acl_fini(mlxsw_sp);
4055 err_acl_init:
4056 	mlxsw_sp_nve_fini(mlxsw_sp);
4057 err_nve_init:
4058 	mlxsw_sp_afa_fini(mlxsw_sp);
4059 err_afa_init:
4060 	mlxsw_sp_counter_pool_fini(mlxsw_sp);
4061 err_counter_pool_init:
4062 	mlxsw_sp_switchdev_fini(mlxsw_sp);
4063 err_switchdev_init:
4064 	mlxsw_sp_span_fini(mlxsw_sp);
4065 err_span_init:
4066 	mlxsw_sp_lag_fini(mlxsw_sp);
4067 err_lag_init:
4068 	mlxsw_sp_buffers_fini(mlxsw_sp);
4069 err_buffers_init:
4070 	mlxsw_sp_traps_fini(mlxsw_sp);
4071 err_traps_init:
4072 	mlxsw_sp_fids_fini(mlxsw_sp);
4073 err_fids_init:
4074 	mlxsw_sp_kvdl_fini(mlxsw_sp);
4075 	return err;
4076 }
4077 
4078 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
4079 			  const struct mlxsw_bus_info *mlxsw_bus_info)
4080 {
4081 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4082 
4083 	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
4084 	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
4085 	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
4086 	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
4087 	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
4088 	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
4089 	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
4090 	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
4091 	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
4092 	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
4093 
4094 	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
4095 }
4096 
4097 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
4098 			  const struct mlxsw_bus_info *mlxsw_bus_info)
4099 {
4100 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4101 
4102 	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
4103 	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
4104 	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
4105 	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
4106 	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
4107 	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
4108 	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
4109 	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
4110 
4111 	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
4112 }
4113 
4114 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
4115 {
4116 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4117 
4118 	mlxsw_sp_ports_remove(mlxsw_sp);
4119 	mlxsw_sp_dpipe_fini(mlxsw_sp);
4120 	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
4121 	mlxsw_sp_router_fini(mlxsw_sp);
4122 	mlxsw_sp_acl_fini(mlxsw_sp);
4123 	mlxsw_sp_nve_fini(mlxsw_sp);
4124 	mlxsw_sp_afa_fini(mlxsw_sp);
4125 	mlxsw_sp_counter_pool_fini(mlxsw_sp);
4126 	mlxsw_sp_switchdev_fini(mlxsw_sp);
4127 	mlxsw_sp_span_fini(mlxsw_sp);
4128 	mlxsw_sp_lag_fini(mlxsw_sp);
4129 	mlxsw_sp_buffers_fini(mlxsw_sp);
4130 	mlxsw_sp_traps_fini(mlxsw_sp);
4131 	mlxsw_sp_fids_fini(mlxsw_sp);
4132 	mlxsw_sp_kvdl_fini(mlxsw_sp);
4133 }
4134 
4135 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
4136  * 802.1Q FIDs
4137  */
4138 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
4139 					 VLAN_VID_MASK - 1)
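
/* A quick check of the arithmetic: VLAN_VID_MASK is 0xfff (4095), so
 * with a hypothetical MLXSW_SP_FID_8021D_MAX of 1024 each per-FID flood
 * table would hold 1024 + 4095 - 1 = 5118 entries.
 */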
4140 
4141 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
4142 	.used_max_mid			= 1,
4143 	.max_mid			= MLXSW_SP_MID_MAX,
4144 	.used_flood_tables		= 1,
4145 	.used_flood_mode		= 1,
4146 	.flood_mode			= 3,
4147 	.max_fid_flood_tables		= 3,
4148 	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
4149 	.used_max_ib_mc			= 1,
4150 	.max_ib_mc			= 0,
4151 	.used_max_pkey			= 1,
4152 	.max_pkey			= 0,
4153 	.used_kvd_sizes			= 1,
4154 	.kvd_hash_single_parts		= 59,
4155 	.kvd_hash_double_parts		= 41,
4156 	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
4157 	.swid_config			= {
4158 		{
4159 			.used_type	= 1,
4160 			.type		= MLXSW_PORT_SWID_TYPE_ETH,
4161 		}
4162 	},
4163 };
4164 
4165 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
4166 	.used_max_mid			= 1,
4167 	.max_mid			= MLXSW_SP_MID_MAX,
4168 	.used_flood_tables		= 1,
4169 	.used_flood_mode		= 1,
4170 	.flood_mode			= 3,
4171 	.max_fid_flood_tables		= 3,
4172 	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
4173 	.used_max_ib_mc			= 1,
4174 	.max_ib_mc			= 0,
4175 	.used_max_pkey			= 1,
4176 	.max_pkey			= 0,
4177 	.swid_config			= {
4178 		{
4179 			.used_type	= 1,
4180 			.type		= MLXSW_PORT_SWID_TYPE_ETH,
4181 		}
4182 	},
4183 };
4184 
4185 static void
4186 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
4187 				      struct devlink_resource_size_params *kvd_size_params,
4188 				      struct devlink_resource_size_params *linear_size_params,
4189 				      struct devlink_resource_size_params *hash_double_size_params,
4190 				      struct devlink_resource_size_params *hash_single_size_params)
4191 {
4192 	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
4193 						 KVD_SINGLE_MIN_SIZE);
4194 	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
4195 						 KVD_DOUBLE_MIN_SIZE);
4196 	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
4197 	u32 linear_size_min = 0;
4198 
4199 	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
4200 					  MLXSW_SP_KVD_GRANULARITY,
4201 					  DEVLINK_RESOURCE_UNIT_ENTRY);
4202 	devlink_resource_size_params_init(linear_size_params, linear_size_min,
4203 					  kvd_size - single_size_min -
4204 					  double_size_min,
4205 					  MLXSW_SP_KVD_GRANULARITY,
4206 					  DEVLINK_RESOURCE_UNIT_ENTRY);
4207 	devlink_resource_size_params_init(hash_double_size_params,
4208 					  double_size_min,
4209 					  kvd_size - single_size_min -
4210 					  linear_size_min,
4211 					  MLXSW_SP_KVD_GRANULARITY,
4212 					  DEVLINK_RESOURCE_UNIT_ENTRY);
4213 	devlink_resource_size_params_init(hash_single_size_params,
4214 					  single_size_min,
4215 					  kvd_size - double_size_min -
4216 					  linear_size_min,
4217 					  MLXSW_SP_KVD_GRANULARITY,
4218 					  DEVLINK_RESOURCE_UNIT_ENTRY);
4219 }
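
/* In other words, each KVD sub-part may grow only at the expense of the
 * other two: e.g. the linear part can range from 0 up to
 * kvd_size - single_size_min - double_size_min, in steps of
 * MLXSW_SP_KVD_GRANULARITY entries.
 */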
4220 
4221 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
4222 {
4223 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
4224 	struct devlink_resource_size_params hash_single_size_params;
4225 	struct devlink_resource_size_params hash_double_size_params;
4226 	struct devlink_resource_size_params linear_size_params;
4227 	struct devlink_resource_size_params kvd_size_params;
4228 	u32 kvd_size, single_size, double_size, linear_size;
4229 	const struct mlxsw_config_profile *profile;
4230 	int err;
4231 
4232 	profile = &mlxsw_sp1_config_profile;
4233 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
4234 		return -EIO;
4235 
4236 	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
4237 					      &linear_size_params,
4238 					      &hash_double_size_params,
4239 					      &hash_single_size_params);
4240 
4241 	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
4242 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
4243 					kvd_size, MLXSW_SP_RESOURCE_KVD,
4244 					DEVLINK_RESOURCE_ID_PARENT_TOP,
4245 					&kvd_size_params);
4246 	if (err)
4247 		return err;
4248 
4249 	linear_size = profile->kvd_linear_size;
4250 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
4251 					linear_size,
4252 					MLXSW_SP_RESOURCE_KVD_LINEAR,
4253 					MLXSW_SP_RESOURCE_KVD,
4254 					&linear_size_params);
4255 	if (err)
4256 		return err;
4257 
4258 	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
4259 	if (err)
4260 		return err;
4261 
4262 	double_size = kvd_size - linear_size;
4263 	double_size *= profile->kvd_hash_double_parts;
4264 	double_size /= profile->kvd_hash_double_parts +
4265 		       profile->kvd_hash_single_parts;
4266 	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
4267 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
4268 					double_size,
4269 					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
4270 					MLXSW_SP_RESOURCE_KVD,
4271 					&hash_double_size_params);
4272 	if (err)
4273 		return err;
4274 
4275 	single_size = kvd_size - double_size - linear_size;
4276 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
4277 					single_size,
4278 					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
4279 					MLXSW_SP_RESOURCE_KVD,
4280 					&hash_single_size_params);
4281 	if (err)
4282 		return err;
4283 
4284 	return 0;
4285 }
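
/* A worked example of the double/single split above, using the 41/59
 * parts ratio from mlxsw_sp1_config_profile and a hypothetical
 * kvd_size - linear_size of 100000 entries:
 *
 *	double_size = 100000 * 41 / (41 + 59) = 41000
 *	single_size = 100000 - 41000 = 59000
 *
 * with double_size rounded down to MLXSW_SP_KVD_GRANULARITY before the
 * subtraction.
 */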
4286 
4287 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
4288 {
4289 	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
4290 }
4291 
4292 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
4293 {
4294 	return 0;
4295 }
4296 
4297 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
4298 				  const struct mlxsw_config_profile *profile,
4299 				  u64 *p_single_size, u64 *p_double_size,
4300 				  u64 *p_linear_size)
4301 {
4302 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
4303 	u32 double_size;
4304 	int err;
4305 
4306 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
4307 	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
4308 		return -EIO;
4309 
4310 	/* The hash part is what is left of the KVD after the
4311 	 * linear part is taken out. It is split into the single
4312 	 * and double sizes according to the parts ratio from the
4313 	 * profile. Both sizes must be a multiple of the
4314 	 * granularity from the profile. In case the user
4315 	 * provided the sizes, they are obtained via devlink.
4316 	 */
4317 	err = devlink_resource_size_get(devlink,
4318 					MLXSW_SP_RESOURCE_KVD_LINEAR,
4319 					p_linear_size);
4320 	if (err)
4321 		*p_linear_size = profile->kvd_linear_size;
4322 
4323 	err = devlink_resource_size_get(devlink,
4324 					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
4325 					p_double_size);
4326 	if (err) {
4327 		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
4328 			      *p_linear_size;
4329 		double_size *= profile->kvd_hash_double_parts;
4330 		double_size /= profile->kvd_hash_double_parts +
4331 			       profile->kvd_hash_single_parts;
4332 		*p_double_size = rounddown(double_size,
4333 					   MLXSW_SP_KVD_GRANULARITY);
4334 	}
4335 
4336 	err = devlink_resource_size_get(devlink,
4337 					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
4338 					p_single_size);
4339 	if (err)
4340 		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
4341 				 *p_double_size - *p_linear_size;
4342 
4343 	/* Check results are legal. */
4344 	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
4345 	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
4346 	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
4347 		return -EIO;
4348 
4349 	return 0;
4350 }
4351 
4352 static int
4353 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
4354 					       union devlink_param_value val,
4355 					       struct netlink_ext_ack *extack)
4356 {
4357 	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
4358 	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
4359 		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
4360 		return -EINVAL;
4361 	}
4362 
4363 	return 0;
4364 }
4365 
4366 static const struct devlink_param mlxsw_sp_devlink_params[] = {
4367 	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
4368 			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
4369 			      NULL, NULL,
4370 			      mlxsw_sp_devlink_param_fw_load_policy_validate),
4371 };
4372 
4373 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
4374 {
4375 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
4376 	union devlink_param_value value;
4377 	int err;
4378 
4379 	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
4380 				      ARRAY_SIZE(mlxsw_sp_devlink_params));
4381 	if (err)
4382 		return err;
4383 
4384 	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
4385 	devlink_param_driverinit_value_set(devlink,
4386 					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
4387 					   value);
4388 	return 0;
4389 }
4390 
4391 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
4392 {
4393 	devlink_params_unregister(priv_to_devlink(mlxsw_core),
4394 				  mlxsw_sp_devlink_params,
4395 				  ARRAY_SIZE(mlxsw_sp_devlink_params));
4396 }
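
/* An illustrative user space invocation of the parameter registered
 * above (the PCI address is hypothetical; per the generic fw_load_policy
 * values, 0 selects the "driver" policy and 1 the "flash" policy):
 *
 *	devlink dev param set pci/0000:03:00.0 name fw_load_policy \
 *		value 1 cmode driverinit
 */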
4397 
4398 static int
4399 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
4400 					     struct devlink_param_gset_ctx *ctx)
4401 {
4402 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
4403 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4404 
4405 	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
4406 	return 0;
4407 }
4408 
4409 static int
4410 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
4411 					     struct devlink_param_gset_ctx *ctx)
4412 {
4413 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
4414 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4415 
4416 	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
4417 }
4418 
4419 static const struct devlink_param mlxsw_sp2_devlink_params[] = {
4420 	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
4421 			     "acl_region_rehash_interval",
4422 			     DEVLINK_PARAM_TYPE_U32,
4423 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
4424 			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
4425 			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
4426 			     NULL),
4427 };
4428 
4429 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
4430 {
4431 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
4432 	union devlink_param_value value;
4433 	int err;
4434 
4435 	err = mlxsw_sp_params_register(mlxsw_core);
4436 	if (err)
4437 		return err;
4438 
4439 	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
4440 				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
4441 	if (err)
4442 		goto err_devlink_params_register;
4443 
4444 	value.vu32 = 0;
4445 	devlink_param_driverinit_value_set(devlink,
4446 					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
4447 					   value);
4448 	return 0;
4449 
4450 err_devlink_params_register:
4451 	mlxsw_sp_params_unregister(mlxsw_core);
4452 	return err;
4453 }
4454 
4455 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
4456 {
4457 	devlink_params_unregister(priv_to_devlink(mlxsw_core),
4458 				  mlxsw_sp2_devlink_params,
4459 				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
4460 	mlxsw_sp_params_unregister(mlxsw_core);
4461 }
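
/* The Spectrum-2 only rehash interval can likewise be tuned at runtime,
 * e.g. (hypothetical device address and value):
 *
 *	devlink dev param set pci/0000:03:00.0 \
 *		name acl_region_rehash_interval value 3000 cmode runtime
 */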
4462 
4463 static struct mlxsw_driver mlxsw_sp1_driver = {
4464 	.kind				= mlxsw_sp1_driver_name,
4465 	.priv_size			= sizeof(struct mlxsw_sp),
4466 	.init				= mlxsw_sp1_init,
4467 	.fini				= mlxsw_sp_fini,
4468 	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
4469 	.port_split			= mlxsw_sp_port_split,
4470 	.port_unsplit			= mlxsw_sp_port_unsplit,
4471 	.sb_pool_get			= mlxsw_sp_sb_pool_get,
4472 	.sb_pool_set			= mlxsw_sp_sb_pool_set,
4473 	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
4474 	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
4475 	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
4476 	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
4477 	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
4478 	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
4479 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
4480 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
4481 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
4482 	.resources_register		= mlxsw_sp1_resources_register,
4483 	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
4484 	.params_register		= mlxsw_sp_params_register,
4485 	.params_unregister		= mlxsw_sp_params_unregister,
4486 	.txhdr_len			= MLXSW_TXHDR_LEN,
4487 	.profile			= &mlxsw_sp1_config_profile,
4488 	.res_query_enabled		= true,
4489 };
4490 
4491 static struct mlxsw_driver mlxsw_sp2_driver = {
4492 	.kind				= mlxsw_sp2_driver_name,
4493 	.priv_size			= sizeof(struct mlxsw_sp),
4494 	.init				= mlxsw_sp2_init,
4495 	.fini				= mlxsw_sp_fini,
4496 	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
4497 	.port_split			= mlxsw_sp_port_split,
4498 	.port_unsplit			= mlxsw_sp_port_unsplit,
4499 	.sb_pool_get			= mlxsw_sp_sb_pool_get,
4500 	.sb_pool_set			= mlxsw_sp_sb_pool_set,
4501 	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
4502 	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
4503 	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
4504 	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
4505 	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
4506 	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
4507 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
4508 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
4509 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
4510 	.resources_register		= mlxsw_sp2_resources_register,
4511 	.params_register		= mlxsw_sp2_params_register,
4512 	.params_unregister		= mlxsw_sp2_params_unregister,
4513 	.txhdr_len			= MLXSW_TXHDR_LEN,
4514 	.profile			= &mlxsw_sp2_config_profile,
4515 	.res_query_enabled		= true,
4516 };
4517 
4518 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
4519 {
4520 	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
4521 }
4522 
4523 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
4524 {
4525 	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
4526 	int ret = 0;
4527 
4528 	if (mlxsw_sp_port_dev_check(lower_dev)) {
4529 		*p_mlxsw_sp_port = netdev_priv(lower_dev);
4530 		ret = 1;
4531 	}
4532 
4533 	return ret;
4534 }
4535 
4536 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4537 {
4538 	struct mlxsw_sp_port *mlxsw_sp_port;
4539 
4540 	if (mlxsw_sp_port_dev_check(dev))
4541 		return netdev_priv(dev);
4542 
4543 	mlxsw_sp_port = NULL;
4544 	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
4545 
4546 	return mlxsw_sp_port;
4547 }
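
/* netdev_walk_all_lower_dev() stops as soon as the callback returns a
 * non-zero value, which is why mlxsw_sp_lower_dev_walk() returns 1 on a
 * match: the walk ends at the first mlxsw_sp port found below dev.
 */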
4548 
4549 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4550 {
4551 	struct mlxsw_sp_port *mlxsw_sp_port;
4552 
4553 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4554 	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4555 }
4556 
4557 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4558 {
4559 	struct mlxsw_sp_port *mlxsw_sp_port;
4560 
4561 	if (mlxsw_sp_port_dev_check(dev))
4562 		return netdev_priv(dev);
4563 
4564 	mlxsw_sp_port = NULL;
4565 	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4566 				      &mlxsw_sp_port);
4567 
4568 	return mlxsw_sp_port;
4569 }
4570 
4571 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
4572 {
4573 	struct mlxsw_sp_port *mlxsw_sp_port;
4574 
4575 	rcu_read_lock();
4576 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
4577 	if (mlxsw_sp_port)
4578 		dev_hold(mlxsw_sp_port->dev);
4579 	rcu_read_unlock();
4580 	return mlxsw_sp_port;
4581 }
4582 
4583 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
4584 {
4585 	dev_put(mlxsw_sp_port->dev);
4586 }
4587 
4588 static void
4589 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
4590 				 struct net_device *lag_dev)
4591 {
4592 	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
4593 	struct net_device *upper_dev;
4594 	struct list_head *iter;
4595 
4596 	if (netif_is_bridge_port(lag_dev))
4597 		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
4598 
4599 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4600 		if (!netif_is_bridge_port(upper_dev))
4601 			continue;
4602 		br_dev = netdev_master_upper_dev_get(upper_dev);
4603 		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
4604 	}
4605 }
4606 
4607 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4608 {
4609 	char sldr_pl[MLXSW_REG_SLDR_LEN];
4610 
4611 	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4612 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4613 }
4614 
4615 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4616 {
4617 	char sldr_pl[MLXSW_REG_SLDR_LEN];
4618 
4619 	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
4620 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4621 }
4622 
4623 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4624 				     u16 lag_id, u8 port_index)
4625 {
4626 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4627 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
4628 
4629 	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4630 				      lag_id, port_index);
4631 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4632 }
4633 
4634 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4635 					u16 lag_id)
4636 {
4637 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4638 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
4639 
4640 	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4641 					 lag_id);
4642 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4643 }
4644 
4645 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4646 					u16 lag_id)
4647 {
4648 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4649 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
4650 
4651 	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4652 					lag_id);
4653 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4654 }
4655 
4656 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4657 					 u16 lag_id)
4658 {
4659 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4660 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
4661 
4662 	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4663 					 lag_id);
4664 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4665 }
4666 
4667 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4668 				  struct net_device *lag_dev,
4669 				  u16 *p_lag_id)
4670 {
4671 	struct mlxsw_sp_upper *lag;
4672 	int free_lag_id = -1;
4673 	u64 max_lag;
4674 	int i;
4675 
4676 	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
4677 	for (i = 0; i < max_lag; i++) {
4678 		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
4679 		if (lag->ref_count) {
4680 			if (lag->dev == lag_dev) {
4681 				*p_lag_id = i;
4682 				return 0;
4683 			}
4684 		} else if (free_lag_id < 0) {
4685 			free_lag_id = i;
4686 		}
4687 	}
4688 	if (free_lag_id < 0)
4689 		return -EBUSY;
4690 	*p_lag_id = free_lag_id;
4691 	return 0;
4692 }
4693 
4694 static bool
4695 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4696 			  struct net_device *lag_dev,
4697 			  struct netdev_lag_upper_info *lag_upper_info,
4698 			  struct netlink_ext_ack *extack)
4699 {
4700 	u16 lag_id;
4701 
4702 	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
4703 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
4704 		return false;
4705 	}
4706 	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4707 		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4708 		return false;
4709 	}
4710 	return true;
4711 }
4712 
4713 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4714 				       u16 lag_id, u8 *p_port_index)
4715 {
4716 	u64 max_lag_members;
4717 	int i;
4718 
4719 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4720 					     MAX_LAG_MEMBERS);
4721 	for (i = 0; i < max_lag_members; i++) {
4722 		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4723 			*p_port_index = i;
4724 			return 0;
4725 		}
4726 	}
4727 	return -EBUSY;
4728 }
4729 
4730 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4731 				  struct net_device *lag_dev)
4732 {
4733 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4734 	struct mlxsw_sp_upper *lag;
4735 	u16 lag_id;
4736 	u8 port_index;
4737 	int err;
4738 
4739 	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4740 	if (err)
4741 		return err;
4742 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4743 	if (!lag->ref_count) {
4744 		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4745 		if (err)
4746 			return err;
4747 		lag->dev = lag_dev;
4748 	}
4749 
4750 	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4751 	if (err)
4752 		return err;
4753 	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4754 	if (err)
4755 		goto err_col_port_add;
4756 
4757 	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4758 				   mlxsw_sp_port->local_port);
4759 	mlxsw_sp_port->lag_id = lag_id;
4760 	mlxsw_sp_port->lagged = 1;
4761 	lag->ref_count++;
4762 
4763 	/* Port is no longer usable as a router interface */
4764 	if (mlxsw_sp_port->default_vlan->fid)
4765 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
4766 
4767 	return 0;
4768 
4769 err_col_port_add:
4770 	if (!lag->ref_count)
4771 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4772 	return err;
4773 }
4774 
4775 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4776 				    struct net_device *lag_dev)
4777 {
4778 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4779 	u16 lag_id = mlxsw_sp_port->lag_id;
4780 	struct mlxsw_sp_upper *lag;
4781 
4782 	if (!mlxsw_sp_port->lagged)
4783 		return;
4784 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4785 	WARN_ON(lag->ref_count == 0);
4786 
4787 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4788 
4789 	/* Any VLANs configured on the port are no longer valid */
4790 	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
4791 	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
4792 	/* Make the LAG and its directly linked uppers leave bridges they
4793 	 * are members of.
4794 	 */
4795 	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
4796 
4797 	if (lag->ref_count == 1)
4798 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4799 
4800 	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4801 				     mlxsw_sp_port->local_port);
4802 	mlxsw_sp_port->lagged = 0;
4803 	lag->ref_count--;
4804 
4805 	/* Make sure untagged frames are allowed to ingress */
4806 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
4807 }
4808 
4809 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4810 				      u16 lag_id)
4811 {
4812 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4813 	char sldr_pl[MLXSW_REG_SLDR_LEN];
4814 
4815 	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4816 					 mlxsw_sp_port->local_port);
4817 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4818 }
4819 
4820 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4821 					 u16 lag_id)
4822 {
4823 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4824 	char sldr_pl[MLXSW_REG_SLDR_LEN];
4825 
4826 	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4827 					    mlxsw_sp_port->local_port);
4828 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4829 }
4830 
4831 static int
4832 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4833 {
4834 	int err;
4835 
4836 	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4837 					   mlxsw_sp_port->lag_id);
4838 	if (err)
4839 		return err;
4840 
4841 	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4842 	if (err)
4843 		goto err_dist_port_add;
4844 
4845 	return 0;
4846 
4847 err_dist_port_add:
4848 	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4849 	return err;
4850 }
4851 
4852 static int
4853 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4854 {
4855 	int err;
4856 
4857 	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4858 					    mlxsw_sp_port->lag_id);
4859 	if (err)
4860 		return err;
4861 
4862 	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4863 					    mlxsw_sp_port->lag_id);
4864 	if (err)
4865 		goto err_col_port_disable;
4866 
4867 	return 0;
4868 
4869 err_col_port_disable:
4870 	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4871 	return err;
4872 }
4873 
4874 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4875 				     struct netdev_lag_lower_state_info *info)
4876 {
4877 	if (info->tx_enabled)
4878 		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4879 	else
4880 		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4881 }
4882 
4883 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4884 				 bool enable)
4885 {
4886 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4887 	enum mlxsw_reg_spms_state spms_state;
4888 	char *spms_pl;
4889 	u16 vid;
4890 	int err;
4891 
4892 	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4893 			      MLXSW_REG_SPMS_STATE_DISCARDING;
4894 
4895 	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4896 	if (!spms_pl)
4897 		return -ENOMEM;
4898 	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4899 
4900 	for (vid = 0; vid < VLAN_N_VID; vid++)
4901 		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4902 
4903 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4904 	kfree(spms_pl);
4905 	return err;
4906 }
4907 
4908 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4909 {
4910 	u16 vid = 1;
4911 	int err;
4912 
4913 	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
4914 	if (err)
4915 		return err;
4916 	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4917 	if (err)
4918 		goto err_port_stp_set;
4919 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4920 				     true, false);
4921 	if (err)
4922 		goto err_port_vlan_set;
4923 
4924 	for (; vid <= VLAN_N_VID - 1; vid++) {
4925 		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4926 						     vid, false);
4927 		if (err)
4928 			goto err_vid_learning_set;
4929 	}
4930 
4931 	return 0;
4932 
4933 err_vid_learning_set:
4934 	for (vid--; vid >= 1; vid--)
4935 		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4936 err_port_vlan_set:
4937 	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4938 err_port_stp_set:
4939 	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4940 	return err;
4941 }
4942 
4943 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4944 {
4945 	u16 vid;
4946 
4947 	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4948 		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4949 					       vid, true);
4950 
4951 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4952 			       false, false);
4953 	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4954 	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4955 }
4956 
4957 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4958 {
4959 	unsigned int num_vxlans = 0;
4960 	struct net_device *dev;
4961 	struct list_head *iter;
4962 
4963 	netdev_for_each_lower_dev(br_dev, dev, iter) {
4964 		if (netif_is_vxlan(dev))
4965 			num_vxlans++;
4966 	}
4967 
4968 	return num_vxlans > 1;
4969 }
4970 
4971 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4972 {
4973 	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4974 	struct net_device *dev;
4975 	struct list_head *iter;
4976 
4977 	netdev_for_each_lower_dev(br_dev, dev, iter) {
4978 		u16 pvid;
4979 		int err;
4980 
4981 		if (!netif_is_vxlan(dev))
4982 			continue;
4983 
4984 		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4985 		if (err || !pvid)
4986 			continue;
4987 
4988 		if (test_and_set_bit(pvid, vlans))
4989 			return false;
4990 	}
4991 
4992 	return true;
4993 }
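
/* The test_and_set_bit() on the VLAN bitmap above catches the second
 * VxLAN device that maps the same VLAN as its PVID: the first device
 * sets the bit, the second finds it already set and the bridge
 * configuration is rejected.
 */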
4994 
4995 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4996 					   struct netlink_ext_ack *extack)
4997 {
4998 	if (br_multicast_enabled(br_dev)) {
4999 		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
5000 		return false;
5001 	}
5002 
5003 	if (!br_vlan_enabled(br_dev) &&
5004 	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
5005 		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
5006 		return false;
5007 	}
5008 
5009 	if (br_vlan_enabled(br_dev) &&
5010 	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
5011 		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
5012 		return false;
5013 	}
5014 
5015 	return true;
5016 }
5017 
5018 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
5019 					       struct net_device *dev,
5020 					       unsigned long event, void *ptr)
5021 {
5022 	struct netdev_notifier_changeupper_info *info;
5023 	struct mlxsw_sp_port *mlxsw_sp_port;
5024 	struct netlink_ext_ack *extack;
5025 	struct net_device *upper_dev;
5026 	struct mlxsw_sp *mlxsw_sp;
5027 	int err = 0;
5028 
5029 	mlxsw_sp_port = netdev_priv(dev);
5030 	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5031 	info = ptr;
5032 	extack = netdev_notifier_info_to_extack(&info->info);
5033 
5034 	switch (event) {
5035 	case NETDEV_PRECHANGEUPPER:
5036 		upper_dev = info->upper_dev;
5037 		if (!is_vlan_dev(upper_dev) &&
5038 		    !netif_is_lag_master(upper_dev) &&
5039 		    !netif_is_bridge_master(upper_dev) &&
5040 		    !netif_is_ovs_master(upper_dev) &&
5041 		    !netif_is_macvlan(upper_dev)) {
5042 			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5043 			return -EINVAL;
5044 		}
5045 		if (!info->linking)
5046 			break;
5047 		if (netif_is_bridge_master(upper_dev) &&
5048 		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
5049 		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
5050 		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5051 			return -EOPNOTSUPP;
5052 		if (netdev_has_any_upper_dev(upper_dev) &&
5053 		    (!netif_is_bridge_master(upper_dev) ||
5054 		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
5055 							  upper_dev))) {
5056 			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
5057 			return -EINVAL;
5058 		}
5059 		if (netif_is_lag_master(upper_dev) &&
5060 		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
5061 					       info->upper_info, extack))
5062 			return -EINVAL;
5063 		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
5064 			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
5065 			return -EINVAL;
5066 		}
5067 		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
5068 		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
5069 			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
5070 			return -EINVAL;
5071 		}
5072 		if (netif_is_macvlan(upper_dev) &&
5073 		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
5074 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5075 			return -EOPNOTSUPP;
5076 		}
5077 		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
5078 			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
5079 			return -EINVAL;
5080 		}
5081 		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
5082 			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
5083 			return -EINVAL;
5084 		}
5085 		break;
5086 	case NETDEV_CHANGEUPPER:
5087 		upper_dev = info->upper_dev;
5088 		if (netif_is_bridge_master(upper_dev)) {
5089 			if (info->linking)
5090 				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
5091 								lower_dev,
5092 								upper_dev,
5093 								extack);
5094 			else
5095 				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
5096 							   lower_dev,
5097 							   upper_dev);
5098 		} else if (netif_is_lag_master(upper_dev)) {
5099 			if (info->linking) {
5100 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
5101 							     upper_dev);
5102 			} else {
5103 				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
5104 				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
5105 							upper_dev);
5106 			}
5107 		} else if (netif_is_ovs_master(upper_dev)) {
5108 			if (info->linking)
5109 				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
5110 			else
5111 				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
5112 		} else if (netif_is_macvlan(upper_dev)) {
5113 			if (!info->linking)
5114 				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5115 		} else if (is_vlan_dev(upper_dev)) {
5116 			struct net_device *br_dev;
5117 
5118 			if (!netif_is_bridge_port(upper_dev))
5119 				break;
5120 			if (info->linking)
5121 				break;
5122 			br_dev = netdev_master_upper_dev_get(upper_dev);
5123 			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
5124 						   br_dev);
5125 		}
5126 		break;
5127 	}
5128 
5129 	return err;
5130 }
5131 
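/* Reflect bonding lower state changes (link up, tx enabled) of a LAG
 * member port to the device. The error is only logged, not propagated;
 * this handler always returns 0.
 */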
5132 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
5133 					       unsigned long event, void *ptr)
5134 {
5135 	struct netdev_notifier_changelowerstate_info *info;
5136 	struct mlxsw_sp_port *mlxsw_sp_port;
5137 	int err;
5138 
5139 	mlxsw_sp_port = netdev_priv(dev);
5140 	info = ptr;
5141 
5142 	switch (event) {
5143 	case NETDEV_CHANGELOWERSTATE:
5144 		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
5145 			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
5146 							info->lower_state_info);
5147 			if (err)
5148 				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
5149 		}
5150 		break;
5151 	}
5152 
5153 	return 0;
5154 }
5155 
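/* Dispatch an event on a front panel port to the upper or lower state
 * handler above.
 */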
5156 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
5157 					 struct net_device *port_dev,
5158 					 unsigned long event, void *ptr)
5159 {
5160 	switch (event) {
5161 	case NETDEV_PRECHANGEUPPER:
5162 	case NETDEV_CHANGEUPPER:
5163 		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
5164 							   event, ptr);
5165 	case NETDEV_CHANGELOWERSTATE:
5166 		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
5167 							   ptr);
5168 	}
5169 
5170 	return 0;
5171 }
5172 
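/* Replicate a LAG device event to every mlxsw port enslaved to it,
 * stopping at the first error.
 */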
5173 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
5174 					unsigned long event, void *ptr)
5175 {
5176 	struct net_device *dev;
5177 	struct list_head *iter;
5178 	int ret;
5179 
5180 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
5181 		if (mlxsw_sp_port_dev_check(dev)) {
5182 			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
5183 							    ptr);
5184 			if (ret)
5185 				return ret;
5186 		}
5187 	}
5188 
5189 	return 0;
5190 }
5191 
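/* Handle upper device changes of a VLAN device on top of a front panel
 * port. Only bridge and macvlan uppers are supported: bridge join and
 * leave are reflected to the device, while a macvlan upper requires a
 * router interface (RIF) configured on the VLAN device.
 */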
5192 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
5193 					      struct net_device *dev,
5194 					      unsigned long event, void *ptr,
5195 					      u16 vid)
5196 {
5197 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
5198 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5199 	struct netdev_notifier_changeupper_info *info = ptr;
5200 	struct netlink_ext_ack *extack;
5201 	struct net_device *upper_dev;
5202 	int err = 0;
5203 
5204 	extack = netdev_notifier_info_to_extack(&info->info);
5205 
5206 	switch (event) {
5207 	case NETDEV_PRECHANGEUPPER:
5208 		upper_dev = info->upper_dev;
5209 		if (!netif_is_bridge_master(upper_dev) &&
5210 		    !netif_is_macvlan(upper_dev)) {
5211 			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5212 			return -EINVAL;
5213 		}
5214 		if (!info->linking)
5215 			break;
5216 		if (netif_is_bridge_master(upper_dev) &&
5217 		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
5218 		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
5219 		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5220 			return -EOPNOTSUPP;
5221 		if (netdev_has_any_upper_dev(upper_dev) &&
5222 		    (!netif_is_bridge_master(upper_dev) ||
5223 		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
5224 							  upper_dev))) {
5225 			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
5226 			return -EINVAL;
5227 		}
5228 		if (netif_is_macvlan(upper_dev) &&
5229 		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
5230 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5231 			return -EOPNOTSUPP;
5232 		}
5233 		break;
5234 	case NETDEV_CHANGEUPPER:
5235 		upper_dev = info->upper_dev;
5236 		if (netif_is_bridge_master(upper_dev)) {
5237 			if (info->linking)
5238 				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
5239 								vlan_dev,
5240 								upper_dev,
5241 								extack);
5242 			else
5243 				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
5244 							   vlan_dev,
5245 							   upper_dev);
5246 		} else if (netif_is_macvlan(upper_dev)) {
5247 			if (!info->linking)
5248 				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5249 		} else {
5250 			err = -EINVAL;
5251 			WARN_ON(1);
5252 		}
5253 		break;
5254 	}
5255 
5256 	return err;
5257 }
5258 
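/* Same as mlxsw_sp_netdevice_port_vlan_event(), but for a VLAN device
 * on top of a LAG: the event is replicated to each mlxsw port member.
 */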
5259 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5260 						  struct net_device *lag_dev,
5261 						  unsigned long event,
5262 						  void *ptr, u16 vid)
5263 {
5264 	struct net_device *dev;
5265 	struct list_head *iter;
5266 	int ret;
5267 
5268 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
5269 		if (mlxsw_sp_port_dev_check(dev)) {
5270 			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5271 								 event, ptr,
5272 								 vid);
5273 			if (ret)
5274 				return ret;
5275 		}
5276 	}
5277 
5278 	return 0;
5279 }
5280 
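/* Handle upper device changes of a VLAN device on top of a bridge with
 * mlxsw ports beneath it. Only a macvlan upper is supported, and only
 * when a router interface (RIF) exists for the VLAN device.
 */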
5281 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
5282 						struct net_device *br_dev,
5283 						unsigned long event, void *ptr,
5284 						u16 vid)
5285 {
5286 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
5287 	struct netdev_notifier_changeupper_info *info = ptr;
5288 	struct netlink_ext_ack *extack;
5289 	struct net_device *upper_dev;
5290 
5291 	if (!mlxsw_sp)
5292 		return 0;
5293 
5294 	extack = netdev_notifier_info_to_extack(&info->info);
5295 
5296 	switch (event) {
5297 	case NETDEV_PRECHANGEUPPER:
5298 		upper_dev = info->upper_dev;
5299 		if (!netif_is_macvlan(upper_dev)) {
5300 			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5301 			return -EOPNOTSUPP;
5302 		}
5303 		if (!info->linking)
5304 			break;
5305 		if (netif_is_macvlan(upper_dev) &&
5306 		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
5307 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5308 			return -EOPNOTSUPP;
5309 		}
5310 		break;
5311 	case NETDEV_CHANGEUPPER:
5312 		upper_dev = info->upper_dev;
5313 		if (info->linking)
5314 			break;
5315 		if (netif_is_macvlan(upper_dev))
5316 			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5317 		break;
5318 	}
5319 
5320 	return 0;
5321 }
5322 
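/* Dispatch an event on a VLAN device according to its real device:
 * front panel port, LAG or bridge.
 */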
5323 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
5324 					 unsigned long event, void *ptr)
5325 {
5326 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5327 	u16 vid = vlan_dev_vlan_id(vlan_dev);
5328 
5329 	if (mlxsw_sp_port_dev_check(real_dev))
5330 		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5331 							  event, ptr, vid);
5332 	else if (netif_is_lag_master(real_dev))
5333 		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5334 							      real_dev, event,
5335 							      ptr, vid);
5336 	else if (netif_is_bridge_master(real_dev))
5337 		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
5338 							    event, ptr, vid);
5339 
5340 	return 0;
5341 }
5342 
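/* Handle upper device changes of a bridge with mlxsw ports beneath it.
 * Only VLAN and macvlan uppers are supported; on unlink, the router
 * interface (RIF) state associated with the upper is destroyed.
 */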
5343 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
5344 					   unsigned long event, void *ptr)
5345 {
5346 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
5347 	struct netdev_notifier_changeupper_info *info = ptr;
5348 	struct netlink_ext_ack *extack;
5349 	struct net_device *upper_dev;
5350 
5351 	if (!mlxsw_sp)
5352 		return 0;
5353 
5354 	extack = netdev_notifier_info_to_extack(&info->info);
5355 
5356 	switch (event) {
5357 	case NETDEV_PRECHANGEUPPER:
5358 		upper_dev = info->upper_dev;
5359 		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
5360 			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5361 			return -EOPNOTSUPP;
5362 		}
5363 		if (!info->linking)
5364 			break;
5365 		if (netif_is_macvlan(upper_dev) &&
5366 		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
5367 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5368 			return -EOPNOTSUPP;
5369 		}
5370 		break;
5371 	case NETDEV_CHANGEUPPER:
5372 		upper_dev = info->upper_dev;
5373 		if (info->linking)
5374 			break;
5375 		if (is_vlan_dev(upper_dev))
5376 			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
5377 		if (netif_is_macvlan(upper_dev))
5378 			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5379 		break;
5380 	}
5381 
5382 	return 0;
5383 }
5384 
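/* A macvlan on top of an mlxsw-backed device may not have uppers of
 * its own. VRF enslavement never reaches this handler; it is diverted
 * in mlxsw_sp_netdevice_event() via mlxsw_sp_is_vrf_event().
 */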
5385 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5386 					    unsigned long event, void *ptr)
5387 {
5388 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5389 	struct netdev_notifier_changeupper_info *info = ptr;
5390 	struct netlink_ext_ack *extack;
5391 
5392 	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5393 		return 0;
5394 
5395 	extack = netdev_notifier_info_to_extack(&info->info);
5396 
5397 	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
5398 	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5399 
5400 	return -EOPNOTSUPP;
5401 }
5402 
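/* Events whose upper device is an l3mdev (VRF) are handled by the
 * router code, regardless of the lower device type.
 */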
5403 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
5404 {
5405 	struct netdev_notifier_changeupper_info *info = ptr;
5406 
5407 	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
5408 		return false;
5409 	return netif_is_l3_master(info->upper_dev);
5410 }
5411 
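/* Keep VxLAN offload in sync with the VxLAN device's bridge membership
 * and administrative state: join the bridge's hardware domain when the
 * device is linked and running, and leave it on unlink or when the
 * device goes down. For VLAN-aware bridges, the VNI-to-VLAN mapping is
 * driven by bridge VLAN configuration instead.
 */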
5412 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
5413 					  struct net_device *dev,
5414 					  unsigned long event, void *ptr)
5415 {
5416 	struct netdev_notifier_changeupper_info *cu_info;
5417 	struct netdev_notifier_info *info = ptr;
5418 	struct netlink_ext_ack *extack;
5419 	struct net_device *upper_dev;
5420 
5421 	extack = netdev_notifier_info_to_extack(info);
5422 
5423 	switch (event) {
5424 	case NETDEV_CHANGEUPPER:
5425 		cu_info = container_of(info,
5426 				       struct netdev_notifier_changeupper_info,
5427 				       info);
5428 		upper_dev = cu_info->upper_dev;
5429 		if (!netif_is_bridge_master(upper_dev))
5430 			return 0;
5431 		if (!mlxsw_sp_lower_get(upper_dev))
5432 			return 0;
5433 		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5434 			return -EOPNOTSUPP;
5435 		if (cu_info->linking) {
5436 			if (!netif_running(dev))
5437 				return 0;
5438 			/* When the bridge is VLAN-aware, the VNI of the VxLAN
5439 			 * device needs to be mapped to a VLAN, but at this
5440 			 * point no VLANs are configured on the VxLAN device
5441 			 */
5442 			if (br_vlan_enabled(upper_dev))
5443 				return 0;
5444 			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
5445 							  dev, 0, extack);
5446 		} else {
5447 			/* VLANs were already flushed, which triggered the
5448 			 * necessary cleanup
5449 			 */
5450 			if (br_vlan_enabled(upper_dev))
5451 				return 0;
5452 			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5453 		}
5454 		break;
5455 	case NETDEV_PRE_UP:
5456 		upper_dev = netdev_master_upper_dev_get(dev);
5457 		if (!upper_dev)
5458 			return 0;
5459 		if (!netif_is_bridge_master(upper_dev))
5460 			return 0;
5461 		if (!mlxsw_sp_lower_get(upper_dev))
5462 			return 0;
5463 		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
5464 						  extack);
5465 	case NETDEV_DOWN:
5466 		upper_dev = netdev_master_upper_dev_get(dev);
5467 		if (!upper_dev)
5468 			return 0;
5469 		if (!netif_is_bridge_master(upper_dev))
5470 			return 0;
5471 		if (!mlxsw_sp_lower_get(upper_dev))
5472 			return 0;
5473 		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5474 		break;
5475 	}
5476 
5477 	return 0;
5478 }
5479 
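/* Main netdev notifier of the driver. SPAN (mirroring) entries are
 * invalidated on NETDEV_UNREGISTER and re-resolved on every event;
 * the event is then dispatched according to the device type: VxLAN,
 * IP-in-IP, router port (address / MTU changes), VRF, front panel
 * port, LAG, VLAN, bridge or macvlan.
 */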
5480 static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
5481 				    unsigned long event, void *ptr)
5482 {
5483 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5484 	struct mlxsw_sp_span_entry *span_entry;
5485 	struct mlxsw_sp *mlxsw_sp;
5486 	int err = 0;
5487 
5488 	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
5489 	if (event == NETDEV_UNREGISTER) {
5490 		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
5491 		if (span_entry)
5492 			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
5493 	}
5494 	mlxsw_sp_span_respin(mlxsw_sp);
5495 
5496 	if (netif_is_vxlan(dev))
5497 		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
5498 	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
5499 		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
5500 						       event, ptr);
5501 	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
5502 		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
5503 						       event, ptr);
5504 	else if (event == NETDEV_PRE_CHANGEADDR ||
5505 		 event == NETDEV_CHANGEADDR ||
5506 		 event == NETDEV_CHANGEMTU)
5507 		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
5508 	else if (mlxsw_sp_is_vrf_event(event, ptr))
5509 		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
5510 	else if (mlxsw_sp_port_dev_check(dev))
5511 		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
5512 	else if (netif_is_lag_master(dev))
5513 		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
5514 	else if (is_vlan_dev(dev))
5515 		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
5516 	else if (netif_is_bridge_master(dev))
5517 		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
5518 	else if (netif_is_macvlan(dev))
5519 		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
5520 
5521 	return notifier_from_errno(err);
5522 }
5523 
5524 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
5525 	.notifier_call = mlxsw_sp_inetaddr_valid_event,
5526 };
5527 
5528 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
5529 	.notifier_call = mlxsw_sp_inet6addr_valid_event,
5530 };
5531 
5532 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
5533 	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
5534 	{0, },
5535 };
5536 
5537 static struct pci_driver mlxsw_sp1_pci_driver = {
5538 	.name = mlxsw_sp1_driver_name,
5539 	.id_table = mlxsw_sp1_pci_id_table,
5540 };
5541 
5542 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
5543 	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
5544 	{0, },
5545 };
5546 
5547 static struct pci_driver mlxsw_sp2_pci_driver = {
5548 	.name = mlxsw_sp2_driver_name,
5549 	.id_table = mlxsw_sp2_pci_id_table,
5550 };
5551 
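/* The core drivers must be registered before the corresponding PCI
 * drivers, as a PCI probe resolves its core driver by name. The error
 * path unwinds in the reverse order of registration.
 */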
5552 static int __init mlxsw_sp_module_init(void)
5553 {
5554 	int err;
5555 
5556 	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
5557 	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
5558 
5559 	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
5560 	if (err)
5561 		goto err_sp1_core_driver_register;
5562 
5563 	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
5564 	if (err)
5565 		goto err_sp2_core_driver_register;
5566 
5567 	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
5568 	if (err)
5569 		goto err_sp1_pci_driver_register;
5570 
5571 	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
5572 	if (err)
5573 		goto err_sp2_pci_driver_register;
5574 
5575 	return 0;
5576 
5577 err_sp2_pci_driver_register:
5578 	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5579 err_sp1_pci_driver_register:
5580 	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5581 err_sp2_core_driver_register:
5582 	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5583 err_sp1_core_driver_register:
5584 	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
5585 	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
5586 	return err;
5587 }
5588 
5589 static void __exit mlxsw_sp_module_exit(void)
5590 {
5591 	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5592 	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5593 	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5594 	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5595 	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
5596 	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
5597 }
5598 
5599 module_init(mlxsw_sp_module_init);
5600 module_exit(mlxsw_sp_module_exit);
5601 
5602 MODULE_LICENSE("Dual BSD/GPL");
5603 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
5604 MODULE_DESCRIPTION("Mellanox Spectrum driver");
5605 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
5606 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
5607 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
5608