xref: /linux/drivers/net/ethernet/sfc/ef10.c (revision 2c97b5ae83dca56718774e7b4bf9640f05d11867)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /****************************************************************************
3  * Driver for Solarflare network controllers and boards
4  * Copyright 2012-2013 Solarflare Communications Inc.
5  */
6 
7 #include "net_driver.h"
8 #include "ef10_regs.h"
9 #include "io.h"
10 #include "mcdi.h"
11 #include "mcdi_pcol.h"
12 #include "nic.h"
13 #include "workarounds.h"
14 #include "selftest.h"
15 #include "ef10_sriov.h"
16 #include <linux/in.h>
17 #include <linux/jhash.h>
18 #include <linux/wait.h>
19 #include <linux/workqueue.h>
20 
21 /* Hardware control for EF10 architecture including 'Huntington'. */
22 
23 #define EFX_EF10_DRVGEN_EV		7
24 enum {
25 	EFX_EF10_TEST = 1,
26 	EFX_EF10_REFILL,
27 };
28 /* The maximum size of a shared RSS context */
29 /* TODO: this should really be from the mcdi protocol export */
30 #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
31 
32 /* The filter table(s) are managed by firmware and we have write-only
33  * access.  When removing filters we must identify them to the
34  * firmware by a 64-bit handle, but this is too wide for Linux kernel
35  * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
36  * be able to tell in advance whether a requested insertion will
37  * replace an existing filter.  Therefore we maintain a software hash
38  * table, which should be at least as large as the hardware hash
39  * table.
40  *
41  * Huntington has a single 8K filter table shared between all filter
42  * types and both ports.
43  */
44 #define HUNT_FILTER_TBL_ROWS 8192
45 
46 #define EFX_EF10_FILTER_ID_INVALID 0xffff
47 
48 #define EFX_EF10_FILTER_DEV_UC_MAX	32
49 #define EFX_EF10_FILTER_DEV_MC_MAX	256
50 
51 /* VLAN list entry */
52 struct efx_ef10_vlan {
53 	struct list_head list;
54 	u16 vid;
55 };
56 
57 enum efx_ef10_default_filters {
58 	EFX_EF10_BCAST,
59 	EFX_EF10_UCDEF,
60 	EFX_EF10_MCDEF,
61 	EFX_EF10_VXLAN4_UCDEF,
62 	EFX_EF10_VXLAN4_MCDEF,
63 	EFX_EF10_VXLAN6_UCDEF,
64 	EFX_EF10_VXLAN6_MCDEF,
65 	EFX_EF10_NVGRE4_UCDEF,
66 	EFX_EF10_NVGRE4_MCDEF,
67 	EFX_EF10_NVGRE6_UCDEF,
68 	EFX_EF10_NVGRE6_MCDEF,
69 	EFX_EF10_GENEVE4_UCDEF,
70 	EFX_EF10_GENEVE4_MCDEF,
71 	EFX_EF10_GENEVE6_UCDEF,
72 	EFX_EF10_GENEVE6_MCDEF,
73 
74 	EFX_EF10_NUM_DEFAULT_FILTERS
75 };
76 
77 /* Per-VLAN filters information */
78 struct efx_ef10_filter_vlan {
79 	struct list_head list;
80 	u16 vid;
81 	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
82 	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
83 	u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
84 };
85 
86 struct efx_ef10_dev_addr {
87 	u8 addr[ETH_ALEN];
88 };
89 
90 struct efx_ef10_filter_table {
91 /* The MCDI match masks supported by this fw & hw, in order of priority */
92 	u32 rx_match_mcdi_flags[
93 		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
94 	unsigned int rx_match_count;
95 
96 	struct rw_semaphore lock; /* Protects entries */
97 	struct {
98 		unsigned long spec;	/* pointer to spec plus flag bits */
99 /* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
100 /* unused flag	1UL */
101 #define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
102 #define EFX_EF10_FILTER_FLAGS		3UL
103 		u64 handle;		/* firmware handle */
104 	} *entry;
105 /* Shadow of net_device address lists, guarded by mac_lock */
106 	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
107 	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
108 	int dev_uc_count;
109 	int dev_mc_count;
110 	bool uc_promisc;
111 	bool mc_promisc;
112 /* Whether in multicast promiscuous mode when last changed */
113 	bool mc_promisc_last;
114 	bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
115 	bool vlan_filter;
116 	struct list_head vlan_list;
117 };
118 
119 /* An arbitrary search limit for the software hash table */
120 #define EFX_EF10_FILTER_SEARCH_LIMIT 200
121 
122 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
123 static void efx_ef10_filter_table_remove(struct efx_nic *efx);
124 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
125 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
126 					      struct efx_ef10_filter_vlan *vlan);
127 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
128 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
129 
130 static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
131 {
132 	WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
133 	return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
134 }
135 
136 static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
137 {
138 	return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
139 }
140 
141 static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
142 {
143 	return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
144 }
145 
146 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
147 {
148 	efx_dword_t reg;
149 
150 	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
151 	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
152 		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
153 }
154 
155 /* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
156  * I/O space and BAR 2(&3) for memory.  On SFC9250 (Medford2), there is no I/O
157  * bar; PFs use BAR 0/1 for memory.
158  */
159 static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
160 {
161 	switch (efx->pci_dev->device) {
162 	case 0x0b03: /* SFC9250 PF */
163 		return 0;
164 	default:
165 		return 2;
166 	}
167 }
168 
169 /* All VFs use BAR 0/1 for memory */
170 static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
171 {
172 	return 0;
173 }
174 
175 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
176 {
177 	int bar;
178 
179 	bar = efx->type->mem_bar(efx);
180 	return resource_size(&efx->pci_dev->resource[bar]);
181 }
182 
183 static bool efx_ef10_is_vf(struct efx_nic *efx)
184 {
185 	return efx->type->is_vf;
186 }
187 
188 static int efx_ef10_get_pf_index(struct efx_nic *efx)
189 {
190 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
191 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
192 	size_t outlen;
193 	int rc;
194 
195 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
196 			  sizeof(outbuf), &outlen);
197 	if (rc)
198 		return rc;
199 	if (outlen < sizeof(outbuf))
200 		return -EIO;
201 
202 	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
203 	return 0;
204 }
205 
206 #ifdef CONFIG_SFC_SRIOV
207 static int efx_ef10_get_vf_index(struct efx_nic *efx)
208 {
209 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
210 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
211 	size_t outlen;
212 	int rc;
213 
214 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
215 			  sizeof(outbuf), &outlen);
216 	if (rc)
217 		return rc;
218 	if (outlen < sizeof(outbuf))
219 		return -EIO;
220 
221 	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
222 	return 0;
223 }
224 #endif
225 
226 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
227 {
228 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
229 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
230 	size_t outlen;
231 	int rc;
232 
233 	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
234 
235 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
236 			  outbuf, sizeof(outbuf), &outlen);
237 	if (rc)
238 		return rc;
239 	if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
240 		netif_err(efx, drv, efx->net_dev,
241 			  "unable to read datapath firmware capabilities\n");
242 		return -EIO;
243 	}
244 
245 	nic_data->datapath_caps =
246 		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
247 
248 	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
249 		nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
250 				GET_CAPABILITIES_V2_OUT_FLAGS2);
251 		nic_data->piobuf_size = MCDI_WORD(outbuf,
252 				GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
253 	} else {
254 		nic_data->datapath_caps2 = 0;
255 		nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
256 	}
257 
258 	/* record the DPCPU firmware IDs to determine VEB vswitching support.
259 	 */
260 	nic_data->rx_dpcpu_fw_id =
261 		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
262 	nic_data->tx_dpcpu_fw_id =
263 		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
264 
265 	if (!(nic_data->datapath_caps &
266 	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
267 		netif_err(efx, probe, efx->net_dev,
268 			  "current firmware does not support an RX prefix\n");
269 		return -ENODEV;
270 	}
271 
272 	if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
273 		u8 vi_window_mode = MCDI_BYTE(outbuf,
274 				GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
275 
276 		switch (vi_window_mode) {
277 		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
278 			efx->vi_stride = 8192;
279 			break;
280 		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
281 			efx->vi_stride = 16384;
282 			break;
283 		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
284 			efx->vi_stride = 65536;
285 			break;
286 		default:
287 			netif_err(efx, probe, efx->net_dev,
288 				  "Unrecognised VI window mode %d\n",
289 				  vi_window_mode);
290 			return -EIO;
291 		}
292 		netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
293 			  efx->vi_stride);
294 	} else {
295 		/* keep default VI stride */
296 		netif_dbg(efx, probe, efx->net_dev,
297 			  "firmware did not report VI window mode, assuming vi_stride = %u\n",
298 			  efx->vi_stride);
299 	}
300 
301 	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
302 		efx->num_mac_stats = MCDI_WORD(outbuf,
303 				GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
304 		netif_dbg(efx, probe, efx->net_dev,
305 			  "firmware reports num_mac_stats = %u\n",
306 			  efx->num_mac_stats);
307 	} else {
308 		/* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
309 		netif_dbg(efx, probe, efx->net_dev,
310 			  "firmware did not report num_mac_stats, assuming %u\n",
311 			  efx->num_mac_stats);
312 	}
313 
314 	return 0;
315 }
316 
317 static void efx_ef10_read_licensed_features(struct efx_nic *efx)
318 {
319 	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
320 	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
321 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
322 	size_t outlen;
323 	int rc;
324 
325 	MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
326 		       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
327 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
328 				outbuf, sizeof(outbuf), &outlen);
329 	if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
330 		return;
331 
332 	nic_data->licensed_features = MCDI_QWORD(outbuf,
333 					 LICENSING_V3_OUT_LICENSED_FEATURES);
334 }
335 
336 static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
337 {
338 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
339 	int rc;
340 
341 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
342 			  outbuf, sizeof(outbuf), NULL);
343 	if (rc)
344 		return rc;
345 	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
346 	return rc > 0 ? rc : -ERANGE;
347 }
348 
349 static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
350 {
351 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
352 	unsigned int implemented;
353 	unsigned int enabled;
354 	int rc;
355 
356 	nic_data->workaround_35388 = false;
357 	nic_data->workaround_61265 = false;
358 
359 	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
360 
361 	if (rc == -ENOSYS) {
362 		/* Firmware without GET_WORKAROUNDS - not a problem. */
363 		rc = 0;
364 	} else if (rc == 0) {
365 		/* Bug61265 workaround is always enabled if implemented. */
366 		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
367 			nic_data->workaround_61265 = true;
368 
369 		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
370 			nic_data->workaround_35388 = true;
371 		} else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
372 			/* Workaround is implemented but not enabled.
373 			 * Try to enable it.
374 			 */
375 			rc = efx_mcdi_set_workaround(efx,
376 						     MC_CMD_WORKAROUND_BUG35388,
377 						     true, NULL);
378 			if (rc == 0)
379 				nic_data->workaround_35388 = true;
380 			/* If we failed to set the workaround just carry on. */
381 			rc = 0;
382 		}
383 	}
384 
385 	netif_dbg(efx, probe, efx->net_dev,
386 		  "workaround for bug 35388 is %sabled\n",
387 		  nic_data->workaround_35388 ? "en" : "dis");
388 	netif_dbg(efx, probe, efx->net_dev,
389 		  "workaround for bug 61265 is %sabled\n",
390 		  nic_data->workaround_61265 ? "en" : "dis");
391 
392 	return rc;
393 }
394 
395 static void efx_ef10_process_timer_config(struct efx_nic *efx,
396 					  const efx_dword_t *data)
397 {
398 	unsigned int max_count;
399 
400 	if (EFX_EF10_WORKAROUND_61265(efx)) {
401 		efx->timer_quantum_ns = MCDI_DWORD(data,
402 			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
403 		efx->timer_max_ns = MCDI_DWORD(data,
404 			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
405 	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
406 		efx->timer_quantum_ns = MCDI_DWORD(data,
407 			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
408 		max_count = MCDI_DWORD(data,
409 			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
410 		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
411 	} else {
412 		efx->timer_quantum_ns = MCDI_DWORD(data,
413 			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
414 		max_count = MCDI_DWORD(data,
415 			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
416 		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
417 	}
418 
419 	netif_dbg(efx, probe, efx->net_dev,
420 		  "got timer properties from MC: quantum %u ns; max %u ns\n",
421 		  efx->timer_quantum_ns, efx->timer_max_ns);
422 }
423 
424 static int efx_ef10_get_timer_config(struct efx_nic *efx)
425 {
426 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
427 	int rc;
428 
429 	rc = efx_ef10_get_timer_workarounds(efx);
430 	if (rc)
431 		return rc;
432 
433 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
434 				outbuf, sizeof(outbuf), NULL);
435 
436 	if (rc == 0) {
437 		efx_ef10_process_timer_config(efx, outbuf);
438 	} else if (rc == -ENOSYS || rc == -EPERM) {
439 		/* Not available - fall back to Huntington defaults. */
440 		unsigned int quantum;
441 
442 		rc = efx_ef10_get_sysclk_freq(efx);
443 		if (rc < 0)
444 			return rc;
445 
446 		quantum = 1536000 / rc; /* 1536 cycles */
447 		efx->timer_quantum_ns = quantum;
448 		efx->timer_max_ns = efx->type->timer_period_max * quantum;
449 		rc = 0;
450 	} else {
451 		efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
452 				       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
453 				       NULL, 0, rc);
454 	}
455 
456 	return rc;
457 }
458 
459 static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
460 {
461 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
462 	size_t outlen;
463 	int rc;
464 
465 	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
466 
467 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
468 			  outbuf, sizeof(outbuf), &outlen);
469 	if (rc)
470 		return rc;
471 	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
472 		return -EIO;
473 
474 	ether_addr_copy(mac_address,
475 			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
476 	return 0;
477 }
478 
479 static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
480 {
481 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
482 	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
483 	size_t outlen;
484 	int num_addrs, rc;
485 
486 	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
487 		       EVB_PORT_ID_ASSIGNED);
488 	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
489 			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
490 
491 	if (rc)
492 		return rc;
493 	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
494 		return -EIO;
495 
496 	num_addrs = MCDI_DWORD(outbuf,
497 			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
498 
499 	WARN_ON(num_addrs != 1);
500 
501 	ether_addr_copy(mac_address,
502 			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));
503 
504 	return 0;
505 }
506 
507 static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
508 					       struct device_attribute *attr,
509 					       char *buf)
510 {
511 	struct efx_nic *efx = dev_get_drvdata(dev);
512 
513 	return sprintf(buf, "%d\n",
514 		       ((efx->mcdi->fn_flags) &
515 			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
516 		       ? 1 : 0);
517 }
518 
519 static ssize_t efx_ef10_show_primary_flag(struct device *dev,
520 					  struct device_attribute *attr,
521 					  char *buf)
522 {
523 	struct efx_nic *efx = dev_get_drvdata(dev);
524 
525 	return sprintf(buf, "%d\n",
526 		       ((efx->mcdi->fn_flags) &
527 			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
528 		       ? 1 : 0);
529 }
530 
531 static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
532 {
533 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
534 	struct efx_ef10_vlan *vlan;
535 
536 	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
537 
538 	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
539 		if (vlan->vid == vid)
540 			return vlan;
541 	}
542 
543 	return NULL;
544 }
545 
546 static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
547 {
548 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
549 	struct efx_ef10_vlan *vlan;
550 	int rc;
551 
552 	mutex_lock(&nic_data->vlan_lock);
553 
554 	vlan = efx_ef10_find_vlan(efx, vid);
555 	if (vlan) {
556 		/* We add VID 0 on init. 8021q adds it on module init
557 		 * for all interfaces with VLAN filtring feature.
558 		 */
559 		if (vid == 0)
560 			goto done_unlock;
561 		netif_warn(efx, drv, efx->net_dev,
562 			   "VLAN %u already added\n", vid);
563 		rc = -EALREADY;
564 		goto fail_exist;
565 	}
566 
567 	rc = -ENOMEM;
568 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
569 	if (!vlan)
570 		goto fail_alloc;
571 
572 	vlan->vid = vid;
573 
574 	list_add_tail(&vlan->list, &nic_data->vlan_list);
575 
576 	if (efx->filter_state) {
577 		mutex_lock(&efx->mac_lock);
578 		down_write(&efx->filter_sem);
579 		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
580 		up_write(&efx->filter_sem);
581 		mutex_unlock(&efx->mac_lock);
582 		if (rc)
583 			goto fail_filter_add_vlan;
584 	}
585 
586 done_unlock:
587 	mutex_unlock(&nic_data->vlan_lock);
588 	return 0;
589 
590 fail_filter_add_vlan:
591 	list_del(&vlan->list);
592 	kfree(vlan);
593 fail_alloc:
594 fail_exist:
595 	mutex_unlock(&nic_data->vlan_lock);
596 	return rc;
597 }
598 
599 static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
600 				       struct efx_ef10_vlan *vlan)
601 {
602 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
603 
604 	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
605 
606 	if (efx->filter_state) {
607 		down_write(&efx->filter_sem);
608 		efx_ef10_filter_del_vlan(efx, vlan->vid);
609 		up_write(&efx->filter_sem);
610 	}
611 
612 	list_del(&vlan->list);
613 	kfree(vlan);
614 }
615 
616 static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
617 {
618 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
619 	struct efx_ef10_vlan *vlan;
620 	int rc = 0;
621 
622 	/* 8021q removes VID 0 on module unload for all interfaces
623 	 * with VLAN filtering feature. We need to keep it to receive
624 	 * untagged traffic.
625 	 */
626 	if (vid == 0)
627 		return 0;
628 
629 	mutex_lock(&nic_data->vlan_lock);
630 
631 	vlan = efx_ef10_find_vlan(efx, vid);
632 	if (!vlan) {
633 		netif_err(efx, drv, efx->net_dev,
634 			  "VLAN %u to be deleted not found\n", vid);
635 		rc = -ENOENT;
636 	} else {
637 		efx_ef10_del_vlan_internal(efx, vlan);
638 	}
639 
640 	mutex_unlock(&nic_data->vlan_lock);
641 
642 	return rc;
643 }
644 
645 static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
646 {
647 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
648 	struct efx_ef10_vlan *vlan, *next_vlan;
649 
650 	mutex_lock(&nic_data->vlan_lock);
651 	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
652 		efx_ef10_del_vlan_internal(efx, vlan);
653 	mutex_unlock(&nic_data->vlan_lock);
654 }
655 
656 static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
657 		   NULL);
658 static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
659 
660 static int efx_ef10_probe(struct efx_nic *efx)
661 {
662 	struct efx_ef10_nic_data *nic_data;
663 	int i, rc;
664 
665 	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
666 	if (!nic_data)
667 		return -ENOMEM;
668 	efx->nic_data = nic_data;
669 
670 	/* we assume later that we can copy from this buffer in dwords */
671 	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
672 
673 	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
674 				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
675 	if (rc)
676 		goto fail1;
677 
678 	/* Get the MC's warm boot count.  In case it's rebooting right
679 	 * now, be prepared to retry.
680 	 */
681 	i = 0;
682 	for (;;) {
683 		rc = efx_ef10_get_warm_boot_count(efx);
684 		if (rc >= 0)
685 			break;
686 		if (++i == 5)
687 			goto fail2;
688 		ssleep(1);
689 	}
690 	nic_data->warm_boot_count = rc;
691 
692 	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
693 
694 	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
695 
696 	/* In case we're recovering from a crash (kexec), we want to
697 	 * cancel any outstanding request by the previous user of this
698 	 * function.  We send a special message using the least
699 	 * significant bits of the 'high' (doorbell) register.
700 	 */
701 	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
702 
703 	rc = efx_mcdi_init(efx);
704 	if (rc)
705 		goto fail2;
706 
707 	mutex_init(&nic_data->udp_tunnels_lock);
708 
709 	/* Reset (most) configuration for this function */
710 	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
711 	if (rc)
712 		goto fail3;
713 
714 	/* Enable event logging */
715 	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
716 	if (rc)
717 		goto fail3;
718 
719 	rc = device_create_file(&efx->pci_dev->dev,
720 				&dev_attr_link_control_flag);
721 	if (rc)
722 		goto fail3;
723 
724 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
725 	if (rc)
726 		goto fail4;
727 
728 	rc = efx_ef10_get_pf_index(efx);
729 	if (rc)
730 		goto fail5;
731 
732 	rc = efx_ef10_init_datapath_caps(efx);
733 	if (rc < 0)
734 		goto fail5;
735 
736 	efx_ef10_read_licensed_features(efx);
737 
738 	/* We can have one VI for each vi_stride-byte region.
739 	 * However, until we use TX option descriptors we need two TX queues
740 	 * per channel.
741 	 */
742 	efx->max_channels = min_t(unsigned int,
743 				  EFX_MAX_CHANNELS,
744 				  efx_ef10_mem_map_size(efx) /
745 				  (efx->vi_stride * EFX_TXQ_TYPES));
746 	efx->max_tx_channels = efx->max_channels;
747 	if (WARN_ON(efx->max_channels == 0)) {
748 		rc = -EIO;
749 		goto fail5;
750 	}
751 
752 	efx->rx_packet_len_offset =
753 		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
754 
755 	if (nic_data->datapath_caps &
756 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
757 		efx->net_dev->hw_features |= NETIF_F_RXFCS;
758 
759 	rc = efx_mcdi_port_get_number(efx);
760 	if (rc < 0)
761 		goto fail5;
762 	efx->port_num = rc;
763 
764 	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
765 	if (rc)
766 		goto fail5;
767 
768 	rc = efx_ef10_get_timer_config(efx);
769 	if (rc < 0)
770 		goto fail5;
771 
772 	rc = efx_mcdi_mon_probe(efx);
773 	if (rc && rc != -EPERM)
774 		goto fail5;
775 
776 	efx_ptp_defer_probe_with_channel(efx);
777 
778 #ifdef CONFIG_SFC_SRIOV
779 	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
780 		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
781 		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
782 
783 		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
784 	} else
785 #endif
786 		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
787 
788 	INIT_LIST_HEAD(&nic_data->vlan_list);
789 	mutex_init(&nic_data->vlan_lock);
790 
791 	/* Add unspecified VID to support VLAN filtering being disabled */
792 	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
793 	if (rc)
794 		goto fail_add_vid_unspec;
795 
796 	/* If VLAN filtering is enabled, we need VID 0 to get untagged
797 	 * traffic.  It is added automatically if 8021q module is loaded,
798 	 * but we can't rely on it since module may be not loaded.
799 	 */
800 	rc = efx_ef10_add_vlan(efx, 0);
801 	if (rc)
802 		goto fail_add_vid_0;
803 
804 	return 0;
805 
806 fail_add_vid_0:
807 	efx_ef10_cleanup_vlans(efx);
808 fail_add_vid_unspec:
809 	mutex_destroy(&nic_data->vlan_lock);
810 	efx_ptp_remove(efx);
811 	efx_mcdi_mon_remove(efx);
812 fail5:
813 	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
814 fail4:
815 	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
816 fail3:
817 	efx_mcdi_detach(efx);
818 
819 	mutex_lock(&nic_data->udp_tunnels_lock);
820 	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
821 	(void)efx_ef10_set_udp_tnl_ports(efx, true);
822 	mutex_unlock(&nic_data->udp_tunnels_lock);
823 	mutex_destroy(&nic_data->udp_tunnels_lock);
824 
825 	efx_mcdi_fini(efx);
826 fail2:
827 	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
828 fail1:
829 	kfree(nic_data);
830 	efx->nic_data = NULL;
831 	return rc;
832 }
833 
834 static int efx_ef10_free_vis(struct efx_nic *efx)
835 {
836 	MCDI_DECLARE_BUF_ERR(outbuf);
837 	size_t outlen;
838 	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
839 				    outbuf, sizeof(outbuf), &outlen);
840 
841 	/* -EALREADY means nothing to free, so ignore */
842 	if (rc == -EALREADY)
843 		rc = 0;
844 	if (rc)
845 		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
846 				       rc);
847 	return rc;
848 }
849 
850 #ifdef EFX_USE_PIO
851 
852 static void efx_ef10_free_piobufs(struct efx_nic *efx)
853 {
854 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
855 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
856 	unsigned int i;
857 	int rc;
858 
859 	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
860 
861 	for (i = 0; i < nic_data->n_piobufs; i++) {
862 		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
863 			       nic_data->piobuf_handle[i]);
864 		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
865 				  NULL, 0, NULL);
866 		WARN_ON(rc);
867 	}
868 
869 	nic_data->n_piobufs = 0;
870 }
871 
872 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
873 {
874 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
875 	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
876 	unsigned int i;
877 	size_t outlen;
878 	int rc = 0;
879 
880 	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
881 
882 	for (i = 0; i < n; i++) {
883 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
884 					outbuf, sizeof(outbuf), &outlen);
885 		if (rc) {
886 			/* Don't display the MC error if we didn't have space
887 			 * for a VF.
888 			 */
889 			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
890 				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
891 						       0, outbuf, outlen, rc);
892 			break;
893 		}
894 		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
895 			rc = -EIO;
896 			break;
897 		}
898 		nic_data->piobuf_handle[i] =
899 			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
900 		netif_dbg(efx, probe, efx->net_dev,
901 			  "allocated PIO buffer %u handle %x\n", i,
902 			  nic_data->piobuf_handle[i]);
903 	}
904 
905 	nic_data->n_piobufs = i;
906 	if (rc)
907 		efx_ef10_free_piobufs(efx);
908 	return rc;
909 }
910 
911 static int efx_ef10_link_piobufs(struct efx_nic *efx)
912 {
913 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
914 	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
915 	struct efx_channel *channel;
916 	struct efx_tx_queue *tx_queue;
917 	unsigned int offset, index;
918 	int rc;
919 
920 	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
921 	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
922 
923 	/* Link a buffer to each VI in the write-combining mapping */
924 	for (index = 0; index < nic_data->n_piobufs; ++index) {
925 		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
926 			       nic_data->piobuf_handle[index]);
927 		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
928 			       nic_data->pio_write_vi_base + index);
929 		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
930 				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
931 				  NULL, 0, NULL);
932 		if (rc) {
933 			netif_err(efx, drv, efx->net_dev,
934 				  "failed to link VI %u to PIO buffer %u (%d)\n",
935 				  nic_data->pio_write_vi_base + index, index,
936 				  rc);
937 			goto fail;
938 		}
939 		netif_dbg(efx, probe, efx->net_dev,
940 			  "linked VI %u to PIO buffer %u\n",
941 			  nic_data->pio_write_vi_base + index, index);
942 	}
943 
944 	/* Link a buffer to each TX queue */
945 	efx_for_each_channel(channel, efx) {
946 		/* Extra channels, even those with TXQs (PTP), do not require
947 		 * PIO resources.
948 		 */
949 		if (!channel->type->want_pio ||
950 		    channel->channel >= efx->xdp_channel_offset)
951 			continue;
952 
953 		efx_for_each_channel_tx_queue(tx_queue, channel) {
954 			/* We assign the PIO buffers to queues in
955 			 * reverse order to allow for the following
956 			 * special case.
957 			 */
958 			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
959 				   tx_queue->channel->channel - 1) *
960 				  efx_piobuf_size);
961 			index = offset / nic_data->piobuf_size;
962 			offset = offset % nic_data->piobuf_size;
963 
964 			/* When the host page size is 4K, the first
965 			 * host page in the WC mapping may be within
966 			 * the same VI page as the last TX queue.  We
967 			 * can only link one buffer to each VI.
968 			 */
969 			if (tx_queue->queue == nic_data->pio_write_vi_base) {
970 				BUG_ON(index != 0);
971 				rc = 0;
972 			} else {
973 				MCDI_SET_DWORD(inbuf,
974 					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
975 					       nic_data->piobuf_handle[index]);
976 				MCDI_SET_DWORD(inbuf,
977 					       LINK_PIOBUF_IN_TXQ_INSTANCE,
978 					       tx_queue->queue);
979 				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
980 						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
981 						  NULL, 0, NULL);
982 			}
983 
984 			if (rc) {
985 				/* This is non-fatal; the TX path just
986 				 * won't use PIO for this queue
987 				 */
988 				netif_err(efx, drv, efx->net_dev,
989 					  "failed to link VI %u to PIO buffer %u (%d)\n",
990 					  tx_queue->queue, index, rc);
991 				tx_queue->piobuf = NULL;
992 			} else {
993 				tx_queue->piobuf =
994 					nic_data->pio_write_base +
995 					index * efx->vi_stride + offset;
996 				tx_queue->piobuf_offset = offset;
997 				netif_dbg(efx, probe, efx->net_dev,
998 					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
999 					  tx_queue->queue, index,
1000 					  tx_queue->piobuf_offset,
1001 					  tx_queue->piobuf);
1002 			}
1003 		}
1004 	}
1005 
1006 	return 0;
1007 
1008 fail:
1009 	/* inbuf was defined for MC_CMD_LINK_PIOBUF.  We can use the same
1010 	 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
1011 	 */
1012 	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
1013 	while (index--) {
1014 		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
1015 			       nic_data->pio_write_vi_base + index);
1016 		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
1017 			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
1018 			     NULL, 0, NULL);
1019 	}
1020 	return rc;
1021 }
1022 
1023 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
1024 {
1025 	struct efx_channel *channel;
1026 	struct efx_tx_queue *tx_queue;
1027 
1028 	/* All our existing PIO buffers went away */
1029 	efx_for_each_channel(channel, efx)
1030 		efx_for_each_channel_tx_queue(tx_queue, channel)
1031 			tx_queue->piobuf = NULL;
1032 }
1033 
1034 #else /* !EFX_USE_PIO */
1035 
1036 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
1037 {
1038 	return n == 0 ? 0 : -ENOBUFS;
1039 }
1040 
1041 static int efx_ef10_link_piobufs(struct efx_nic *efx)
1042 {
1043 	return 0;
1044 }
1045 
1046 static void efx_ef10_free_piobufs(struct efx_nic *efx)
1047 {
1048 }
1049 
1050 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
1051 {
1052 }
1053 
1054 #endif /* EFX_USE_PIO */
1055 
1056 static void efx_ef10_remove(struct efx_nic *efx)
1057 {
1058 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1059 	int rc;
1060 
1061 #ifdef CONFIG_SFC_SRIOV
1062 	struct efx_ef10_nic_data *nic_data_pf;
1063 	struct pci_dev *pci_dev_pf;
1064 	struct efx_nic *efx_pf;
1065 	struct ef10_vf *vf;
1066 
1067 	if (efx->pci_dev->is_virtfn) {
1068 		pci_dev_pf = efx->pci_dev->physfn;
1069 		if (pci_dev_pf) {
1070 			efx_pf = pci_get_drvdata(pci_dev_pf);
1071 			nic_data_pf = efx_pf->nic_data;
1072 			vf = nic_data_pf->vf + nic_data->vf_index;
1073 			vf->efx = NULL;
1074 		} else
1075 			netif_info(efx, drv, efx->net_dev,
1076 				   "Could not get the PF id from VF\n");
1077 	}
1078 #endif
1079 
1080 	efx_ef10_cleanup_vlans(efx);
1081 	mutex_destroy(&nic_data->vlan_lock);
1082 
1083 	efx_ptp_remove(efx);
1084 
1085 	efx_mcdi_mon_remove(efx);
1086 
1087 	efx_ef10_rx_free_indir_table(efx);
1088 
1089 	if (nic_data->wc_membase)
1090 		iounmap(nic_data->wc_membase);
1091 
1092 	rc = efx_ef10_free_vis(efx);
1093 	WARN_ON(rc != 0);
1094 
1095 	if (!nic_data->must_restore_piobufs)
1096 		efx_ef10_free_piobufs(efx);
1097 
1098 	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
1099 	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
1100 
1101 	efx_mcdi_detach(efx);
1102 
1103 	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
1104 	mutex_lock(&nic_data->udp_tunnels_lock);
1105 	(void)efx_ef10_set_udp_tnl_ports(efx, true);
1106 	mutex_unlock(&nic_data->udp_tunnels_lock);
1107 
1108 	mutex_destroy(&nic_data->udp_tunnels_lock);
1109 
1110 	efx_mcdi_fini(efx);
1111 	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
1112 	kfree(nic_data);
1113 }
1114 
1115 static int efx_ef10_probe_pf(struct efx_nic *efx)
1116 {
1117 	return efx_ef10_probe(efx);
1118 }
1119 
1120 int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
1121 			    u32 *port_flags, u32 *vadaptor_flags,
1122 			    unsigned int *vlan_tags)
1123 {
1124 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1125 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
1126 	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
1127 	size_t outlen;
1128 	int rc;
1129 
1130 	if (nic_data->datapath_caps &
1131 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
1132 		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
1133 			       port_id);
1134 
1135 		rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
1136 				  outbuf, sizeof(outbuf), &outlen);
1137 		if (rc)
1138 			return rc;
1139 
1140 		if (outlen < sizeof(outbuf)) {
1141 			rc = -EIO;
1142 			return rc;
1143 		}
1144 	}
1145 
1146 	if (port_flags)
1147 		*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
1148 	if (vadaptor_flags)
1149 		*vadaptor_flags =
1150 			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
1151 	if (vlan_tags)
1152 		*vlan_tags =
1153 			MCDI_DWORD(outbuf,
1154 				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);
1155 
1156 	return 0;
1157 }
1158 
1159 int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
1160 {
1161 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
1162 
1163 	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
1164 	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
1165 			    NULL, 0, NULL);
1166 }
1167 
1168 int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
1169 {
1170 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
1171 
1172 	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
1173 	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
1174 			    NULL, 0, NULL);
1175 }
1176 
1177 int efx_ef10_vport_add_mac(struct efx_nic *efx,
1178 			   unsigned int port_id, u8 *mac)
1179 {
1180 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
1181 
1182 	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
1183 	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
1184 
1185 	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
1186 			    sizeof(inbuf), NULL, 0, NULL);
1187 }
1188 
1189 int efx_ef10_vport_del_mac(struct efx_nic *efx,
1190 			   unsigned int port_id, u8 *mac)
1191 {
1192 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
1193 
1194 	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
1195 	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
1196 
1197 	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
1198 			    sizeof(inbuf), NULL, 0, NULL);
1199 }
1200 
1201 #ifdef CONFIG_SFC_SRIOV
1202 static int efx_ef10_probe_vf(struct efx_nic *efx)
1203 {
1204 	int rc;
1205 	struct pci_dev *pci_dev_pf;
1206 
1207 	/* If the parent PF has no VF data structure, it doesn't know about this
1208 	 * VF so fail probe.  The VF needs to be re-created.  This can happen
1209 	 * if the PF driver is unloaded while the VF is assigned to a guest.
1210 	 */
1211 	pci_dev_pf = efx->pci_dev->physfn;
1212 	if (pci_dev_pf) {
1213 		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
1214 		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
1215 
1216 		if (!nic_data_pf->vf) {
1217 			netif_info(efx, drv, efx->net_dev,
1218 				   "The VF cannot link to its parent PF; "
1219 				   "please destroy and re-create the VF\n");
1220 			return -EBUSY;
1221 		}
1222 	}
1223 
1224 	rc = efx_ef10_probe(efx);
1225 	if (rc)
1226 		return rc;
1227 
1228 	rc = efx_ef10_get_vf_index(efx);
1229 	if (rc)
1230 		goto fail;
1231 
1232 	if (efx->pci_dev->is_virtfn) {
1233 		if (efx->pci_dev->physfn) {
1234 			struct efx_nic *efx_pf =
1235 				pci_get_drvdata(efx->pci_dev->physfn);
1236 			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
1237 			struct efx_ef10_nic_data *nic_data = efx->nic_data;
1238 
1239 			nic_data_p->vf[nic_data->vf_index].efx = efx;
1240 			nic_data_p->vf[nic_data->vf_index].pci_dev =
1241 				efx->pci_dev;
1242 		} else
1243 			netif_info(efx, drv, efx->net_dev,
1244 				   "Could not get the PF id from VF\n");
1245 	}
1246 
1247 	return 0;
1248 
1249 fail:
1250 	efx_ef10_remove(efx);
1251 	return rc;
1252 }
1253 #else
1254 static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
1255 {
1256 	return 0;
1257 }
1258 #endif
1259 
1260 static int efx_ef10_alloc_vis(struct efx_nic *efx,
1261 			      unsigned int min_vis, unsigned int max_vis)
1262 {
1263 	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
1264 	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
1265 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1266 	size_t outlen;
1267 	int rc;
1268 
1269 	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
1270 	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
1271 	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
1272 			  outbuf, sizeof(outbuf), &outlen);
1273 	if (rc != 0)
1274 		return rc;
1275 
1276 	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
1277 		return -EIO;
1278 
1279 	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
1280 		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
1281 
1282 	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
1283 	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
1284 	return 0;
1285 }
1286 
1287 /* Note that the failure path of this function does not free
1288  * resources, as this will be done by efx_ef10_remove().
1289  */
1290 static int efx_ef10_dimension_resources(struct efx_nic *efx)
1291 {
1292 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1293 	unsigned int uc_mem_map_size, wc_mem_map_size;
1294 	unsigned int min_vis = max(EFX_TXQ_TYPES,
1295 				   efx_separate_tx_channels ? 2 : 1);
1296 	unsigned int channel_vis, pio_write_vi_base, max_vis;
1297 	void __iomem *membase;
1298 	int rc;
1299 
1300 	channel_vis = max(efx->n_channels,
1301 			  ((efx->n_tx_channels + efx->n_extra_tx_channels) *
1302 			   EFX_TXQ_TYPES) +
1303 			   efx->n_xdp_channels * efx->xdp_tx_per_channel);
1304 
1305 #ifdef EFX_USE_PIO
1306 	/* Try to allocate PIO buffers if wanted and if the full
1307 	 * number of PIO buffers would be sufficient to allocate one
1308 	 * copy-buffer per TX channel.  Failure is non-fatal, as there
1309 	 * are only a small number of PIO buffers shared between all
1310 	 * functions of the controller.
1311 	 */
1312 	if (efx_piobuf_size != 0 &&
1313 	    nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
1314 	    efx->n_tx_channels) {
1315 		unsigned int n_piobufs =
1316 			DIV_ROUND_UP(efx->n_tx_channels,
1317 				     nic_data->piobuf_size / efx_piobuf_size);
1318 
1319 		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
1320 		if (rc == -ENOSPC)
1321 			netif_dbg(efx, probe, efx->net_dev,
1322 				  "out of PIO buffers; cannot allocate more\n");
1323 		else if (rc == -EPERM)
1324 			netif_dbg(efx, probe, efx->net_dev,
1325 				  "not permitted to allocate PIO buffers\n");
1326 		else if (rc)
1327 			netif_err(efx, probe, efx->net_dev,
1328 				  "failed to allocate PIO buffers (%d)\n", rc);
1329 		else
1330 			netif_dbg(efx, probe, efx->net_dev,
1331 				  "allocated %u PIO buffers\n", n_piobufs);
1332 	}
1333 #else
1334 	nic_data->n_piobufs = 0;
1335 #endif
1336 
1337 	/* PIO buffers should be mapped with write-combining enabled,
1338 	 * and we want to make single UC and WC mappings rather than
1339 	 * several of each (in fact that's the only option if host
1340 	 * page size is >4K).  So we may allocate some extra VIs just
1341 	 * for writing PIO buffers through.
1342 	 *
1343 	 * The UC mapping contains (channel_vis - 1) complete VIs and the
1344 	 * first 4K of the next VI.  Then the WC mapping begins with
1345 	 * the remainder of this last VI.
1346 	 */
1347 	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
1348 				     ER_DZ_TX_PIOBUF);
1349 	if (nic_data->n_piobufs) {
1350 		/* pio_write_vi_base rounds down to give the number of complete
1351 		 * VIs inside the UC mapping.
1352 		 */
1353 		pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
1354 		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
1355 					       nic_data->n_piobufs) *
1356 					      efx->vi_stride) -
1357 				   uc_mem_map_size);
1358 		max_vis = pio_write_vi_base + nic_data->n_piobufs;
1359 	} else {
1360 		pio_write_vi_base = 0;
1361 		wc_mem_map_size = 0;
1362 		max_vis = channel_vis;
1363 	}
1364 
1365 	/* In case the last attached driver failed to free VIs, do it now */
1366 	rc = efx_ef10_free_vis(efx);
1367 	if (rc != 0)
1368 		return rc;
1369 
1370 	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
1371 	if (rc != 0)
1372 		return rc;
1373 
1374 	if (nic_data->n_allocated_vis < channel_vis) {
1375 		netif_info(efx, drv, efx->net_dev,
1376 			   "Could not allocate enough VIs to satisfy RSS"
1377 			   " requirements. Performance may not be optimal.\n");
1378 		/* We didn't get the VIs to populate our channels.
1379 		 * We could keep what we got but then we'd have more
1380 		 * interrupts than we need.
1381 		 * Instead calculate new max_channels and restart
1382 		 */
1383 		efx->max_channels = nic_data->n_allocated_vis;
1384 		efx->max_tx_channels =
1385 			nic_data->n_allocated_vis / EFX_TXQ_TYPES;
1386 
1387 		efx_ef10_free_vis(efx);
1388 		return -EAGAIN;
1389 	}
1390 
1391 	/* If we didn't get enough VIs to map all the PIO buffers, free the
1392 	 * PIO buffers
1393 	 */
1394 	if (nic_data->n_piobufs &&
1395 	    nic_data->n_allocated_vis <
1396 	    pio_write_vi_base + nic_data->n_piobufs) {
1397 		netif_dbg(efx, probe, efx->net_dev,
1398 			  "%u VIs are not sufficient to map %u PIO buffers\n",
1399 			  nic_data->n_allocated_vis, nic_data->n_piobufs);
1400 		efx_ef10_free_piobufs(efx);
1401 	}
1402 
1403 	/* Shrink the original UC mapping of the memory BAR */
1404 	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
1405 	if (!membase) {
1406 		netif_err(efx, probe, efx->net_dev,
1407 			  "could not shrink memory BAR to %x\n",
1408 			  uc_mem_map_size);
1409 		return -ENOMEM;
1410 	}
1411 	iounmap(efx->membase);
1412 	efx->membase = membase;
1413 
1414 	/* Set up the WC mapping if needed */
1415 	if (wc_mem_map_size) {
1416 		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
1417 						  uc_mem_map_size,
1418 						  wc_mem_map_size);
1419 		if (!nic_data->wc_membase) {
1420 			netif_err(efx, probe, efx->net_dev,
1421 				  "could not allocate WC mapping of size %x\n",
1422 				  wc_mem_map_size);
1423 			return -ENOMEM;
1424 		}
1425 		nic_data->pio_write_vi_base = pio_write_vi_base;
1426 		nic_data->pio_write_base =
1427 			nic_data->wc_membase +
1428 			(pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
1429 			 uc_mem_map_size);
1430 
1431 		rc = efx_ef10_link_piobufs(efx);
1432 		if (rc)
1433 			efx_ef10_free_piobufs(efx);
1434 	}
1435 
1436 	netif_dbg(efx, probe, efx->net_dev,
1437 		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
1438 		  &efx->membase_phys, efx->membase, uc_mem_map_size,
1439 		  nic_data->wc_membase, wc_mem_map_size);
1440 
1441 	return 0;
1442 }
1443 
1444 static int efx_ef10_init_nic(struct efx_nic *efx)
1445 {
1446 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1447 	int rc;
1448 
1449 	if (nic_data->must_check_datapath_caps) {
1450 		rc = efx_ef10_init_datapath_caps(efx);
1451 		if (rc)
1452 			return rc;
1453 		nic_data->must_check_datapath_caps = false;
1454 	}
1455 
1456 	if (nic_data->must_realloc_vis) {
1457 		/* We cannot let the number of VIs change now */
1458 		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
1459 					nic_data->n_allocated_vis);
1460 		if (rc)
1461 			return rc;
1462 		nic_data->must_realloc_vis = false;
1463 	}
1464 
1465 	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
1466 		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
1467 		if (rc == 0) {
1468 			rc = efx_ef10_link_piobufs(efx);
1469 			if (rc)
1470 				efx_ef10_free_piobufs(efx);
1471 		}
1472 
1473 		/* Log an error on failure, but this is non-fatal.
1474 		 * Permission errors are less important - we've presumably
1475 		 * had the PIO buffer licence removed.
1476 		 */
1477 		if (rc == -EPERM)
1478 			netif_dbg(efx, drv, efx->net_dev,
1479 				  "not permitted to restore PIO buffers\n");
1480 		else if (rc)
1481 			netif_err(efx, drv, efx->net_dev,
1482 				  "failed to restore PIO buffers (%d)\n", rc);
1483 		nic_data->must_restore_piobufs = false;
1484 	}
1485 
1486 	/* don't fail init if RSS setup doesn't work */
1487 	rc = efx->type->rx_push_rss_config(efx, false,
1488 					   efx->rss_context.rx_indir_table, NULL);
1489 
1490 	return 0;
1491 }
1492 
1493 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
1494 {
1495 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1496 #ifdef CONFIG_SFC_SRIOV
1497 	unsigned int i;
1498 #endif
1499 
1500 	/* All our allocations have been reset */
1501 	nic_data->must_realloc_vis = true;
1502 	nic_data->must_restore_rss_contexts = true;
1503 	nic_data->must_restore_filters = true;
1504 	nic_data->must_restore_piobufs = true;
1505 	efx_ef10_forget_old_piobufs(efx);
1506 	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
1507 
1508 	/* Driver-created vswitches and vports must be re-created */
1509 	nic_data->must_probe_vswitching = true;
1510 	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
1511 #ifdef CONFIG_SFC_SRIOV
1512 	if (nic_data->vf)
1513 		for (i = 0; i < efx->vf_count; i++)
1514 			nic_data->vf[i].vport_id = 0;
1515 #endif
1516 }
1517 
1518 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
1519 {
1520 	if (reason == RESET_TYPE_MC_FAILURE)
1521 		return RESET_TYPE_DATAPATH;
1522 
1523 	return efx_mcdi_map_reset_reason(reason);
1524 }
1525 
1526 static int efx_ef10_map_reset_flags(u32 *flags)
1527 {
1528 	enum {
1529 		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
1530 				   ETH_RESET_SHARED_SHIFT),
1531 		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
1532 				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
1533 				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
1534 				 ETH_RESET_SHARED_SHIFT)
1535 	};
1536 
1537 	/* We assume for now that our PCI function is permitted to
1538 	 * reset everything.
1539 	 */
1540 
1541 	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
1542 		*flags &= ~EF10_RESET_MC;
1543 		return RESET_TYPE_WORLD;
1544 	}
1545 
1546 	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
1547 		*flags &= ~EF10_RESET_PORT;
1548 		return RESET_TYPE_ALL;
1549 	}
1550 
1551 	/* no invisible reset implemented */
1552 
1553 	return -EINVAL;
1554 }
1555 
1556 static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
1557 {
1558 	int rc = efx_mcdi_reset(efx, reset_type);
1559 
1560 	/* Unprivileged functions return -EPERM, but need to return success
1561 	 * here so that the datapath is brought back up.
1562 	 */
1563 	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
1564 		rc = 0;
1565 
1566 	/* If it was a port reset, trigger reallocation of MC resources.
1567 	 * Note that on an MC reset nothing needs to be done now because we'll
1568 	 * detect the MC reset later and handle it then.
1569 	 * For an FLR, we never get an MC reset event, but the MC has reset all
1570 	 * resources assigned to us, so we have to trigger reallocation now.
1571 	 */
1572 	if ((reset_type == RESET_TYPE_ALL ||
1573 	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
1574 		efx_ef10_reset_mc_allocations(efx);
1575 	return rc;
1576 }
1577 
1578 #define EF10_DMA_STAT(ext_name, mcdi_name)			\
1579 	[EF10_STAT_ ## ext_name] =				\
1580 	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1581 #define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
1582 	[EF10_STAT_ ## int_name] =				\
1583 	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1584 #define EF10_OTHER_STAT(ext_name)				\
1585 	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
1586 #define GENERIC_SW_STAT(ext_name)				\
1587 	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
1588 
1589 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
1590 	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
1591 	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
1592 	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
1593 	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
1594 	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
1595 	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
1596 	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
1597 	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
1598 	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
1599 	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
1600 	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
1601 	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
1602 	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
1603 	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
1604 	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
1605 	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
1606 	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
1607 	EF10_OTHER_STAT(port_rx_good_bytes),
1608 	EF10_OTHER_STAT(port_rx_bad_bytes),
1609 	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
1610 	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
1611 	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
1612 	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
1613 	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
1614 	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
1615 	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
1616 	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
1617 	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
1618 	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
1619 	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
1620 	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
1621 	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
1622 	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
1623 	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
1624 	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
1625 	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
1626 	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
1627 	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
1628 	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
1629 	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
1630 	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
1631 	GENERIC_SW_STAT(rx_nodesc_trunc),
1632 	GENERIC_SW_STAT(rx_noskb_drops),
1633 	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
1634 	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
1635 	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
1636 	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
1637 	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
1638 	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
1639 	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
1640 	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
1641 	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
1642 	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
1643 	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
1644 	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
1645 	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
1646 	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
1647 	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
1648 	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
1649 	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
1650 	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
1651 	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
1652 	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
1653 	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
1654 	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
1655 	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
1656 	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
1657 	EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
1658 	EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
1659 	EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
1660 	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
1661 	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
1662 	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
1663 	EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
1664 	EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
1665 	EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
1666 	EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
1667 	EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
1668 	EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
1669 	EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
1670 	EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
1671 	EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
1672 	EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
1673 	EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
1674 	EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
1675 	EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
1676 	EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
1677 	EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
1678 	EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
1679 	EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
1680 	EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
1681 	EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
1682 	EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
1683 	EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
1684 	EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
1685 };
1686 
1687 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
1688 			       (1ULL << EF10_STAT_port_tx_packets) |	\
1689 			       (1ULL << EF10_STAT_port_tx_pause) |	\
1690 			       (1ULL << EF10_STAT_port_tx_unicast) |	\
1691 			       (1ULL << EF10_STAT_port_tx_multicast) |	\
1692 			       (1ULL << EF10_STAT_port_tx_broadcast) |	\
1693 			       (1ULL << EF10_STAT_port_rx_bytes) |	\
1694 			       (1ULL <<                                 \
1695 				EF10_STAT_port_rx_bytes_minus_good_bytes) | \
1696 			       (1ULL << EF10_STAT_port_rx_good_bytes) |	\
1697 			       (1ULL << EF10_STAT_port_rx_bad_bytes) |	\
1698 			       (1ULL << EF10_STAT_port_rx_packets) |	\
1699 			       (1ULL << EF10_STAT_port_rx_good) |	\
1700 			       (1ULL << EF10_STAT_port_rx_bad) |	\
1701 			       (1ULL << EF10_STAT_port_rx_pause) |	\
1702 			       (1ULL << EF10_STAT_port_rx_control) |	\
1703 			       (1ULL << EF10_STAT_port_rx_unicast) |	\
1704 			       (1ULL << EF10_STAT_port_rx_multicast) |	\
1705 			       (1ULL << EF10_STAT_port_rx_broadcast) |	\
1706 			       (1ULL << EF10_STAT_port_rx_lt64) |	\
1707 			       (1ULL << EF10_STAT_port_rx_64) |		\
1708 			       (1ULL << EF10_STAT_port_rx_65_to_127) |	\
1709 			       (1ULL << EF10_STAT_port_rx_128_to_255) |	\
1710 			       (1ULL << EF10_STAT_port_rx_256_to_511) |	\
1711 			       (1ULL << EF10_STAT_port_rx_512_to_1023) |\
1712 			       (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
1713 			       (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
1714 			       (1ULL << EF10_STAT_port_rx_gtjumbo) |	\
1715 			       (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
1716 			       (1ULL << EF10_STAT_port_rx_overflow) |	\
1717 			       (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
1718 			       (1ULL << GENERIC_STAT_rx_nodesc_trunc) |	\
1719 			       (1ULL << GENERIC_STAT_rx_noskb_drops))
1720 
1721 /* On 7000 series NICs, these statistics are only provided by the 10G MAC.
1722  * For a 10G/40G switchable port we do not expose these because they might
1723  * not include all the packets they should.
1724  * On 8000 series NICs these statistics are always provided.
1725  */
1726 #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |	\
1727 				 (1ULL << EF10_STAT_port_tx_lt64) |	\
1728 				 (1ULL << EF10_STAT_port_tx_64) |	\
1729 				 (1ULL << EF10_STAT_port_tx_65_to_127) |\
1730 				 (1ULL << EF10_STAT_port_tx_128_to_255) |\
1731 				 (1ULL << EF10_STAT_port_tx_256_to_511) |\
1732 				 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
1733 				 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
1734 				 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
1735 
1736 /* These statistics are only provided by the 40G MAC.  For a 10G/40G
1737  * switchable port we do expose these because the errors will otherwise
1738  * be silent.
1739  */
1740 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
1741 				  (1ULL << EF10_STAT_port_rx_length_error))
1742 
1743 /* These statistics are only provided if the firmware supports the
1744  * capability PM_AND_RXDP_COUNTERS.
1745  */
1746 #define HUNT_PM_AND_RXDP_STAT_MASK (					\
1747 	(1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |		\
1748 	(1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |		\
1749 	(1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |		\
1750 	(1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |		\
1751 	(1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |			\
1752 	(1ULL << EF10_STAT_port_rx_pm_discard_qbb) |			\
1753 	(1ULL << EF10_STAT_port_rx_pm_discard_mapping) |		\
1754 	(1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |		\
1755 	(1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |		\
1756 	(1ULL << EF10_STAT_port_rx_dp_streaming_packets) |		\
1757 	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
1758 	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))
1759 
1760 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
1761  * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
1762  * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1763  * These bits are in the second u64 of the raw mask.
1764  */
1765 #define EF10_FEC_STAT_MASK (						\
1766 	(1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) |		\
1767 	(1ULL << (EF10_STAT_fec_corrected_errors - 64)) |		\
1768 	(1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) |	\
1769 	(1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) |	\
1770 	(1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) |	\
1771 	(1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
1772 
1773 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
1774  * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
1775  * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1776  * These bits are in the second u64 of the raw mask.
1777  */
1778 #define EF10_CTPIO_STAT_MASK (						\
1779 	(1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) |		\
1780 	(1ULL << (EF10_STAT_ctpio_long_write_success - 64)) |		\
1781 	(1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) |		\
1782 	(1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) |		\
1783 	(1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) |		\
1784 	(1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) |			\
1785 	(1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) |		\
1786 	(1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) |		\
1787 	(1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) |		\
1788 	(1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) |		\
1789 	(1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) |		\
1790 	(1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) |		\
1791 	(1ULL << (EF10_STAT_ctpio_success - 64)) |			\
1792 	(1ULL << (EF10_STAT_ctpio_fallback - 64)) |			\
1793 	(1ULL << (EF10_STAT_ctpio_poison - 64)) |			\
1794 	(1ULL << (EF10_STAT_ctpio_erase - 64)))
1795 
1796 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
1797 {
1798 	u64 raw_mask = HUNT_COMMON_STAT_MASK;
1799 	u32 port_caps = efx_mcdi_phy_get_caps(efx);
1800 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1801 
1802 	if (!(efx->mcdi->fn_flags &
1803 	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
1804 		return 0;
1805 
1806 	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
1807 		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
1808 		/* 8000 series have everything even at 40G */
1809 		if (nic_data->datapath_caps2 &
1810 		    (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
1811 			raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1812 	} else {
1813 		raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1814 	}
1815 
1816 	if (nic_data->datapath_caps &
1817 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
1818 		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
1819 
1820 	return raw_mask;
1821 }
1822 
1823 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
1824 {
1825 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1826 	u64 raw_mask[2];
1827 
1828 	raw_mask[0] = efx_ef10_raw_stat_mask(efx);
1829 
1830 	/* Only show vadaptor stats when EVB capability is present */
1831 	if (nic_data->datapath_caps &
1832 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
1833 		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
1834 		raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
1835 	} else {
1836 		raw_mask[1] = 0;
1837 	}
1838 	/* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
1839 	if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
1840 		raw_mask[1] |= EF10_FEC_STAT_MASK;
1841 
1842 	/* CTPIO stats appear in V3. Only show them on devices that actually
1843 	 * support CTPIO. Although this driver doesn't use CTPIO others might,
1844 	 * and we may be reporting the stats for the underlying port.
1845 	 */
1846 	if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
1847 	    (nic_data->datapath_caps2 &
1848 	     (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
1849 		raw_mask[1] |= EF10_CTPIO_STAT_MASK;
1850 
1851 #if BITS_PER_LONG == 64
1852 	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
1853 	mask[0] = raw_mask[0];
1854 	mask[1] = raw_mask[1];
1855 #else
1856 	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
1857 	mask[0] = raw_mask[0] & 0xffffffff;
1858 	mask[1] = raw_mask[0] >> 32;
1859 	mask[2] = raw_mask[1] & 0xffffffff;
1860 #endif
1861 }
1862 
1863 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
1864 {
1865 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1866 
1867 	efx_ef10_get_stat_mask(efx, mask);
1868 	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
1869 				      mask, names);
1870 }
1871 
1872 static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
1873 					   struct rtnl_link_stats64 *core_stats)
1874 {
1875 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1876 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1877 	u64 *stats = nic_data->stats;
1878 	size_t stats_count = 0, index;
1879 
1880 	efx_ef10_get_stat_mask(efx, mask);
1881 
1882 	if (full_stats) {
1883 		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
1884 			if (efx_ef10_stat_desc[index].name) {
1885 				*full_stats++ = stats[index];
1886 				++stats_count;
1887 			}
1888 		}
1889 	}
1890 
1891 	if (!core_stats)
1892 		return stats_count;
1893 
1894 	if (nic_data->datapath_caps &
1895 			1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
1896 		/* Use vadaptor stats. */
1897 		core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
1898 					 stats[EF10_STAT_rx_multicast] +
1899 					 stats[EF10_STAT_rx_broadcast];
1900 		core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
1901 					 stats[EF10_STAT_tx_multicast] +
1902 					 stats[EF10_STAT_tx_broadcast];
1903 		core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
1904 				       stats[EF10_STAT_rx_multicast_bytes] +
1905 				       stats[EF10_STAT_rx_broadcast_bytes];
1906 		core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
1907 				       stats[EF10_STAT_tx_multicast_bytes] +
1908 				       stats[EF10_STAT_tx_broadcast_bytes];
1909 		core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
1910 					 stats[GENERIC_STAT_rx_noskb_drops];
1911 		core_stats->multicast = stats[EF10_STAT_rx_multicast];
1912 		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
1913 		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
1914 		core_stats->rx_errors = core_stats->rx_crc_errors;
1915 		core_stats->tx_errors = stats[EF10_STAT_tx_bad];
1916 	} else {
1917 		/* Use port stats. */
1918 		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
1919 		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
1920 		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
1921 		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
1922 		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
1923 					 stats[GENERIC_STAT_rx_nodesc_trunc] +
1924 					 stats[GENERIC_STAT_rx_noskb_drops];
1925 		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
1926 		core_stats->rx_length_errors =
1927 				stats[EF10_STAT_port_rx_gtjumbo] +
1928 				stats[EF10_STAT_port_rx_length_error];
1929 		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
1930 		core_stats->rx_frame_errors =
1931 				stats[EF10_STAT_port_rx_align_error];
1932 		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
1933 		core_stats->rx_errors = (core_stats->rx_length_errors +
1934 					 core_stats->rx_crc_errors +
1935 					 core_stats->rx_frame_errors);
1936 	}
1937 
1938 	return stats_count;
1939 }
1940 
1941 static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
1942 {
1943 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1944 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1945 	__le64 generation_start, generation_end;
1946 	u64 *stats = nic_data->stats;
1947 	__le64 *dma_stats;
1948 
1949 	efx_ef10_get_stat_mask(efx, mask);
1950 
1951 	dma_stats = efx->stats_buffer.addr;
1952 
1953 	generation_end = dma_stats[efx->num_mac_stats - 1];
1954 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
1955 		return 0;
1956 	rmb();
1957 	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
1958 			     stats, efx->stats_buffer.addr, false);
1959 	rmb();
1960 	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1961 	if (generation_end != generation_start)
1962 		return -EAGAIN;
1963 
1964 	/* Update derived statistics */
1965 	efx_nic_fix_nodesc_drop_stat(efx,
1966 				     &stats[EF10_STAT_port_rx_nodesc_drops]);
1967 	stats[EF10_STAT_port_rx_good_bytes] =
1968 		stats[EF10_STAT_port_rx_bytes] -
1969 		stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
1970 	efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
1971 			     stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
1972 	efx_update_sw_stats(efx, stats);
1973 	return 0;
1974 }
1975 
1976 
1977 static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
1978 				       struct rtnl_link_stats64 *core_stats)
1979 {
1980 	int retry;
1981 
1982 	/* If we're unlucky enough to read statistics during the DMA, wait
1983 	 * up to 10ms for it to finish (typically takes <500us)
1984 	 */
1985 	for (retry = 0; retry < 100; ++retry) {
1986 		if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
1987 			break;
1988 		udelay(100);
1989 	}
1990 
1991 	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1992 }
1993 
1994 static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
1995 {
1996 	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
1997 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1998 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1999 	__le64 generation_start, generation_end;
2000 	u64 *stats = nic_data->stats;
2001 	u32 dma_len = efx->num_mac_stats * sizeof(u64);
2002 	struct efx_buffer stats_buf;
2003 	__le64 *dma_stats;
2004 	int rc;
2005 
2006 	spin_unlock_bh(&efx->stats_lock);
2007 
2008 	if (in_interrupt()) {
2009 		/* If in atomic context, cannot update stats.  Just update the
2010 		 * software stats and return so the caller can continue.
2011 		 */
2012 		spin_lock_bh(&efx->stats_lock);
2013 		efx_update_sw_stats(efx, stats);
2014 		return 0;
2015 	}
2016 
2017 	efx_ef10_get_stat_mask(efx, mask);
2018 
2019 	rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
2020 	if (rc) {
2021 		spin_lock_bh(&efx->stats_lock);
2022 		return rc;
2023 	}
2024 
2025 	dma_stats = stats_buf.addr;
2026 	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
2027 
2028 	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
2029 	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
2030 			      MAC_STATS_IN_DMA, 1);
2031 	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
2032 	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
2033 
2034 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
2035 				NULL, 0, NULL);
2036 	spin_lock_bh(&efx->stats_lock);
2037 	if (rc) {
2038 		/* Expect ENOENT if DMA queues have not been set up */
2039 		if (rc != -ENOENT || atomic_read(&efx->active_queues))
2040 			efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
2041 					       sizeof(inbuf), NULL, 0, rc);
2042 		goto out;
2043 	}
2044 
2045 	generation_end = dma_stats[efx->num_mac_stats - 1];
2046 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
2047 		WARN_ON_ONCE(1);
2048 		goto out;
2049 	}
2050 	rmb();
2051 	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
2052 			     stats, stats_buf.addr, false);
2053 	rmb();
2054 	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
2055 	if (generation_end != generation_start) {
2056 		rc = -EAGAIN;
2057 		goto out;
2058 	}
2059 
2060 	efx_update_sw_stats(efx, stats);
2061 out:
2062 	efx_nic_free_buffer(efx, &stats_buf);
2063 	return rc;
2064 }
2065 
2066 static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
2067 				       struct rtnl_link_stats64 *core_stats)
2068 {
2069 	if (efx_ef10_try_update_nic_stats_vf(efx))
2070 		return 0;
2071 
2072 	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
2073 }
2074 
2075 static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
2076 {
2077 	struct efx_nic *efx = channel->efx;
2078 	unsigned int mode, usecs;
2079 	efx_dword_t timer_cmd;
2080 
2081 	if (channel->irq_moderation_us) {
2082 		mode = 3;
2083 		usecs = channel->irq_moderation_us;
2084 	} else {
2085 		mode = 0;
2086 		usecs = 0;
2087 	}
2088 
2089 	if (EFX_EF10_WORKAROUND_61265(efx)) {
2090 		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
2091 		unsigned int ns = usecs * 1000;
2092 
2093 		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
2094 			       channel->channel);
2095 		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
2096 		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
2097 		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);
2098 
2099 		efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
2100 				   inbuf, sizeof(inbuf), 0, NULL, 0);
2101 	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
2102 		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
2103 
2104 		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
2105 				     EFE_DD_EVQ_IND_TIMER_FLAGS,
2106 				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
2107 				     ERF_DD_EVQ_IND_TIMER_VAL, ticks);
2108 		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
2109 				channel->channel);
2110 	} else {
2111 		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
2112 
2113 		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
2114 				     ERF_DZ_TC_TIMER_VAL, ticks,
2115 				     ERF_FZ_TC_TMR_REL_VAL, ticks);
2116 		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
2117 				channel->channel);
2118 	}
2119 }
2120 
2121 static void efx_ef10_get_wol_vf(struct efx_nic *efx,
2122 				struct ethtool_wolinfo *wol) {}
2123 
2124 static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
2125 {
2126 	return -EOPNOTSUPP;
2127 }
2128 
2129 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
2130 {
2131 	wol->supported = 0;
2132 	wol->wolopts = 0;
2133 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2134 }
2135 
2136 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
2137 {
2138 	if (type != 0)
2139 		return -EINVAL;
2140 	return 0;
2141 }
2142 
2143 static void efx_ef10_mcdi_request(struct efx_nic *efx,
2144 				  const efx_dword_t *hdr, size_t hdr_len,
2145 				  const efx_dword_t *sdu, size_t sdu_len)
2146 {
2147 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2148 	u8 *pdu = nic_data->mcdi_buf.addr;
2149 
2150 	memcpy(pdu, hdr, hdr_len);
2151 	memcpy(pdu + hdr_len, sdu, sdu_len);
2152 	wmb();
2153 
2154 	/* The hardware provides 'low' and 'high' (doorbell) registers
2155 	 * for passing the 64-bit address of an MCDI request to
2156 	 * firmware.  However the dwords are swapped by firmware.  The
2157 	 * least significant bits of the doorbell are then 0 for all
2158 	 * MCDI requests due to alignment.
2159 	 */
2160 	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
2161 		    ER_DZ_MC_DB_LWRD);
2162 	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
2163 		    ER_DZ_MC_DB_HWRD);
2164 }
2165 
2166 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
2167 {
2168 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2169 	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
2170 
2171 	rmb();
2172 	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
2173 }
2174 
2175 static void
2176 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
2177 			    size_t offset, size_t outlen)
2178 {
2179 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2180 	const u8 *pdu = nic_data->mcdi_buf.addr;
2181 
2182 	memcpy(outbuf, pdu + offset, outlen);
2183 }
2184 
2185 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
2186 {
2187 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2188 
2189 	/* All our allocations have been reset */
2190 	efx_ef10_reset_mc_allocations(efx);
2191 
2192 	/* The datapath firmware might have been changed */
2193 	nic_data->must_check_datapath_caps = true;
2194 
2195 	/* MAC statistics have been cleared on the NIC; clear the local
2196 	 * statistic that we update with efx_update_diff_stat().
2197 	 */
2198 	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
2199 }
2200 
2201 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
2202 {
2203 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2204 	int rc;
2205 
2206 	rc = efx_ef10_get_warm_boot_count(efx);
2207 	if (rc < 0) {
2208 		/* The firmware is presumably in the process of
2209 		 * rebooting.  However, we are supposed to report each
2210 		 * reboot just once, so we must only do that once we
2211 		 * can read and store the updated warm boot count.
2212 		 */
2213 		return 0;
2214 	}
2215 
2216 	if (rc == nic_data->warm_boot_count)
2217 		return 0;
2218 
2219 	nic_data->warm_boot_count = rc;
2220 	efx_ef10_mcdi_reboot_detected(efx);
2221 
2222 	return -EIO;
2223 }
2224 
2225 /* Handle an MSI interrupt
2226  *
2227  * Handle an MSI hardware interrupt.  This routine schedules event
2228  * queue processing.  No interrupt acknowledgement cycle is necessary.
2229  * Also, we never need to check that the interrupt is for us, since
2230  * MSI interrupts cannot be shared.
2231  */
2232 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
2233 {
2234 	struct efx_msi_context *context = dev_id;
2235 	struct efx_nic *efx = context->efx;
2236 
2237 	netif_vdbg(efx, intr, efx->net_dev,
2238 		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
2239 
2240 	if (likely(READ_ONCE(efx->irq_soft_enabled))) {
2241 		/* Note test interrupts */
2242 		if (context->index == efx->irq_level)
2243 			efx->last_irq_cpu = raw_smp_processor_id();
2244 
2245 		/* Schedule processing of the channel */
2246 		efx_schedule_channel_irq(efx->channel[context->index]);
2247 	}
2248 
2249 	return IRQ_HANDLED;
2250 }
2251 
2252 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
2253 {
2254 	struct efx_nic *efx = dev_id;
2255 	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
2256 	struct efx_channel *channel;
2257 	efx_dword_t reg;
2258 	u32 queues;
2259 
2260 	/* Read the ISR which also ACKs the interrupts */
2261 	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
2262 	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
2263 
2264 	if (queues == 0)
2265 		return IRQ_NONE;
2266 
2267 	if (likely(soft_enabled)) {
2268 		/* Note test interrupts */
2269 		if (queues & (1U << efx->irq_level))
2270 			efx->last_irq_cpu = raw_smp_processor_id();
2271 
2272 		efx_for_each_channel(channel, efx) {
2273 			if (queues & 1)
2274 				efx_schedule_channel_irq(channel);
2275 			queues >>= 1;
2276 		}
2277 	}
2278 
2279 	netif_vdbg(efx, intr, efx->net_dev,
2280 		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
2281 		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
2282 
2283 	return IRQ_HANDLED;
2284 }
2285 
2286 static int efx_ef10_irq_test_generate(struct efx_nic *efx)
2287 {
2288 	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
2289 
2290 	if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
2291 				    NULL) == 0)
2292 		return -ENOTSUPP;
2293 
2294 	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
2295 
2296 	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
2297 	return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
2298 			    inbuf, sizeof(inbuf), NULL, 0, NULL);
2299 }
2300 
2301 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
2302 {
2303 	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
2304 				    (tx_queue->ptr_mask + 1) *
2305 				    sizeof(efx_qword_t),
2306 				    GFP_KERNEL);
2307 }
2308 
2309 /* This writes to the TX_DESC_WPTR and also pushes data */
2310 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
2311 					 const efx_qword_t *txd)
2312 {
2313 	unsigned int write_ptr;
2314 	efx_oword_t reg;
2315 
2316 	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2317 	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
2318 	reg.qword[0] = *txd;
2319 	efx_writeo_page(tx_queue->efx, &reg,
2320 			ER_DZ_TX_DESC_UPD, tx_queue->queue);
2321 }
2322 
2323 /* Add Firmware-Assisted TSO v2 option descriptors to a queue.
2324  */
2325 static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
2326 				struct sk_buff *skb,
2327 				bool *data_mapped)
2328 {
2329 	struct efx_tx_buffer *buffer;
2330 	struct tcphdr *tcp;
2331 	struct iphdr *ip;
2332 
2333 	u16 ipv4_id;
2334 	u32 seqnum;
2335 	u32 mss;
2336 
2337 	EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2);
2338 
2339 	mss = skb_shinfo(skb)->gso_size;
2340 
2341 	if (unlikely(mss < 4)) {
2342 		WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
2343 		return -EINVAL;
2344 	}
2345 
2346 	ip = ip_hdr(skb);
2347 	if (ip->version == 4) {
2348 		/* Modify IPv4 header if needed. */
2349 		ip->tot_len = 0;
2350 		ip->check = 0;
2351 		ipv4_id = ntohs(ip->id);
2352 	} else {
2353 		/* Modify IPv6 header if needed. */
2354 		struct ipv6hdr *ipv6 = ipv6_hdr(skb);
2355 
2356 		ipv6->payload_len = 0;
2357 		ipv4_id = 0;
2358 	}
2359 
2360 	tcp = tcp_hdr(skb);
2361 	seqnum = ntohl(tcp->seq);
2362 
2363 	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2364 
2365 	buffer->flags = EFX_TX_BUF_OPTION;
2366 	buffer->len = 0;
2367 	buffer->unmap_len = 0;
2368 	EFX_POPULATE_QWORD_5(buffer->option,
2369 			ESF_DZ_TX_DESC_IS_OPT, 1,
2370 			ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2371 			ESF_DZ_TX_TSO_OPTION_TYPE,
2372 			ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
2373 			ESF_DZ_TX_TSO_IP_ID, ipv4_id,
2374 			ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
2375 			);
2376 	++tx_queue->insert_count;
2377 
2378 	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2379 
2380 	buffer->flags = EFX_TX_BUF_OPTION;
2381 	buffer->len = 0;
2382 	buffer->unmap_len = 0;
2383 	EFX_POPULATE_QWORD_4(buffer->option,
2384 			ESF_DZ_TX_DESC_IS_OPT, 1,
2385 			ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2386 			ESF_DZ_TX_TSO_OPTION_TYPE,
2387 			ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
2388 			ESF_DZ_TX_TSO_TCP_MSS, mss
2389 			);
2390 	++tx_queue->insert_count;
2391 
2392 	return 0;
2393 }
2394 
2395 static u32 efx_ef10_tso_versions(struct efx_nic *efx)
2396 {
2397 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2398 	u32 tso_versions = 0;
2399 
2400 	if (nic_data->datapath_caps &
2401 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
2402 		tso_versions |= BIT(1);
2403 	if (nic_data->datapath_caps2 &
2404 	    (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
2405 		tso_versions |= BIT(2);
2406 	return tso_versions;
2407 }
2408 
2409 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
2410 {
2411 	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2412 						       EFX_BUF_SIZE));
2413 	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
2414 	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
2415 	struct efx_channel *channel = tx_queue->channel;
2416 	struct efx_nic *efx = tx_queue->efx;
2417 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2418 	bool tso_v2 = false;
2419 	size_t inlen;
2420 	dma_addr_t dma_addr;
2421 	efx_qword_t *txd;
2422 	int rc;
2423 	int i;
2424 	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
2425 
2426 	/* Only attempt to enable TX timestamping if we have the license for it,
2427 	 * otherwise TXQ init will fail
2428 	 */
2429 	if (!(nic_data->licensed_features &
2430 	      (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
2431 		tx_queue->timestamping = false;
2432 		/* Disable sync events on this channel. */
2433 		if (efx->type->ptp_set_ts_sync_events)
2434 			efx->type->ptp_set_ts_sync_events(efx, false, false);
2435 	}
2436 
2437 	/* TSOv2 is a limited resource that can only be configured on a limited
2438 	 * number of queues. TSO without checksum offload is not really a thing,
2439 	 * so we only enable it for those queues.
2440 	 * TSOv2 cannot be used with Hardware timestamping, and is never needed
2441 	 * for XDP tx.
2442 	 */
2443 	if (csum_offload && (nic_data->datapath_caps2 &
2444 			(1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
2445 	    !tx_queue->timestamping && !tx_queue->xdp_tx) {
2446 		tso_v2 = true;
2447 		netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
2448 				channel->channel);
2449 	}
2450 
2451 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
2452 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
2453 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
2454 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
2455 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
2456 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
2457 
2458 	dma_addr = tx_queue->txd.buf.dma_addr;
2459 
2460 	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
2461 		  tx_queue->queue, entries, (u64)dma_addr);
2462 
2463 	for (i = 0; i < entries; ++i) {
2464 		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
2465 		dma_addr += EFX_BUF_SIZE;
2466 	}
2467 
2468 	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
2469 
2470 	do {
2471 		MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
2472 				/* This flag was removed from mcdi_pcol.h for
2473 				 * the non-_EXT version of INIT_TXQ.  However,
2474 				 * firmware still honours it.
2475 				 */
2476 				INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
2477 				INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
2478 				INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
2479 				INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
2480 						tx_queue->timestamping);
2481 
2482 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
2483 					NULL, 0, NULL);
2484 		if (rc == -ENOSPC && tso_v2) {
2485 			/* Retry without TSOv2 if we're short on contexts. */
2486 			tso_v2 = false;
2487 			netif_warn(efx, probe, efx->net_dev,
2488 				   "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
2489 		} else if (rc) {
2490 			efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
2491 					       MC_CMD_INIT_TXQ_EXT_IN_LEN,
2492 					       NULL, 0, rc);
2493 			goto fail;
2494 		}
2495 	} while (rc);
2496 
2497 	/* A previous user of this TX queue might have set us up the
2498 	 * bomb by writing a descriptor to the TX push collector but
2499 	 * not the doorbell.  (Each collector belongs to a port, not a
2500 	 * queue or function, so cannot easily be reset.)  We must
2501 	 * attempt to push a no-op descriptor in its place.
2502 	 */
2503 	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
2504 	tx_queue->insert_count = 1;
2505 	txd = efx_tx_desc(tx_queue, 0);
2506 	EFX_POPULATE_QWORD_5(*txd,
2507 			     ESF_DZ_TX_DESC_IS_OPT, true,
2508 			     ESF_DZ_TX_OPTION_TYPE,
2509 			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
2510 			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
2511 			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
2512 			     ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
2513 	tx_queue->write_count = 1;
2514 
2515 	if (tso_v2) {
2516 		tx_queue->handle_tso = efx_ef10_tx_tso_desc;
2517 		tx_queue->tso_version = 2;
2518 	} else if (nic_data->datapath_caps &
2519 			(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
2520 		tx_queue->tso_version = 1;
2521 	}
2522 
2523 	wmb();
2524 	efx_ef10_push_tx_desc(tx_queue, txd);
2525 
2526 	return;
2527 
2528 fail:
2529 	netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
2530 		    tx_queue->queue);
2531 }
2532 
2533 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
2534 {
2535 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
2536 	MCDI_DECLARE_BUF_ERR(outbuf);
2537 	struct efx_nic *efx = tx_queue->efx;
2538 	size_t outlen;
2539 	int rc;
2540 
2541 	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
2542 		       tx_queue->queue);
2543 
2544 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
2545 			  outbuf, sizeof(outbuf), &outlen);
2546 
2547 	if (rc && rc != -EALREADY)
2548 		goto fail;
2549 
2550 	return;
2551 
2552 fail:
2553 	efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
2554 			       outbuf, outlen, rc);
2555 }
2556 
2557 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
2558 {
2559 	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
2560 }
2561 
2562 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
2563 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
2564 {
2565 	unsigned int write_ptr;
2566 	efx_dword_t reg;
2567 
2568 	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2569 	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
2570 	efx_writed_page(tx_queue->efx, &reg,
2571 			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
2572 }
2573 
2574 #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff
2575 
2576 static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
2577 					  dma_addr_t dma_addr, unsigned int len)
2578 {
2579 	if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
2580 		/* If we need to break across multiple descriptors we should
2581 		 * stop at a page boundary. This assumes the length limit is
2582 		 * greater than the page size.
2583 		 */
2584 		dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;
2585 
2586 		BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
2587 		len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
2588 	}
2589 
2590 	return len;
2591 }
2592 
2593 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
2594 {
2595 	unsigned int old_write_count = tx_queue->write_count;
2596 	struct efx_tx_buffer *buffer;
2597 	unsigned int write_ptr;
2598 	efx_qword_t *txd;
2599 
2600 	tx_queue->xmit_more_available = false;
2601 	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
2602 		return;
2603 
2604 	do {
2605 		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2606 		buffer = &tx_queue->buffer[write_ptr];
2607 		txd = efx_tx_desc(tx_queue, write_ptr);
2608 		++tx_queue->write_count;
2609 
2610 		/* Create TX descriptor ring entry */
2611 		if (buffer->flags & EFX_TX_BUF_OPTION) {
2612 			*txd = buffer->option;
2613 			if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
2614 				/* PIO descriptor */
2615 				tx_queue->packet_write_count = tx_queue->write_count;
2616 		} else {
2617 			tx_queue->packet_write_count = tx_queue->write_count;
2618 			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
2619 			EFX_POPULATE_QWORD_3(
2620 				*txd,
2621 				ESF_DZ_TX_KER_CONT,
2622 				buffer->flags & EFX_TX_BUF_CONT,
2623 				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
2624 				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
2625 		}
2626 	} while (tx_queue->write_count != tx_queue->insert_count);
2627 
2628 	wmb(); /* Ensure descriptors are written before they are fetched */
2629 
2630 	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
2631 		txd = efx_tx_desc(tx_queue,
2632 				  old_write_count & tx_queue->ptr_mask);
2633 		efx_ef10_push_tx_desc(tx_queue, txd);
2634 		++tx_queue->pushes;
2635 	} else {
2636 		efx_ef10_notify_tx_desc(tx_queue);
2637 	}
2638 }
2639 
2640 #define RSS_MODE_HASH_ADDRS	(1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
2641 				 1 << RSS_MODE_HASH_DST_ADDR_LBN)
2642 #define RSS_MODE_HASH_PORTS	(1 << RSS_MODE_HASH_SRC_PORT_LBN |\
2643 				 1 << RSS_MODE_HASH_DST_PORT_LBN)
2644 #define RSS_CONTEXT_FLAGS_DEFAULT	(1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
2645 					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
2646 					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
2647 					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
2648 					 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
2649 					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
2650 					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
2651 					 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
2652 					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
2653 					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
2654 
2655 static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags)
2656 {
2657 	/* Firmware had a bug (sfc bug 61952) where it would not actually
2658 	 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
2659 	 * This meant that it would always contain whatever was previously
2660 	 * in the MCDI buffer.  Fortunately, all firmware versions with
2661 	 * this bug have the same default flags value for a newly-allocated
2662 	 * RSS context, and the only time we want to get the flags is just
2663 	 * after allocating.  Moreover, the response has a 32-bit hole
2664 	 * where the context ID would be in the request, so we can use an
2665 	 * overlength buffer in the request and pre-fill the flags field
2666 	 * with what we believe the default to be.  Thus if the firmware
2667 	 * has the bug, it will leave our pre-filled value in the flags
2668 	 * field of the response, and we will get the right answer.
2669 	 *
2670 	 * However, this does mean that this function should NOT be used if
2671 	 * the RSS context flags might not be their defaults - it is ONLY
2672 	 * reliably correct for a newly-allocated RSS context.
2673 	 */
2674 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2675 	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2676 	size_t outlen;
2677 	int rc;
2678 
2679 	/* Check we have a hole for the context ID */
2680 	BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
2681 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
2682 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
2683 		       RSS_CONTEXT_FLAGS_DEFAULT);
2684 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
2685 			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
2686 	if (rc == 0) {
2687 		if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
2688 			rc = -EIO;
2689 		else
2690 			*flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
2691 	}
2692 	return rc;
2693 }
2694 
2695 /* Attempt to enable 4-tuple UDP hashing on the specified RSS context.
2696  * If we fail, we just leave the RSS context at its default hash settings,
2697  * which is safe but may slightly reduce performance.
2698  * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
2699  * just need to set the UDP ports flags (for both IP versions).
2700  */
2701 static void efx_ef10_set_rss_flags(struct efx_nic *efx,
2702 				   struct efx_rss_context *ctx)
2703 {
2704 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
2705 	u32 flags;
2706 
2707 	BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
2708 
2709 	if (efx_ef10_get_rss_flags(efx, ctx->context_id, &flags) != 0)
2710 		return;
2711 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
2712 		       ctx->context_id);
2713 	flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
2714 	flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
2715 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
2716 	if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
2717 			  NULL, 0, NULL))
2718 		/* Succeeded, so UDP 4-tuple is now enabled */
2719 		ctx->rx_hash_udp_4tuple = true;
2720 }
2721 
2722 static int efx_ef10_alloc_rss_context(struct efx_nic *efx, bool exclusive,
2723 				      struct efx_rss_context *ctx,
2724 				      unsigned *context_size)
2725 {
2726 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
2727 	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
2728 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2729 	size_t outlen;
2730 	int rc;
2731 	u32 alloc_type = exclusive ?
2732 				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
2733 				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
2734 	unsigned rss_spread = exclusive ?
2735 				efx->rss_spread :
2736 				min(rounddown_pow_of_two(efx->rss_spread),
2737 				    EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
2738 
2739 	if (!exclusive && rss_spread == 1) {
2740 		ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
2741 		if (context_size)
2742 			*context_size = 1;
2743 		return 0;
2744 	}
2745 
2746 	if (nic_data->datapath_caps &
2747 	    1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
2748 		return -EOPNOTSUPP;
2749 
2750 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
2751 		       nic_data->vport_id);
2752 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
2753 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
2754 
2755 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
2756 		outbuf, sizeof(outbuf), &outlen);
2757 	if (rc != 0)
2758 		return rc;
2759 
2760 	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
2761 		return -EIO;
2762 
2763 	ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
2764 
2765 	if (context_size)
2766 		*context_size = rss_spread;
2767 
2768 	if (nic_data->datapath_caps &
2769 	    1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
2770 		efx_ef10_set_rss_flags(efx, ctx);
2771 
2772 	return 0;
2773 }
2774 
2775 static int efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
2776 {
2777 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
2778 
2779 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
2780 		       context);
2781 	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
2782 			    NULL, 0, NULL);
2783 }
2784 
2785 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
2786 				       const u32 *rx_indir_table, const u8 *key)
2787 {
2788 	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
2789 	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
2790 	int i, rc;
2791 
2792 	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
2793 		       context);
2794 	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
2795 		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
2796 
2797 	/* This iterates over the length of efx->rss_context.rx_indir_table, but
2798 	 * copies bytes from rx_indir_table.  That's because the latter is a
2799 	 * pointer rather than an array, but should have the same length.
2800 	 * The efx->rss_context.rx_hash_key loop below is similar.
2801 	 */
2802 	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
2803 		MCDI_PTR(tablebuf,
2804 			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
2805 				(u8) rx_indir_table[i];
2806 
2807 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
2808 			  sizeof(tablebuf), NULL, 0, NULL);
2809 	if (rc != 0)
2810 		return rc;
2811 
2812 	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
2813 		       context);
2814 	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
2815 		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2816 	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
2817 		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
2818 
2819 	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
2820 			    sizeof(keybuf), NULL, 0, NULL);
2821 }
2822 
2823 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
2824 {
2825 	int rc;
2826 
2827 	if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) {
2828 		rc = efx_ef10_free_rss_context(efx, efx->rss_context.context_id);
2829 		WARN_ON(rc != 0);
2830 	}
2831 	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
2832 }
2833 
2834 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
2835 					      unsigned *context_size)
2836 {
2837 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2838 	int rc = efx_ef10_alloc_rss_context(efx, false, &efx->rss_context,
2839 					    context_size);
2840 
2841 	if (rc != 0)
2842 		return rc;
2843 
2844 	nic_data->rx_rss_context_exclusive = false;
2845 	efx_set_default_rx_indir_table(efx, &efx->rss_context);
2846 	return 0;
2847 }
2848 
2849 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
2850 						 const u32 *rx_indir_table,
2851 						 const u8 *key)
2852 {
2853 	u32 old_rx_rss_context = efx->rss_context.context_id;
2854 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2855 	int rc;
2856 
2857 	if (efx->rss_context.context_id == EFX_EF10_RSS_CONTEXT_INVALID ||
2858 	    !nic_data->rx_rss_context_exclusive) {
2859 		rc = efx_ef10_alloc_rss_context(efx, true, &efx->rss_context,
2860 						NULL);
2861 		if (rc == -EOPNOTSUPP)
2862 			return rc;
2863 		else if (rc != 0)
2864 			goto fail1;
2865 	}
2866 
2867 	rc = efx_ef10_populate_rss_table(efx, efx->rss_context.context_id,
2868 					 rx_indir_table, key);
2869 	if (rc != 0)
2870 		goto fail2;
2871 
2872 	if (efx->rss_context.context_id != old_rx_rss_context &&
2873 	    old_rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2874 		WARN_ON(efx_ef10_free_rss_context(efx, old_rx_rss_context) != 0);
2875 	nic_data->rx_rss_context_exclusive = true;
2876 	if (rx_indir_table != efx->rss_context.rx_indir_table)
2877 		memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
2878 		       sizeof(efx->rss_context.rx_indir_table));
2879 	if (key != efx->rss_context.rx_hash_key)
2880 		memcpy(efx->rss_context.rx_hash_key, key,
2881 		       efx->type->rx_hash_key_size);
2882 
2883 	return 0;
2884 
2885 fail2:
2886 	if (old_rx_rss_context != efx->rss_context.context_id) {
2887 		WARN_ON(efx_ef10_free_rss_context(efx, efx->rss_context.context_id) != 0);
2888 		efx->rss_context.context_id = old_rx_rss_context;
2889 	}
2890 fail1:
2891 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2892 	return rc;
2893 }
2894 
2895 static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx,
2896 					       struct efx_rss_context *ctx,
2897 					       const u32 *rx_indir_table,
2898 					       const u8 *key)
2899 {
2900 	int rc;
2901 
2902 	WARN_ON(!mutex_is_locked(&efx->rss_lock));
2903 
2904 	if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
2905 		rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL);
2906 		if (rc)
2907 			return rc;
2908 	}
2909 
2910 	if (!rx_indir_table) /* Delete this context */
2911 		return efx_ef10_free_rss_context(efx, ctx->context_id);
2912 
2913 	rc = efx_ef10_populate_rss_table(efx, ctx->context_id,
2914 					 rx_indir_table, key);
2915 	if (rc)
2916 		return rc;
2917 
2918 	memcpy(ctx->rx_indir_table, rx_indir_table,
2919 	       sizeof(efx->rss_context.rx_indir_table));
2920 	memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
2921 
2922 	return 0;
2923 }
2924 
2925 static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx,
2926 					       struct efx_rss_context *ctx)
2927 {
2928 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
2929 	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
2930 	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
2931 	size_t outlen;
2932 	int rc, i;
2933 
2934 	WARN_ON(!mutex_is_locked(&efx->rss_lock));
2935 
2936 	BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
2937 		     MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
2938 
2939 	if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
2940 		return -ENOENT;
2941 
2942 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
2943 		       ctx->context_id);
2944 	BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
2945 		     MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
2946 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
2947 			  tablebuf, sizeof(tablebuf), &outlen);
2948 	if (rc != 0)
2949 		return rc;
2950 
2951 	if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
2952 		return -EIO;
2953 
2954 	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
2955 		ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
2956 				RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
2957 
2958 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
2959 		       ctx->context_id);
2960 	BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
2961 		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2962 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
2963 			  keybuf, sizeof(keybuf), &outlen);
2964 	if (rc != 0)
2965 		return rc;
2966 
2967 	if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
2968 		return -EIO;
2969 
2970 	for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
2971 		ctx->rx_hash_key[i] = MCDI_PTR(
2972 				keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
2973 
2974 	return 0;
2975 }
2976 
2977 static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
2978 {
2979 	int rc;
2980 
2981 	mutex_lock(&efx->rss_lock);
2982 	rc = efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context);
2983 	mutex_unlock(&efx->rss_lock);
2984 	return rc;
2985 }
2986 
2987 static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx)
2988 {
2989 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2990 	struct efx_rss_context *ctx;
2991 	int rc;
2992 
2993 	WARN_ON(!mutex_is_locked(&efx->rss_lock));
2994 
2995 	if (!nic_data->must_restore_rss_contexts)
2996 		return;
2997 
2998 	list_for_each_entry(ctx, &efx->rss_context.list, list) {
2999 		/* previous NIC RSS context is gone */
3000 		ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
3001 		/* so try to allocate a new one */
3002 		rc = efx_ef10_rx_push_rss_context_config(efx, ctx,
3003 							 ctx->rx_indir_table,
3004 							 ctx->rx_hash_key);
3005 		if (rc)
3006 			netif_warn(efx, probe, efx->net_dev,
3007 				   "failed to restore RSS context %u, rc=%d"
3008 				   "; RSS filters may fail to be applied\n",
3009 				   ctx->user_id, rc);
3010 	}
3011 	nic_data->must_restore_rss_contexts = false;
3012 }
3013 
3014 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
3015 					  const u32 *rx_indir_table,
3016 					  const u8 *key)
3017 {
3018 	int rc;
3019 
3020 	if (efx->rss_spread == 1)
3021 		return 0;
3022 
3023 	if (!key)
3024 		key = efx->rss_context.rx_hash_key;
3025 
3026 	rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
3027 
3028 	if (rc == -ENOBUFS && !user) {
3029 		unsigned context_size;
3030 		bool mismatch = false;
3031 		size_t i;
3032 
3033 		for (i = 0;
3034 		     i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
3035 		     i++)
3036 			mismatch = rx_indir_table[i] !=
3037 				ethtool_rxfh_indir_default(i, efx->rss_spread);
3038 
3039 		rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
3040 		if (rc == 0) {
3041 			if (context_size != efx->rss_spread)
3042 				netif_warn(efx, probe, efx->net_dev,
3043 					   "Could not allocate an exclusive RSS"
3044 					   " context; allocated a shared one of"
3045 					   " different size."
3046 					   " Wanted %u, got %u.\n",
3047 					   efx->rss_spread, context_size);
3048 			else if (mismatch)
3049 				netif_warn(efx, probe, efx->net_dev,
3050 					   "Could not allocate an exclusive RSS"
3051 					   " context; allocated a shared one but"
3052 					   " could not apply custom"
3053 					   " indirection.\n");
3054 			else
3055 				netif_info(efx, probe, efx->net_dev,
3056 					   "Could not allocate an exclusive RSS"
3057 					   " context; allocated a shared one.\n");
3058 		}
3059 	}
3060 	return rc;
3061 }
3062 
3063 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
3064 					  const u32 *rx_indir_table
3065 					  __attribute__ ((unused)),
3066 					  const u8 *key
3067 					  __attribute__ ((unused)))
3068 {
3069 	if (user)
3070 		return -EOPNOTSUPP;
3071 	if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID)
3072 		return 0;
3073 	return efx_ef10_rx_push_shared_rss_config(efx, NULL);
3074 }
3075 
3076 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
3077 {
3078 	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
3079 				    (rx_queue->ptr_mask + 1) *
3080 				    sizeof(efx_qword_t),
3081 				    GFP_KERNEL);
3082 }
3083 
3084 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
3085 {
3086 	MCDI_DECLARE_BUF(inbuf,
3087 			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
3088 						EFX_BUF_SIZE));
3089 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
3090 	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
3091 	struct efx_nic *efx = rx_queue->efx;
3092 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
3093 	size_t inlen;
3094 	dma_addr_t dma_addr;
3095 	int rc;
3096 	int i;
3097 	BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
3098 
3099 	rx_queue->scatter_n = 0;
3100 	rx_queue->scatter_len = 0;
3101 
3102 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
3103 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
3104 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
3105 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
3106 		       efx_rx_queue_index(rx_queue));
3107 	MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
3108 			      INIT_RXQ_IN_FLAG_PREFIX, 1,
3109 			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
3110 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
3111 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
3112 
3113 	dma_addr = rx_queue->rxd.buf.dma_addr;
3114 
3115 	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
3116 		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
3117 
3118 	for (i = 0; i < entries; ++i) {
3119 		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
3120 		dma_addr += EFX_BUF_SIZE;
3121 	}
3122 
3123 	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
3124 
3125 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
3126 			  NULL, 0, NULL);
3127 	if (rc)
3128 		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
3129 			    efx_rx_queue_index(rx_queue));
3130 }
3131 
3132 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
3133 {
3134 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
3135 	MCDI_DECLARE_BUF_ERR(outbuf);
3136 	struct efx_nic *efx = rx_queue->efx;
3137 	size_t outlen;
3138 	int rc;
3139 
3140 	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
3141 		       efx_rx_queue_index(rx_queue));
3142 
3143 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
3144 			  outbuf, sizeof(outbuf), &outlen);
3145 
3146 	if (rc && rc != -EALREADY)
3147 		goto fail;
3148 
3149 	return;
3150 
3151 fail:
3152 	efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
3153 			       outbuf, outlen, rc);
3154 }
3155 
3156 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
3157 {
3158 	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
3159 }
3160 
3161 /* This creates an entry in the RX descriptor queue */
3162 static inline void
3163 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
3164 {
3165 	struct efx_rx_buffer *rx_buf;
3166 	efx_qword_t *rxd;
3167 
3168 	rxd = efx_rx_desc(rx_queue, index);
3169 	rx_buf = efx_rx_buffer(rx_queue, index);
3170 	EFX_POPULATE_QWORD_2(*rxd,
3171 			     ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
3172 			     ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
3173 }
3174 
3175 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
3176 {
3177 	struct efx_nic *efx = rx_queue->efx;
3178 	unsigned int write_count;
3179 	efx_dword_t reg;
3180 
3181 	/* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
3182 	write_count = rx_queue->added_count & ~7;
3183 	if (rx_queue->notified_count == write_count)
3184 		return;
3185 
3186 	do
3187 		efx_ef10_build_rx_desc(
3188 			rx_queue,
3189 			rx_queue->notified_count & rx_queue->ptr_mask);
3190 	while (++rx_queue->notified_count != write_count);
3191 
3192 	wmb();
3193 	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
3194 			     write_count & rx_queue->ptr_mask);
3195 	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
3196 			efx_rx_queue_index(rx_queue));
3197 }
3198 
3199 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
3200 
3201 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
3202 {
3203 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
3204 	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3205 	efx_qword_t event;
3206 
3207 	EFX_POPULATE_QWORD_2(event,
3208 			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3209 			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);
3210 
3211 	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3212 
3213 	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3214 	 * already swapped the data to little-endian order.
3215 	 */
3216 	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3217 	       sizeof(efx_qword_t));
3218 
3219 	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
3220 			   inbuf, sizeof(inbuf), 0,
3221 			   efx_ef10_rx_defer_refill_complete, 0);
3222 }
3223 
3224 static void
3225 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
3226 				  int rc, efx_dword_t *outbuf,
3227 				  size_t outlen_actual)
3228 {
3229 	/* nothing to do */
3230 }
3231 
3232 static int efx_ef10_ev_probe(struct efx_channel *channel)
3233 {
3234 	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
3235 				    (channel->eventq_mask + 1) *
3236 				    sizeof(efx_qword_t),
3237 				    GFP_KERNEL);
3238 }
3239 
3240 static void efx_ef10_ev_fini(struct efx_channel *channel)
3241 {
3242 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
3243 	MCDI_DECLARE_BUF_ERR(outbuf);
3244 	struct efx_nic *efx = channel->efx;
3245 	size_t outlen;
3246 	int rc;
3247 
3248 	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
3249 
3250 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
3251 			  outbuf, sizeof(outbuf), &outlen);
3252 
3253 	if (rc && rc != -EALREADY)
3254 		goto fail;
3255 
3256 	return;
3257 
3258 fail:
3259 	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
3260 			       outbuf, outlen, rc);
3261 }
3262 
3263 static int efx_ef10_ev_init(struct efx_channel *channel)
3264 {
3265 	MCDI_DECLARE_BUF(inbuf,
3266 			 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
3267 						   EFX_BUF_SIZE));
3268 	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
3269 	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
3270 	struct efx_nic *efx = channel->efx;
3271 	struct efx_ef10_nic_data *nic_data;
3272 	size_t inlen, outlen;
3273 	unsigned int enabled, implemented;
3274 	dma_addr_t dma_addr;
3275 	int rc;
3276 	int i;
3277 
3278 	nic_data = efx->nic_data;
3279 
3280 	/* Fill event queue with all ones (i.e. empty events) */
3281 	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
3282 
3283 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
3284 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
3285 	/* INIT_EVQ expects index in vector table, not absolute */
3286 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
3287 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
3288 		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
3289 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
3290 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
3291 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
3292 		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
3293 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
3294 
3295 	if (nic_data->datapath_caps2 &
3296 	    1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
3297 		/* Use the new generic approach to specifying event queue
3298 		 * configuration, requesting lower latency or higher throughput.
3299 		 * The options that actually get used appear in the output.
3300 		 */
3301 		MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
3302 				      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
3303 				      INIT_EVQ_V2_IN_FLAG_TYPE,
3304 				      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
3305 	} else {
3306 		bool cut_thru = !(nic_data->datapath_caps &
3307 			1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
3308 
3309 		MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
3310 				      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
3311 				      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
3312 				      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
3313 				      INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
3314 	}
3315 
3316 	dma_addr = channel->eventq.buf.dma_addr;
3317 	for (i = 0; i < entries; ++i) {
3318 		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
3319 		dma_addr += EFX_BUF_SIZE;
3320 	}
3321 
3322 	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
3323 
3324 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
3325 			  outbuf, sizeof(outbuf), &outlen);
3326 
3327 	if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
3328 		netif_dbg(efx, drv, efx->net_dev,
3329 			  "Channel %d using event queue flags %08x\n",
3330 			  channel->channel,
3331 			  MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
3332 
3333 	/* IRQ return is ignored */
3334 	if (channel->channel || rc)
3335 		return rc;
3336 
3337 	/* Successfully created event queue on channel 0 */
3338 	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
3339 	if (rc == -ENOSYS) {
3340 		/* GET_WORKAROUNDS was implemented before this workaround,
3341 		 * thus it must be unavailable in this firmware.
3342 		 */
3343 		nic_data->workaround_26807 = false;
3344 		rc = 0;
3345 	} else if (rc) {
3346 		goto fail;
3347 	} else {
3348 		nic_data->workaround_26807 =
3349 			!!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
3350 
3351 		if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
3352 		    !nic_data->workaround_26807) {
3353 			unsigned int flags;
3354 
3355 			rc = efx_mcdi_set_workaround(efx,
3356 						     MC_CMD_WORKAROUND_BUG26807,
3357 						     true, &flags);
3358 
3359 			if (!rc) {
3360 				if (flags &
3361 				    1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
3362 					netif_info(efx, drv, efx->net_dev,
3363 						   "other functions on NIC have been reset\n");
3364 
3365 					/* With MCFW v4.6.x and earlier, the
3366 					 * boot count will have incremented,
3367 					 * so re-read the warm_boot_count
3368 					 * value now to ensure this function
3369 					 * doesn't think it has changed next
3370 					 * time it checks.
3371 					 */
3372 					rc = efx_ef10_get_warm_boot_count(efx);
3373 					if (rc >= 0) {
3374 						nic_data->warm_boot_count = rc;
3375 						rc = 0;
3376 					}
3377 				}
3378 				nic_data->workaround_26807 = true;
3379 			} else if (rc == -EPERM) {
3380 				rc = 0;
3381 			}
3382 		}
3383 	}
3384 
3385 	if (!rc)
3386 		return 0;
3387 
3388 fail:
3389 	efx_ef10_ev_fini(channel);
3390 	return rc;
3391 }
3392 
3393 static void efx_ef10_ev_remove(struct efx_channel *channel)
3394 {
3395 	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
3396 }
3397 
3398 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
3399 					   unsigned int rx_queue_label)
3400 {
3401 	struct efx_nic *efx = rx_queue->efx;
3402 
3403 	netif_info(efx, hw, efx->net_dev,
3404 		   "rx event arrived on queue %d labeled as queue %u\n",
3405 		   efx_rx_queue_index(rx_queue), rx_queue_label);
3406 
3407 	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3408 }
3409 
3410 static void
3411 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
3412 			     unsigned int actual, unsigned int expected)
3413 {
3414 	unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
3415 	struct efx_nic *efx = rx_queue->efx;
3416 
3417 	netif_info(efx, hw, efx->net_dev,
3418 		   "dropped %d events (index=%d expected=%d)\n",
3419 		   dropped, actual, expected);
3420 
3421 	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3422 }
3423 
3424 /* partially received RX was aborted. clean up. */
3425 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
3426 {
3427 	unsigned int rx_desc_ptr;
3428 
3429 	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
3430 		  "scattered RX aborted (dropping %u buffers)\n",
3431 		  rx_queue->scatter_n);
3432 
3433 	rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
3434 
3435 	efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
3436 		      0, EFX_RX_PKT_DISCARD);
3437 
3438 	rx_queue->removed_count += rx_queue->scatter_n;
3439 	rx_queue->scatter_n = 0;
3440 	rx_queue->scatter_len = 0;
3441 	++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
3442 }
3443 
3444 static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
3445 					   unsigned int n_packets,
3446 					   unsigned int rx_encap_hdr,
3447 					   unsigned int rx_l3_class,
3448 					   unsigned int rx_l4_class,
3449 					   const efx_qword_t *event)
3450 {
3451 	struct efx_nic *efx = channel->efx;
3452 	bool handled = false;
3453 
3454 	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
3455 		if (!(efx->net_dev->features & NETIF_F_RXALL)) {
3456 			if (!efx->loopback_selftest)
3457 				channel->n_rx_eth_crc_err += n_packets;
3458 			return EFX_RX_PKT_DISCARD;
3459 		}
3460 		handled = true;
3461 	}
3462 	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
3463 		if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3464 			     rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3465 			     rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3466 			     rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3467 			     rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3468 			netdev_WARN(efx->net_dev,
3469 				    "invalid class for RX_IPCKSUM_ERR: event="
3470 				    EFX_QWORD_FMT "\n",
3471 				    EFX_QWORD_VAL(*event));
3472 		if (!efx->loopback_selftest)
3473 			*(rx_encap_hdr ?
3474 			  &channel->n_rx_outer_ip_hdr_chksum_err :
3475 			  &channel->n_rx_ip_hdr_chksum_err) += n_packets;
3476 		return 0;
3477 	}
3478 	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
3479 		if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3480 			     ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3481 			       rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
3482 			      (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
3483 			       rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
3484 			netdev_WARN(efx->net_dev,
3485 				    "invalid class for RX_TCPUDP_CKSUM_ERR: event="
3486 				    EFX_QWORD_FMT "\n",
3487 				    EFX_QWORD_VAL(*event));
3488 		if (!efx->loopback_selftest)
3489 			*(rx_encap_hdr ?
3490 			  &channel->n_rx_outer_tcp_udp_chksum_err :
3491 			  &channel->n_rx_tcp_udp_chksum_err) += n_packets;
3492 		return 0;
3493 	}
3494 	if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
3495 		if (unlikely(!rx_encap_hdr))
3496 			netdev_WARN(efx->net_dev,
3497 				    "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
3498 				    EFX_QWORD_FMT "\n",
3499 				    EFX_QWORD_VAL(*event));
3500 		else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3501 				  rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3502 				  rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3503 				  rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3504 			netdev_WARN(efx->net_dev,
3505 				    "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
3506 				    EFX_QWORD_FMT "\n",
3507 				    EFX_QWORD_VAL(*event));
3508 		if (!efx->loopback_selftest)
3509 			channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
3510 		return 0;
3511 	}
3512 	if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
3513 		if (unlikely(!rx_encap_hdr))
3514 			netdev_WARN(efx->net_dev,
3515 				    "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3516 				    EFX_QWORD_FMT "\n",
3517 				    EFX_QWORD_VAL(*event));
3518 		else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3519 				   rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
3520 				  (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
3521 				   rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
3522 			netdev_WARN(efx->net_dev,
3523 				    "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3524 				    EFX_QWORD_FMT "\n",
3525 				    EFX_QWORD_VAL(*event));
3526 		if (!efx->loopback_selftest)
3527 			channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
3528 		return 0;
3529 	}
3530 
3531 	WARN_ON(!handled); /* No error bits were recognised */
3532 	return 0;
3533 }
3534 
3535 static int efx_ef10_handle_rx_event(struct efx_channel *channel,
3536 				    const efx_qword_t *event)
3537 {
3538 	unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
3539 	unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
3540 	unsigned int n_descs, n_packets, i;
3541 	struct efx_nic *efx = channel->efx;
3542 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
3543 	struct efx_rx_queue *rx_queue;
3544 	efx_qword_t errors;
3545 	bool rx_cont;
3546 	u16 flags = 0;
3547 
3548 	if (unlikely(READ_ONCE(efx->reset_pending)))
3549 		return 0;
3550 
3551 	/* Basic packet information */
3552 	rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
3553 	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
3554 	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
3555 	rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
3556 	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
3557 	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
3558 	rx_encap_hdr =
3559 		nic_data->datapath_caps &
3560 			(1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
3561 		EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
3562 		ESE_EZ_ENCAP_HDR_NONE;
3563 
3564 	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
3565 		netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
3566 			    EFX_QWORD_FMT "\n",
3567 			    EFX_QWORD_VAL(*event));
3568 
3569 	rx_queue = efx_channel_get_rx_queue(channel);
3570 
3571 	if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
3572 		efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
3573 
3574 	n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
3575 		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3576 
3577 	if (n_descs != rx_queue->scatter_n + 1) {
3578 		struct efx_ef10_nic_data *nic_data = efx->nic_data;
3579 
3580 		/* detect rx abort */
3581 		if (unlikely(n_descs == rx_queue->scatter_n)) {
3582 			if (rx_queue->scatter_n == 0 || rx_bytes != 0)
3583 				netdev_WARN(efx->net_dev,
3584 					    "invalid RX abort: scatter_n=%u event="
3585 					    EFX_QWORD_FMT "\n",
3586 					    rx_queue->scatter_n,
3587 					    EFX_QWORD_VAL(*event));
3588 			efx_ef10_handle_rx_abort(rx_queue);
3589 			return 0;
3590 		}
3591 
3592 		/* Check that RX completion merging is valid, i.e.
3593 		 * the current firmware supports it and this is a
3594 		 * non-scattered packet.
3595 		 */
3596 		if (!(nic_data->datapath_caps &
3597 		      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
3598 		    rx_queue->scatter_n != 0 || rx_cont) {
3599 			efx_ef10_handle_rx_bad_lbits(
3600 				rx_queue, next_ptr_lbits,
3601 				(rx_queue->removed_count +
3602 				 rx_queue->scatter_n + 1) &
3603 				((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3604 			return 0;
3605 		}
3606 
3607 		/* Merged completion for multiple non-scattered packets */
3608 		rx_queue->scatter_n = 1;
3609 		rx_queue->scatter_len = 0;
3610 		n_packets = n_descs;
3611 		++channel->n_rx_merge_events;
3612 		channel->n_rx_merge_packets += n_packets;
3613 		flags |= EFX_RX_PKT_PREFIX_LEN;
3614 	} else {
3615 		++rx_queue->scatter_n;
3616 		rx_queue->scatter_len += rx_bytes;
3617 		if (rx_cont)
3618 			return 0;
3619 		n_packets = 1;
3620 	}
3621 
3622 	EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
3623 				     ESF_DZ_RX_IPCKSUM_ERR, 1,
3624 				     ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
3625 				     ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
3626 				     ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
3627 	EFX_AND_QWORD(errors, *event, errors);
3628 	if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
3629 		flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
3630 							 rx_encap_hdr,
3631 							 rx_l3_class, rx_l4_class,
3632 							 event);
3633 	} else {
3634 		bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
3635 			      rx_l4_class == ESE_FZ_L4_CLASS_UDP;
3636 
3637 		switch (rx_encap_hdr) {
3638 		case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
3639 			flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
3640 			if (tcpudp)
3641 				flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
3642 			break;
3643 		case ESE_EZ_ENCAP_HDR_GRE:
3644 		case ESE_EZ_ENCAP_HDR_NONE:
3645 			if (tcpudp)
3646 				flags |= EFX_RX_PKT_CSUMMED;
3647 			break;
3648 		default:
3649 			netdev_WARN(efx->net_dev,
3650 				    "unknown encapsulation type: event="
3651 				    EFX_QWORD_FMT "\n",
3652 				    EFX_QWORD_VAL(*event));
3653 		}
3654 	}
3655 
3656 	if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
3657 		flags |= EFX_RX_PKT_TCP;
3658 
3659 	channel->irq_mod_score += 2 * n_packets;
3660 
3661 	/* Handle received packet(s) */
3662 	for (i = 0; i < n_packets; i++) {
3663 		efx_rx_packet(rx_queue,
3664 			      rx_queue->removed_count & rx_queue->ptr_mask,
3665 			      rx_queue->scatter_n, rx_queue->scatter_len,
3666 			      flags);
3667 		rx_queue->removed_count += rx_queue->scatter_n;
3668 	}
3669 
3670 	rx_queue->scatter_n = 0;
3671 	rx_queue->scatter_len = 0;
3672 
3673 	return n_packets;
3674 }
3675 
3676 static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
3677 {
3678 	u32 tstamp;
3679 
3680 	tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
3681 	tstamp <<= 16;
3682 	tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
3683 
3684 	return tstamp;
3685 }
3686 
3687 static void
3688 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
3689 {
3690 	struct efx_nic *efx = channel->efx;
3691 	struct efx_tx_queue *tx_queue;
3692 	unsigned int tx_ev_desc_ptr;
3693 	unsigned int tx_ev_q_label;
3694 	unsigned int tx_ev_type;
3695 	u64 ts_part;
3696 
3697 	if (unlikely(READ_ONCE(efx->reset_pending)))
3698 		return;
3699 
3700 	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
3701 		return;
3702 
3703 	/* Get the transmit queue */
3704 	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
3705 	tx_queue = efx_channel_get_tx_queue(channel,
3706 					    tx_ev_q_label % EFX_TXQ_TYPES);
3707 
3708 	if (!tx_queue->timestamping) {
3709 		/* Transmit completion */
3710 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
3711 		efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
3712 		return;
3713 	}
3714 
3715 	/* Transmit timestamps are only available for 8XXX series. They result
3716 	 * in three events per packet. These occur in order, and are:
3717 	 *  - the normal completion event
3718 	 *  - the low part of the timestamp
3719 	 *  - the high part of the timestamp
3720 	 *
3721 	 * Each part of the timestamp is itself split across two 16 bit
3722 	 * fields in the event.
3723 	 */
3724 	tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
3725 
3726 	switch (tx_ev_type) {
3727 	case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
3728 		/* In case of Queue flush or FLR, we might have received
3729 		 * the previous TX completion event but not the Timestamp
3730 		 * events.
3731 		 */
3732 		if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
3733 			efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
3734 
3735 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
3736 						 ESF_DZ_TX_DESCR_INDX);
3737 		tx_queue->completed_desc_ptr =
3738 					tx_ev_desc_ptr & tx_queue->ptr_mask;
3739 		break;
3740 
3741 	case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
3742 		ts_part = efx_ef10_extract_event_ts(event);
3743 		tx_queue->completed_timestamp_minor = ts_part;
3744 		break;
3745 
3746 	case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
3747 		ts_part = efx_ef10_extract_event_ts(event);
3748 		tx_queue->completed_timestamp_major = ts_part;
3749 
3750 		efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
3751 		tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
3752 		break;
3753 
3754 	default:
3755 		netif_err(efx, hw, efx->net_dev,
3756 			  "channel %d unknown tx event type %d (data "
3757 			  EFX_QWORD_FMT ")\n",
3758 			  channel->channel, tx_ev_type,
3759 			  EFX_QWORD_VAL(*event));
3760 		break;
3761 	}
3762 }
3763 
3764 static void
3765 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
3766 {
3767 	struct efx_nic *efx = channel->efx;
3768 	int subcode;
3769 
3770 	subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
3771 
3772 	switch (subcode) {
3773 	case ESE_DZ_DRV_TIMER_EV:
3774 	case ESE_DZ_DRV_WAKE_UP_EV:
3775 		break;
3776 	case ESE_DZ_DRV_START_UP_EV:
3777 		/* event queue init complete. ok. */
3778 		break;
3779 	default:
3780 		netif_err(efx, hw, efx->net_dev,
3781 			  "channel %d unknown driver event type %d"
3782 			  " (data " EFX_QWORD_FMT ")\n",
3783 			  channel->channel, subcode,
3784 			  EFX_QWORD_VAL(*event));
3785 
3786 	}
3787 }
3788 
3789 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
3790 						   efx_qword_t *event)
3791 {
3792 	struct efx_nic *efx = channel->efx;
3793 	u32 subcode;
3794 
3795 	subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
3796 
3797 	switch (subcode) {
3798 	case EFX_EF10_TEST:
3799 		channel->event_test_cpu = raw_smp_processor_id();
3800 		break;
3801 	case EFX_EF10_REFILL:
3802 		/* The queue must be empty, so we won't receive any rx
3803 		 * events, so efx_process_channel() won't refill the
3804 		 * queue. Refill it here
3805 		 */
3806 		efx_fast_push_rx_descriptors(&channel->rx_queue, true);
3807 		break;
3808 	default:
3809 		netif_err(efx, hw, efx->net_dev,
3810 			  "channel %d unknown driver event type %u"
3811 			  " (data " EFX_QWORD_FMT ")\n",
3812 			  channel->channel, (unsigned) subcode,
3813 			  EFX_QWORD_VAL(*event));
3814 	}
3815 }
3816 
3817 static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
3818 {
3819 	struct efx_nic *efx = channel->efx;
3820 	efx_qword_t event, *p_event;
3821 	unsigned int read_ptr;
3822 	int ev_code;
3823 	int spent = 0;
3824 
3825 	if (quota <= 0)
3826 		return spent;
3827 
3828 	read_ptr = channel->eventq_read_ptr;
3829 
3830 	for (;;) {
3831 		p_event = efx_event(channel, read_ptr);
3832 		event = *p_event;
3833 
3834 		if (!efx_event_present(&event))
3835 			break;
3836 
3837 		EFX_SET_QWORD(*p_event);
3838 
3839 		++read_ptr;
3840 
3841 		ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
3842 
3843 		netif_vdbg(efx, drv, efx->net_dev,
3844 			   "processing event on %d " EFX_QWORD_FMT "\n",
3845 			   channel->channel, EFX_QWORD_VAL(event));
3846 
3847 		switch (ev_code) {
3848 		case ESE_DZ_EV_CODE_MCDI_EV:
3849 			efx_mcdi_process_event(channel, &event);
3850 			break;
3851 		case ESE_DZ_EV_CODE_RX_EV:
3852 			spent += efx_ef10_handle_rx_event(channel, &event);
3853 			if (spent >= quota) {
3854 				/* XXX can we split a merged event to
3855 				 * avoid going over-quota?
3856 				 */
3857 				spent = quota;
3858 				goto out;
3859 			}
3860 			break;
3861 		case ESE_DZ_EV_CODE_TX_EV:
3862 			efx_ef10_handle_tx_event(channel, &event);
3863 			break;
3864 		case ESE_DZ_EV_CODE_DRIVER_EV:
3865 			efx_ef10_handle_driver_event(channel, &event);
3866 			if (++spent == quota)
3867 				goto out;
3868 			break;
3869 		case EFX_EF10_DRVGEN_EV:
3870 			efx_ef10_handle_driver_generated_event(channel, &event);
3871 			break;
3872 		default:
3873 			netif_err(efx, hw, efx->net_dev,
3874 				  "channel %d unknown event type %d"
3875 				  " (data " EFX_QWORD_FMT ")\n",
3876 				  channel->channel, ev_code,
3877 				  EFX_QWORD_VAL(event));
3878 		}
3879 	}
3880 
3881 out:
3882 	channel->eventq_read_ptr = read_ptr;
3883 	return spent;
3884 }
3885 
3886 static void efx_ef10_ev_read_ack(struct efx_channel *channel)
3887 {
3888 	struct efx_nic *efx = channel->efx;
3889 	efx_dword_t rptr;
3890 
3891 	if (EFX_EF10_WORKAROUND_35388(efx)) {
3892 		BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
3893 			     (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
3894 		BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
3895 			     (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
3896 
3897 		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3898 				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
3899 				     ERF_DD_EVQ_IND_RPTR,
3900 				     (channel->eventq_read_ptr &
3901 				      channel->eventq_mask) >>
3902 				     ERF_DD_EVQ_IND_RPTR_WIDTH);
3903 		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3904 				channel->channel);
3905 		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3906 				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
3907 				     ERF_DD_EVQ_IND_RPTR,
3908 				     channel->eventq_read_ptr &
3909 				     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
3910 		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3911 				channel->channel);
3912 	} else {
3913 		EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
3914 				     channel->eventq_read_ptr &
3915 				     channel->eventq_mask);
3916 		efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
3917 	}
3918 }
3919 
3920 static void efx_ef10_ev_test_generate(struct efx_channel *channel)
3921 {
3922 	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3923 	struct efx_nic *efx = channel->efx;
3924 	efx_qword_t event;
3925 	int rc;
3926 
3927 	EFX_POPULATE_QWORD_2(event,
3928 			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3929 			     ESF_DZ_EV_DATA, EFX_EF10_TEST);
3930 
3931 	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3932 
3933 	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3934 	 * already swapped the data to little-endian order.
3935 	 */
3936 	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3937 	       sizeof(efx_qword_t));
3938 
3939 	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
3940 			  NULL, 0, NULL);
3941 	if (rc != 0)
3942 		goto fail;
3943 
3944 	return;
3945 
3946 fail:
3947 	WARN_ON(true);
3948 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
3949 }
3950 
3951 void efx_ef10_handle_drain_event(struct efx_nic *efx)
3952 {
3953 	if (atomic_dec_and_test(&efx->active_queues))
3954 		wake_up(&efx->flush_wq);
3955 
3956 	WARN_ON(atomic_read(&efx->active_queues) < 0);
3957 }
3958 
3959 static int efx_ef10_fini_dmaq(struct efx_nic *efx)
3960 {
3961 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
3962 	struct efx_channel *channel;
3963 	struct efx_tx_queue *tx_queue;
3964 	struct efx_rx_queue *rx_queue;
3965 	int pending;
3966 
3967 	/* If the MC has just rebooted, the TX/RX queues will have already been
3968 	 * torn down, but efx->active_queues needs to be set to zero.
3969 	 */
3970 	if (nic_data->must_realloc_vis) {
3971 		atomic_set(&efx->active_queues, 0);
3972 		return 0;
3973 	}
3974 
3975 	/* Do not attempt to write to the NIC during EEH recovery */
3976 	if (efx->state != STATE_RECOVERY) {
3977 		efx_for_each_channel(channel, efx) {
3978 			efx_for_each_channel_rx_queue(rx_queue, channel)
3979 				efx_ef10_rx_fini(rx_queue);
3980 			efx_for_each_channel_tx_queue(tx_queue, channel)
3981 				efx_ef10_tx_fini(tx_queue);
3982 		}
3983 
3984 		wait_event_timeout(efx->flush_wq,
3985 				   atomic_read(&efx->active_queues) == 0,
3986 				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
3987 		pending = atomic_read(&efx->active_queues);
3988 		if (pending) {
3989 			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
3990 				  pending);
3991 			return -ETIMEDOUT;
3992 		}
3993 	}
3994 
3995 	return 0;
3996 }
3997 
3998 static void efx_ef10_prepare_flr(struct efx_nic *efx)
3999 {
4000 	atomic_set(&efx->active_queues, 0);
4001 }
4002 
4003 /* Decide whether a filter should be exclusive or else should allow
4004  * delivery to additional recipients.  Currently we decide that
4005  * filters for specific local unicast MAC and IP addresses are
4006  * exclusive.
4007  */
4008 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
4009 {
4010 	if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
4011 	    !is_multicast_ether_addr(spec->loc_mac))
4012 		return true;
4013 
4014 	if ((spec->match_flags &
4015 	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
4016 	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
4017 		if (spec->ether_type == htons(ETH_P_IP) &&
4018 		    !ipv4_is_multicast(spec->loc_host[0]))
4019 			return true;
4020 		if (spec->ether_type == htons(ETH_P_IPV6) &&
4021 		    ((const u8 *)spec->loc_host)[0] != 0xff)
4022 			return true;
4023 	}
4024 
4025 	return false;
4026 }
4027 
4028 static struct efx_filter_spec *
4029 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
4030 			   unsigned int filter_idx)
4031 {
4032 	return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
4033 					  ~EFX_EF10_FILTER_FLAGS);
4034 }
4035 
4036 static unsigned int
4037 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
4038 			   unsigned int filter_idx)
4039 {
4040 	return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
4041 }
4042 
4043 static void
4044 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
4045 			  unsigned int filter_idx,
4046 			  const struct efx_filter_spec *spec,
4047 			  unsigned int flags)
4048 {
4049 	table->entry[filter_idx].spec =	(unsigned long)spec | flags;
4050 }
4051 
4052 static void
4053 efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
4054 					   const struct efx_filter_spec *spec,
4055 					   efx_dword_t *inbuf)
4056 {
4057 	enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
4058 	u32 match_fields = 0, uc_match, mc_match;
4059 
4060 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4061 		       efx_ef10_filter_is_exclusive(spec) ?
4062 		       MC_CMD_FILTER_OP_IN_OP_INSERT :
4063 		       MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
4064 
4065 	/* Convert match flags and values.  Unlike almost
4066 	 * everything else in MCDI, these fields are in
4067 	 * network byte order.
4068 	 */
4069 #define COPY_VALUE(value, mcdi_field)					     \
4070 	do {							     \
4071 		match_fields |=					     \
4072 			1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	     \
4073 			mcdi_field ## _LBN;			     \
4074 		BUILD_BUG_ON(					     \
4075 			MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
4076 			sizeof(value));				     \
4077 		memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ##	mcdi_field), \
4078 		       &value, sizeof(value));			     \
4079 	} while (0)
4080 #define COPY_FIELD(gen_flag, gen_field, mcdi_field)			     \
4081 	if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
4082 		COPY_VALUE(spec->gen_field, mcdi_field);	     \
4083 	}
4084 	/* Handle encap filters first.  They will always be mismatch
4085 	 * (unknown UC or MC) filters
4086 	 */
4087 	if (encap_type) {
4088 		/* ether_type and outer_ip_proto need to be variables
4089 		 * because COPY_VALUE wants to memcpy them
4090 		 */
4091 		__be16 ether_type =
4092 			htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
4093 			      ETH_P_IPV6 : ETH_P_IP);
4094 		u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
4095 		u8 outer_ip_proto;
4096 
4097 		switch (encap_type & EFX_ENCAP_TYPES_MASK) {
4098 		case EFX_ENCAP_TYPE_VXLAN:
4099 			vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
4100 			/* fallthrough */
4101 		case EFX_ENCAP_TYPE_GENEVE:
4102 			COPY_VALUE(ether_type, ETHER_TYPE);
4103 			outer_ip_proto = IPPROTO_UDP;
4104 			COPY_VALUE(outer_ip_proto, IP_PROTO);
4105 			/* We always need to set the type field, even
4106 			 * though we're not matching on the TNI.
4107 			 */
4108 			MCDI_POPULATE_DWORD_1(inbuf,
4109 				FILTER_OP_EXT_IN_VNI_OR_VSID,
4110 				FILTER_OP_EXT_IN_VNI_TYPE,
4111 				vni_type);
4112 			break;
4113 		case EFX_ENCAP_TYPE_NVGRE:
4114 			COPY_VALUE(ether_type, ETHER_TYPE);
4115 			outer_ip_proto = IPPROTO_GRE;
4116 			COPY_VALUE(outer_ip_proto, IP_PROTO);
4117 			break;
4118 		default:
4119 			WARN_ON(1);
4120 		}
4121 
4122 		uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
4123 		mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
4124 	} else {
4125 		uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
4126 		mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
4127 	}
4128 
4129 	if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
4130 		match_fields |=
4131 			is_multicast_ether_addr(spec->loc_mac) ?
4132 			1 << mc_match :
4133 			1 << uc_match;
4134 	COPY_FIELD(REM_HOST, rem_host, SRC_IP);
4135 	COPY_FIELD(LOC_HOST, loc_host, DST_IP);
4136 	COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
4137 	COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
4138 	COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
4139 	COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
4140 	COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
4141 	COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
4142 	COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
4143 	COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
4144 #undef COPY_FIELD
4145 #undef COPY_VALUE
4146 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
4147 		       match_fields);
4148 }
4149 
4150 static void efx_ef10_filter_push_prep(struct efx_nic *efx,
4151 				      const struct efx_filter_spec *spec,
4152 				      efx_dword_t *inbuf, u64 handle,
4153 				      struct efx_rss_context *ctx,
4154 				      bool replacing)
4155 {
4156 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
4157 	u32 flags = spec->flags;
4158 
4159 	memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
4160 
4161 	/* If RSS filter, caller better have given us an RSS context */
4162 	if (flags & EFX_FILTER_FLAG_RX_RSS) {
4163 		/* We don't have the ability to return an error, so we'll just
4164 		 * log a warning and disable RSS for the filter.
4165 		 */
4166 		if (WARN_ON_ONCE(!ctx))
4167 			flags &= ~EFX_FILTER_FLAG_RX_RSS;
4168 		else if (WARN_ON_ONCE(ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID))
4169 			flags &= ~EFX_FILTER_FLAG_RX_RSS;
4170 	}
4171 
4172 	if (replacing) {
4173 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4174 			       MC_CMD_FILTER_OP_IN_OP_REPLACE);
4175 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
4176 	} else {
4177 		efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
4178 	}
4179 
4180 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
4181 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
4182 		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
4183 		       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
4184 		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
4185 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
4186 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
4187 		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
4188 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
4189 		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
4190 		       0 : spec->dmaq_id);
4191 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
4192 		       (flags & EFX_FILTER_FLAG_RX_RSS) ?
4193 		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
4194 		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
4195 	if (flags & EFX_FILTER_FLAG_RX_RSS)
4196 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
4197 }
4198 
4199 static int efx_ef10_filter_push(struct efx_nic *efx,
4200 				const struct efx_filter_spec *spec, u64 *handle,
4201 				struct efx_rss_context *ctx, bool replacing)
4202 {
4203 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4204 	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
4205 	size_t outlen;
4206 	int rc;
4207 
4208 	efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
4209 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
4210 				outbuf, sizeof(outbuf), &outlen);
4211 	if (rc && spec->priority != EFX_FILTER_PRI_HINT)
4212 		efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
4213 				       outbuf, outlen, rc);
4214 	if (rc == 0)
4215 		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
4216 	if (rc == -ENOSPC)
4217 		rc = -EBUSY; /* to match efx_farch_filter_insert() */
4218 	return rc;
4219 }
4220 
4221 static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
4222 {
4223 	enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
4224 	unsigned int match_flags = spec->match_flags;
4225 	unsigned int uc_match, mc_match;
4226 	u32 mcdi_flags = 0;
4227 
4228 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) {		\
4229 		unsigned int  old_match_flags = match_flags;		\
4230 		match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag;		\
4231 		if (match_flags != old_match_flags)			\
4232 			mcdi_flags |=					\
4233 				(1 << ((encap) ?			\
4234 				       MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
4235 				       mcdi_field ## _LBN :		\
4236 				       MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
4237 				       mcdi_field ## _LBN));		\
4238 	}
4239 	/* inner or outer based on encap type */
4240 	MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
4241 	MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
4242 	MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
4243 	MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
4244 	MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
4245 	MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
4246 	MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
4247 	MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
4248 	/* always outer */
4249 	MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
4250 	MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
4251 #undef MAP_FILTER_TO_MCDI_FLAG
4252 
4253 	/* special handling for encap type, and mismatch */
4254 	if (encap_type) {
4255 		match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
4256 		mcdi_flags |=
4257 			(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
4258 		mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
4259 
4260 		uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
4261 		mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
4262 	} else {
4263 		uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
4264 		mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
4265 	}
4266 
4267 	if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
4268 		match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
4269 		mcdi_flags |=
4270 			is_multicast_ether_addr(spec->loc_mac) ?
4271 			1 << mc_match :
4272 			1 << uc_match;
4273 	}
4274 
4275 	/* Did we map them all? */
4276 	WARN_ON_ONCE(match_flags);
4277 
4278 	return mcdi_flags;
4279 }
4280 
4281 static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
4282 			       const struct efx_filter_spec *spec)
4283 {
4284 	u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
4285 	unsigned int match_pri;
4286 
4287 	for (match_pri = 0;
4288 	     match_pri < table->rx_match_count;
4289 	     match_pri++)
4290 		if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
4291 			return match_pri;
4292 
4293 	return -EPROTONOSUPPORT;
4294 }
4295 
4296 static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
4297 					 struct efx_filter_spec *spec,
4298 					 bool replace_equal)
4299 {
4300 	DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
4301 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
4302 	struct efx_ef10_filter_table *table;
4303 	struct efx_filter_spec *saved_spec;
4304 	struct efx_rss_context *ctx = NULL;
4305 	unsigned int match_pri, hash;
4306 	unsigned int priv_flags;
4307 	bool rss_locked = false;
4308 	bool replacing = false;
4309 	unsigned int depth, i;
4310 	int ins_index = -1;
4311 	DEFINE_WAIT(wait);
4312 	bool is_mc_recip;
4313 	s32 rc;
4314 
4315 	WARN_ON(!rwsem_is_locked(&efx->filter_sem));
4316 	table = efx->filter_state;
4317 	down_write(&table->lock);
4318 
4319 	/* For now, only support RX filters */
4320 	if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
4321 	    EFX_FILTER_FLAG_RX) {
4322 		rc = -EINVAL;
4323 		goto out_unlock;
4324 	}
4325 
4326 	rc = efx_ef10_filter_pri(table, spec);
4327 	if (rc < 0)
4328 		goto out_unlock;
4329 	match_pri = rc;
4330 
4331 	hash = efx_filter_spec_hash(spec);
4332 	is_mc_recip = efx_filter_is_mc_recipient(spec);
4333 	if (is_mc_recip)
4334 		bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
4335 
4336 	if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
4337 		mutex_lock(&efx->rss_lock);
4338 		rss_locked = true;
4339 		if (spec->rss_context)
4340 			ctx = efx_find_rss_context_entry(efx, spec->rss_context);
4341 		else
4342 			ctx = &efx->rss_context;
4343 		if (!ctx) {
4344 			rc = -ENOENT;
4345 			goto out_unlock;
4346 		}
4347 		if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
4348 			rc = -EOPNOTSUPP;
4349 			goto out_unlock;
4350 		}
4351 	}
4352 
4353 	/* Find any existing filters with the same match tuple or
4354 	 * else a free slot to insert at.
4355 	 */
4356 	for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
4357 		i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4358 		saved_spec = efx_ef10_filter_entry_spec(table, i);
4359 
4360 		if (!saved_spec) {
4361 			if (ins_index < 0)
4362 				ins_index = i;
4363 		} else if (efx_filter_spec_equal(spec, saved_spec)) {
4364 			if (spec->priority < saved_spec->priority &&
4365 			    spec->priority != EFX_FILTER_PRI_AUTO) {
4366 				rc = -EPERM;
4367 				goto out_unlock;
4368 			}
4369 			if (!is_mc_recip) {
4370 				/* This is the only one */
4371 				if (spec->priority ==
4372 				    saved_spec->priority &&
4373 				    !replace_equal) {
4374 					rc = -EEXIST;
4375 					goto out_unlock;
4376 				}
4377 				ins_index = i;
4378 				break;
4379 			} else if (spec->priority >
4380 				   saved_spec->priority ||
4381 				   (spec->priority ==
4382 				    saved_spec->priority &&
4383 				    replace_equal)) {
4384 				if (ins_index < 0)
4385 					ins_index = i;
4386 				else
4387 					__set_bit(depth, mc_rem_map);
4388 			}
4389 		}
4390 	}
4391 
4392 	/* Once we reach the maximum search depth, use the first suitable
4393 	 * slot, or return -EBUSY if there was none
4394 	 */
4395 	if (ins_index < 0) {
4396 		rc = -EBUSY;
4397 		goto out_unlock;
4398 	}
4399 
4400 	/* Create a software table entry if necessary. */
4401 	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
4402 	if (saved_spec) {
4403 		if (spec->priority == EFX_FILTER_PRI_AUTO &&
4404 		    saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
4405 			/* Just make sure it won't be removed */
4406 			if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
4407 				saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
4408 			table->entry[ins_index].spec &=
4409 				~EFX_EF10_FILTER_FLAG_AUTO_OLD;
4410 			rc = ins_index;
4411 			goto out_unlock;
4412 		}
4413 		replacing = true;
4414 		priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
4415 	} else {
4416 		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
4417 		if (!saved_spec) {
4418 			rc = -ENOMEM;
4419 			goto out_unlock;
4420 		}
4421 		*saved_spec = *spec;
4422 		priv_flags = 0;
4423 	}
4424 	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
4425 
4426 	/* Actually insert the filter on the HW */
4427 	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
4428 				  ctx, replacing);
4429 
4430 	if (rc == -EINVAL && nic_data->must_realloc_vis)
4431 		/* The MC rebooted under us, causing it to reject our filter
4432 		 * insertion as pointing to an invalid VI (spec->dmaq_id).
4433 		 */
4434 		rc = -EAGAIN;
4435 
4436 	/* Finalise the software table entry */
4437 	if (rc == 0) {
4438 		if (replacing) {
4439 			/* Update the fields that may differ */
4440 			if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
4441 				saved_spec->flags |=
4442 					EFX_FILTER_FLAG_RX_OVER_AUTO;
4443 			saved_spec->priority = spec->priority;
4444 			saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
4445 			saved_spec->flags |= spec->flags;
4446 			saved_spec->rss_context = spec->rss_context;
4447 			saved_spec->dmaq_id = spec->dmaq_id;
4448 		}
4449 	} else if (!replacing) {
4450 		kfree(saved_spec);
4451 		saved_spec = NULL;
4452 	} else {
4453 		/* We failed to replace, so the old filter is still present.
4454 		 * Roll back the software table to reflect this.  In fact the
4455 		 * efx_ef10_filter_set_entry() call below will do the right
4456 		 * thing, so nothing extra is needed here.
4457 		 */
4458 	}
4459 	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
4460 
4461 	/* Remove and finalise entries for lower-priority multicast
4462 	 * recipients
4463 	 */
4464 	if (is_mc_recip) {
4465 		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4466 		unsigned int depth, i;
4467 
4468 		memset(inbuf, 0, sizeof(inbuf));
4469 
4470 		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
4471 			if (!test_bit(depth, mc_rem_map))
4472 				continue;
4473 
4474 			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4475 			saved_spec = efx_ef10_filter_entry_spec(table, i);
4476 			priv_flags = efx_ef10_filter_entry_flags(table, i);
4477 
4478 			if (rc == 0) {
4479 				MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4480 					       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4481 				MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4482 					       table->entry[i].handle);
4483 				rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
4484 						  inbuf, sizeof(inbuf),
4485 						  NULL, 0, NULL);
4486 			}
4487 
4488 			if (rc == 0) {
4489 				kfree(saved_spec);
4490 				saved_spec = NULL;
4491 				priv_flags = 0;
4492 			}
4493 			efx_ef10_filter_set_entry(table, i, saved_spec,
4494 						  priv_flags);
4495 		}
4496 	}
4497 
4498 	/* If successful, return the inserted filter ID */
4499 	if (rc == 0)
4500 		rc = efx_ef10_make_filter_id(match_pri, ins_index);
4501 
4502 out_unlock:
4503 	if (rss_locked)
4504 		mutex_unlock(&efx->rss_lock);
4505 	up_write(&table->lock);
4506 	return rc;
4507 }
4508 
4509 static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4510 				  struct efx_filter_spec *spec,
4511 				  bool replace_equal)
4512 {
4513 	s32 ret;
4514 
4515 	down_read(&efx->filter_sem);
4516 	ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
4517 	up_read(&efx->filter_sem);
4518 
4519 	return ret;
4520 }
4521 
4522 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
4523 {
4524 	/* no need to do anything here on EF10 */
4525 }
4526 
4527 /* Remove a filter.
4528  * If !by_index, remove by ID
4529  * If by_index, remove by index
4530  * Filter ID may come from userland and must be range-checked.
4531  * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
4532  * for write.
4533  */
4534 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
4535 					   unsigned int priority_mask,
4536 					   u32 filter_id, bool by_index)
4537 {
4538 	unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
4539 	struct efx_ef10_filter_table *table = efx->filter_state;
4540 	MCDI_DECLARE_BUF(inbuf,
4541 			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
4542 			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
4543 	struct efx_filter_spec *spec;
4544 	DEFINE_WAIT(wait);
4545 	int rc;
4546 
4547 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
4548 	if (!spec ||
4549 	    (!by_index &&
4550 	     efx_ef10_filter_pri(table, spec) !=
4551 	     efx_ef10_filter_get_unsafe_pri(filter_id)))
4552 		return -ENOENT;
4553 
4554 	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
4555 	    priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
4556 		/* Just remove flags */
4557 		spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
4558 		table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
4559 		return 0;
4560 	}
4561 
4562 	if (!(priority_mask & (1U << spec->priority)))
4563 		return -ENOENT;
4564 
4565 	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
4566 		/* Reset to an automatic filter */
4567 
4568 		struct efx_filter_spec new_spec = *spec;
4569 
4570 		new_spec.priority = EFX_FILTER_PRI_AUTO;
4571 		new_spec.flags = (EFX_FILTER_FLAG_RX |
4572 				  (efx_rss_active(&efx->rss_context) ?
4573 				   EFX_FILTER_FLAG_RX_RSS : 0));
4574 		new_spec.dmaq_id = 0;
4575 		new_spec.rss_context = 0;
4576 		rc = efx_ef10_filter_push(efx, &new_spec,
4577 					  &table->entry[filter_idx].handle,
4578 					  &efx->rss_context,
4579 					  true);
4580 
4581 		if (rc == 0)
4582 			*spec = new_spec;
4583 	} else {
4584 		/* Really remove the filter */
4585 
4586 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4587 			       efx_ef10_filter_is_exclusive(spec) ?
4588 			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
4589 			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4590 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4591 			       table->entry[filter_idx].handle);
4592 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
4593 					inbuf, sizeof(inbuf), NULL, 0, NULL);
4594 
4595 		if ((rc == 0) || (rc == -ENOENT)) {
4596 			/* Filter removed OK or didn't actually exist */
4597 			kfree(spec);
4598 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4599 		} else {
4600 			efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
4601 					       MC_CMD_FILTER_OP_EXT_IN_LEN,
4602 					       NULL, 0, rc);
4603 		}
4604 	}
4605 
4606 	return rc;
4607 }
4608 
4609 static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
4610 				       enum efx_filter_priority priority,
4611 				       u32 filter_id)
4612 {
4613 	struct efx_ef10_filter_table *table;
4614 	int rc;
4615 
4616 	down_read(&efx->filter_sem);
4617 	table = efx->filter_state;
4618 	down_write(&table->lock);
4619 	rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
4620 					     false);
4621 	up_write(&table->lock);
4622 	up_read(&efx->filter_sem);
4623 	return rc;
4624 }
4625 
4626 /* Caller must hold efx->filter_sem for read */
4627 static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
4628 					  enum efx_filter_priority priority,
4629 					  u32 filter_id)
4630 {
4631 	struct efx_ef10_filter_table *table = efx->filter_state;
4632 
4633 	if (filter_id == EFX_EF10_FILTER_ID_INVALID)
4634 		return;
4635 
4636 	down_write(&table->lock);
4637 	efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
4638 					true);
4639 	up_write(&table->lock);
4640 }
4641 
4642 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
4643 				    enum efx_filter_priority priority,
4644 				    u32 filter_id, struct efx_filter_spec *spec)
4645 {
4646 	unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
4647 	const struct efx_filter_spec *saved_spec;
4648 	struct efx_ef10_filter_table *table;
4649 	int rc;
4650 
4651 	down_read(&efx->filter_sem);
4652 	table = efx->filter_state;
4653 	down_read(&table->lock);
4654 	saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
4655 	if (saved_spec && saved_spec->priority == priority &&
4656 	    efx_ef10_filter_pri(table, saved_spec) ==
4657 	    efx_ef10_filter_get_unsafe_pri(filter_id)) {
4658 		*spec = *saved_spec;
4659 		rc = 0;
4660 	} else {
4661 		rc = -ENOENT;
4662 	}
4663 	up_read(&table->lock);
4664 	up_read(&efx->filter_sem);
4665 	return rc;
4666 }
4667 
4668 static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
4669 				    enum efx_filter_priority priority)
4670 {
4671 	struct efx_ef10_filter_table *table;
4672 	unsigned int priority_mask;
4673 	unsigned int i;
4674 	int rc;
4675 
4676 	priority_mask = (((1U << (priority + 1)) - 1) &
4677 			 ~(1U << EFX_FILTER_PRI_AUTO));
4678 
4679 	down_read(&efx->filter_sem);
4680 	table = efx->filter_state;
4681 	down_write(&table->lock);
4682 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
4683 		rc = efx_ef10_filter_remove_internal(efx, priority_mask,
4684 						     i, true);
4685 		if (rc && rc != -ENOENT)
4686 			break;
4687 		rc = 0;
4688 	}
4689 
4690 	up_write(&table->lock);
4691 	up_read(&efx->filter_sem);
4692 	return rc;
4693 }
4694 
4695 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
4696 					 enum efx_filter_priority priority)
4697 {
4698 	struct efx_ef10_filter_table *table;
4699 	unsigned int filter_idx;
4700 	s32 count = 0;
4701 
4702 	down_read(&efx->filter_sem);
4703 	table = efx->filter_state;
4704 	down_read(&table->lock);
4705 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4706 		if (table->entry[filter_idx].spec &&
4707 		    efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
4708 		    priority)
4709 			++count;
4710 	}
4711 	up_read(&table->lock);
4712 	up_read(&efx->filter_sem);
4713 	return count;
4714 }
4715 
4716 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
4717 {
4718 	struct efx_ef10_filter_table *table = efx->filter_state;
4719 
4720 	return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
4721 }
4722 
4723 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
4724 				      enum efx_filter_priority priority,
4725 				      u32 *buf, u32 size)
4726 {
4727 	struct efx_ef10_filter_table *table;
4728 	struct efx_filter_spec *spec;
4729 	unsigned int filter_idx;
4730 	s32 count = 0;
4731 
4732 	down_read(&efx->filter_sem);
4733 	table = efx->filter_state;
4734 	down_read(&table->lock);
4735 
4736 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4737 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
4738 		if (spec && spec->priority == priority) {
4739 			if (count == size) {
4740 				count = -EMSGSIZE;
4741 				break;
4742 			}
4743 			buf[count++] =
4744 				efx_ef10_make_filter_id(
4745 					efx_ef10_filter_pri(table, spec),
4746 					filter_idx);
4747 		}
4748 	}
4749 	up_read(&table->lock);
4750 	up_read(&efx->filter_sem);
4751 	return count;
4752 }
4753 
4754 #ifdef CONFIG_RFS_ACCEL
4755 
4756 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
4757 					   unsigned int filter_idx)
4758 {
4759 	struct efx_filter_spec *spec, saved_spec;
4760 	struct efx_ef10_filter_table *table;
4761 	struct efx_arfs_rule *rule = NULL;
4762 	bool ret = true, force = false;
4763 	u16 arfs_id;
4764 
4765 	down_read(&efx->filter_sem);
4766 	table = efx->filter_state;
4767 	down_write(&table->lock);
4768 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
4769 
4770 	if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
4771 		goto out_unlock;
4772 
4773 	spin_lock_bh(&efx->rps_hash_lock);
4774 	if (!efx->rps_hash_table) {
4775 		/* In the absence of the table, we always return 0 to ARFS. */
4776 		arfs_id = 0;
4777 	} else {
4778 		rule = efx_rps_hash_find(efx, spec);
4779 		if (!rule)
4780 			/* ARFS table doesn't know of this filter, so remove it */
4781 			goto expire;
4782 		arfs_id = rule->arfs_id;
4783 		ret = efx_rps_check_rule(rule, filter_idx, &force);
4784 		if (force)
4785 			goto expire;
4786 		if (!ret) {
4787 			spin_unlock_bh(&efx->rps_hash_lock);
4788 			goto out_unlock;
4789 		}
4790 	}
4791 	if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
4792 		ret = false;
4793 	else if (rule)
4794 		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
4795 expire:
4796 	saved_spec = *spec; /* remove operation will kfree spec */
4797 	spin_unlock_bh(&efx->rps_hash_lock);
4798 	/* At this point (since we dropped the lock), another thread might queue
4799 	 * up a fresh insertion request (but the actual insertion will be held
4800 	 * up by our possession of the filter table lock).  In that case, it
4801 	 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
4802 	 * the rule is not removed by efx_rps_hash_del() below.
4803 	 */
4804 	if (ret)
4805 		ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
4806 						      filter_idx, true) == 0;
4807 	/* While we can't safely dereference rule (we dropped the lock), we can
4808 	 * still test it for NULL.
4809 	 */
4810 	if (ret && rule) {
4811 		/* Expiring, so remove entry from ARFS table */
4812 		spin_lock_bh(&efx->rps_hash_lock);
4813 		efx_rps_hash_del(efx, &saved_spec);
4814 		spin_unlock_bh(&efx->rps_hash_lock);
4815 	}
4816 out_unlock:
4817 	up_write(&table->lock);
4818 	up_read(&efx->filter_sem);
4819 	return ret;
4820 }
4821 
4822 #endif /* CONFIG_RFS_ACCEL */
4823 
4824 static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
4825 {
4826 	int match_flags = 0;
4827 
4828 #define MAP_FLAG(gen_flag, mcdi_field) do {				\
4829 		u32 old_mcdi_flags = mcdi_flags;			\
4830 		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##	\
4831 				     mcdi_field ## _LBN);		\
4832 		if (mcdi_flags != old_mcdi_flags)			\
4833 			match_flags |= EFX_FILTER_MATCH_ ## gen_flag;	\
4834 	} while (0)
4835 
4836 	if (encap) {
4837 		/* encap filters must specify encap type */
4838 		match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
4839 		/* and imply ethertype and ip proto */
4840 		mcdi_flags &=
4841 			~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
4842 		mcdi_flags &=
4843 			~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
4844 		/* VLAN tags refer to the outer packet */
4845 		MAP_FLAG(INNER_VID, INNER_VLAN);
4846 		MAP_FLAG(OUTER_VID, OUTER_VLAN);
4847 		/* everything else refers to the inner packet */
4848 		MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
4849 		MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
4850 		MAP_FLAG(REM_HOST, IFRM_SRC_IP);
4851 		MAP_FLAG(LOC_HOST, IFRM_DST_IP);
4852 		MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
4853 		MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
4854 		MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
4855 		MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
4856 		MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
4857 		MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
4858 	} else {
4859 		MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
4860 		MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
4861 		MAP_FLAG(REM_HOST, SRC_IP);
4862 		MAP_FLAG(LOC_HOST, DST_IP);
4863 		MAP_FLAG(REM_MAC, SRC_MAC);
4864 		MAP_FLAG(REM_PORT, SRC_PORT);
4865 		MAP_FLAG(LOC_MAC, DST_MAC);
4866 		MAP_FLAG(LOC_PORT, DST_PORT);
4867 		MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
4868 		MAP_FLAG(INNER_VID, INNER_VLAN);
4869 		MAP_FLAG(OUTER_VID, OUTER_VLAN);
4870 		MAP_FLAG(IP_PROTO, IP_PROTO);
4871 	}
4872 #undef MAP_FLAG
4873 
4874 	/* Did we map them all? */
4875 	if (mcdi_flags)
4876 		return -EINVAL;
4877 
4878 	return match_flags;
4879 }
4880 
4881 static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
4882 {
4883 	struct efx_ef10_filter_table *table = efx->filter_state;
4884 	struct efx_ef10_filter_vlan *vlan, *next_vlan;
4885 
4886 	/* See comment in efx_ef10_filter_table_remove() */
4887 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4888 		return;
4889 
4890 	if (!table)
4891 		return;
4892 
4893 	list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
4894 		efx_ef10_filter_del_vlan_internal(efx, vlan);
4895 }
4896 
4897 static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
4898 					    bool encap,
4899 					    enum efx_filter_match_flags match_flags)
4900 {
4901 	unsigned int match_pri;
4902 	int mf;
4903 
4904 	for (match_pri = 0;
4905 	     match_pri < table->rx_match_count;
4906 	     match_pri++) {
4907 		mf = efx_ef10_filter_match_flags_from_mcdi(encap,
4908 				table->rx_match_mcdi_flags[match_pri]);
4909 		if (mf == match_flags)
4910 			return true;
4911 	}
4912 
4913 	return false;
4914 }
4915 
4916 static int
4917 efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
4918 				    struct efx_ef10_filter_table *table,
4919 				    bool encap)
4920 {
4921 	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
4922 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
4923 	unsigned int pd_match_pri, pd_match_count;
4924 	size_t outlen;
4925 	int rc;
4926 
4927 	/* Find out which RX filter types are supported, and their priorities */
4928 	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
4929 		       encap ?
4930 		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
4931 		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
4932 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
4933 			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
4934 			  &outlen);
4935 	if (rc)
4936 		return rc;
4937 
4938 	pd_match_count = MCDI_VAR_ARRAY_LEN(
4939 		outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
4940 
4941 	for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
4942 		u32 mcdi_flags =
4943 			MCDI_ARRAY_DWORD(
4944 				outbuf,
4945 				GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
4946 				pd_match_pri);
4947 		rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
4948 		if (rc < 0) {
4949 			netif_dbg(efx, probe, efx->net_dev,
4950 				  "%s: fw flags %#x pri %u not supported in driver\n",
4951 				  __func__, mcdi_flags, pd_match_pri);
4952 		} else {
4953 			netif_dbg(efx, probe, efx->net_dev,
4954 				  "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
4955 				  __func__, mcdi_flags, pd_match_pri,
4956 				  rc, table->rx_match_count);
4957 			table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
4958 			table->rx_match_count++;
4959 		}
4960 	}
4961 
4962 	return 0;
4963 }
4964 
4965 static int efx_ef10_filter_table_probe(struct efx_nic *efx)
4966 {
4967 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
4968 	struct net_device *net_dev = efx->net_dev;
4969 	struct efx_ef10_filter_table *table;
4970 	struct efx_ef10_vlan *vlan;
4971 	int rc;
4972 
4973 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4974 		return -EINVAL;
4975 
4976 	if (efx->filter_state) /* already probed */
4977 		return 0;
4978 
4979 	table = kzalloc(sizeof(*table), GFP_KERNEL);
4980 	if (!table)
4981 		return -ENOMEM;
4982 
4983 	table->rx_match_count = 0;
4984 	rc = efx_ef10_filter_table_probe_matches(efx, table, false);
4985 	if (rc)
4986 		goto fail;
4987 	if (nic_data->datapath_caps &
4988 		   (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
4989 		rc = efx_ef10_filter_table_probe_matches(efx, table, true);
4990 	if (rc)
4991 		goto fail;
4992 	if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
4993 	    !(efx_ef10_filter_match_supported(table, false,
4994 		(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
4995 	      efx_ef10_filter_match_supported(table, false,
4996 		(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
4997 		netif_info(efx, probe, net_dev,
4998 			   "VLAN filters are not supported in this firmware variant\n");
4999 		net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
5000 		efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
5001 		net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
5002 	}
5003 
5004 	table->entry = vzalloc(array_size(HUNT_FILTER_TBL_ROWS,
5005 					  sizeof(*table->entry)));
5006 	if (!table->entry) {
5007 		rc = -ENOMEM;
5008 		goto fail;
5009 	}
5010 
5011 	table->mc_promisc_last = false;
5012 	table->vlan_filter =
5013 		!!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
5014 	INIT_LIST_HEAD(&table->vlan_list);
5015 	init_rwsem(&table->lock);
5016 
5017 	efx->filter_state = table;
5018 
5019 	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
5020 		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
5021 		if (rc)
5022 			goto fail_add_vlan;
5023 	}
5024 
5025 	return 0;
5026 
5027 fail_add_vlan:
5028 	efx_ef10_filter_cleanup_vlans(efx);
5029 	efx->filter_state = NULL;
5030 fail:
5031 	kfree(table);
5032 	return rc;
5033 }
5034 
5035 /* Caller must hold efx->filter_sem for read if race against
5036  * efx_ef10_filter_table_remove() is possible
5037  */
5038 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
5039 {
5040 	struct efx_ef10_filter_table *table = efx->filter_state;
5041 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
5042 	unsigned int invalid_filters = 0, failed = 0;
5043 	struct efx_ef10_filter_vlan *vlan;
5044 	struct efx_filter_spec *spec;
5045 	struct efx_rss_context *ctx;
5046 	unsigned int filter_idx;
5047 	u32 mcdi_flags;
5048 	int match_pri;
5049 	int rc, i;
5050 
5051 	WARN_ON(!rwsem_is_locked(&efx->filter_sem));
5052 
5053 	if (!nic_data->must_restore_filters)
5054 		return;
5055 
5056 	if (!table)
5057 		return;
5058 
5059 	down_write(&table->lock);
5060 	mutex_lock(&efx->rss_lock);
5061 
5062 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
5063 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
5064 		if (!spec)
5065 			continue;
5066 
5067 		mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
5068 		match_pri = 0;
5069 		while (match_pri < table->rx_match_count &&
5070 		       table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
5071 			++match_pri;
5072 		if (match_pri >= table->rx_match_count) {
5073 			invalid_filters++;
5074 			goto not_restored;
5075 		}
5076 		if (spec->rss_context)
5077 			ctx = efx_find_rss_context_entry(efx, spec->rss_context);
5078 		else
5079 			ctx = &efx->rss_context;
5080 		if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
5081 			if (!ctx) {
5082 				netif_warn(efx, drv, efx->net_dev,
5083 					   "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
5084 					   spec->rss_context);
5085 				invalid_filters++;
5086 				goto not_restored;
5087 			}
5088 			if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
5089 				netif_warn(efx, drv, efx->net_dev,
5090 					   "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
5091 					   spec->rss_context);
5092 				invalid_filters++;
5093 				goto not_restored;
5094 			}
5095 		}
5096 
5097 		rc = efx_ef10_filter_push(efx, spec,
5098 					  &table->entry[filter_idx].handle,
5099 					  ctx, false);
5100 		if (rc)
5101 			failed++;
5102 
5103 		if (rc) {
5104 not_restored:
5105 			list_for_each_entry(vlan, &table->vlan_list, list)
5106 				for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
5107 					if (vlan->default_filters[i] == filter_idx)
5108 						vlan->default_filters[i] =
5109 							EFX_EF10_FILTER_ID_INVALID;
5110 
5111 			kfree(spec);
5112 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
5113 		}
5114 	}
5115 
5116 	mutex_unlock(&efx->rss_lock);
5117 	up_write(&table->lock);
5118 
5119 	/* This can happen validly if the MC's capabilities have changed, so
5120 	 * is not an error.
5121 	 */
5122 	if (invalid_filters)
5123 		netif_dbg(efx, drv, efx->net_dev,
5124 			  "Did not restore %u filters that are now unsupported.\n",
5125 			  invalid_filters);
5126 
5127 	if (failed)
5128 		netif_err(efx, hw, efx->net_dev,
5129 			  "unable to restore %u filters\n", failed);
5130 	else
5131 		nic_data->must_restore_filters = false;
5132 }
5133 
5134 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
5135 {
5136 	struct efx_ef10_filter_table *table = efx->filter_state;
5137 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
5138 	struct efx_filter_spec *spec;
5139 	unsigned int filter_idx;
5140 	int rc;
5141 
5142 	efx_ef10_filter_cleanup_vlans(efx);
5143 	efx->filter_state = NULL;
5144 	/* If we were called without locking, then it's not safe to free
5145 	 * the table as others might be using it.  So we just WARN, leak
5146 	 * the memory, and potentially get an inconsistent filter table
5147 	 * state.
5148 	 * This should never actually happen.
5149 	 */
5150 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5151 		return;
5152 
5153 	if (!table)
5154 		return;
5155 
5156 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
5157 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
5158 		if (!spec)
5159 			continue;
5160 
5161 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
5162 			       efx_ef10_filter_is_exclusive(spec) ?
5163 			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
5164 			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
5165 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
5166 			       table->entry[filter_idx].handle);
5167 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
5168 					sizeof(inbuf), NULL, 0, NULL);
5169 		if (rc)
5170 			netif_info(efx, drv, efx->net_dev,
5171 				   "%s: filter %04x remove failed\n",
5172 				   __func__, filter_idx);
5173 		kfree(spec);
5174 	}
5175 
5176 	vfree(table->entry);
5177 	kfree(table);
5178 }
5179 
5180 static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
5181 {
5182 	struct efx_ef10_filter_table *table = efx->filter_state;
5183 	unsigned int filter_idx;
5184 
5185 	efx_rwsem_assert_write_locked(&table->lock);
5186 
5187 	if (*id != EFX_EF10_FILTER_ID_INVALID) {
5188 		filter_idx = efx_ef10_filter_get_unsafe_id(*id);
5189 		if (!table->entry[filter_idx].spec)
5190 			netif_dbg(efx, drv, efx->net_dev,
5191 				  "marked null spec old %04x:%04x\n", *id,
5192 				  filter_idx);
5193 		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
5194 		*id = EFX_EF10_FILTER_ID_INVALID;
5195 	}
5196 }
5197 
5198 /* Mark old per-VLAN filters that may need to be removed */
5199 static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
5200 					   struct efx_ef10_filter_vlan *vlan)
5201 {
5202 	struct efx_ef10_filter_table *table = efx->filter_state;
5203 	unsigned int i;
5204 
5205 	for (i = 0; i < table->dev_uc_count; i++)
5206 		efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
5207 	for (i = 0; i < table->dev_mc_count; i++)
5208 		efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
5209 	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5210 		efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
5211 }
5212 
5213 /* Mark old filters that may need to be removed.
5214  * Caller must hold efx->filter_sem for read if race against
5215  * efx_ef10_filter_table_remove() is possible
5216  */
5217 static void efx_ef10_filter_mark_old(struct efx_nic *efx)
5218 {
5219 	struct efx_ef10_filter_table *table = efx->filter_state;
5220 	struct efx_ef10_filter_vlan *vlan;
5221 
5222 	down_write(&table->lock);
5223 	list_for_each_entry(vlan, &table->vlan_list, list)
5224 		_efx_ef10_filter_vlan_mark_old(efx, vlan);
5225 	up_write(&table->lock);
5226 }
5227 
5228 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
5229 {
5230 	struct efx_ef10_filter_table *table = efx->filter_state;
5231 	struct net_device *net_dev = efx->net_dev;
5232 	struct netdev_hw_addr *uc;
5233 	unsigned int i;
5234 
5235 	table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
5236 	ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
5237 	i = 1;
5238 	netdev_for_each_uc_addr(uc, net_dev) {
5239 		if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
5240 			table->uc_promisc = true;
5241 			break;
5242 		}
5243 		ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
5244 		i++;
5245 	}
5246 
5247 	table->dev_uc_count = i;
5248 }
5249 
5250 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
5251 {
5252 	struct efx_ef10_filter_table *table = efx->filter_state;
5253 	struct net_device *net_dev = efx->net_dev;
5254 	struct netdev_hw_addr *mc;
5255 	unsigned int i;
5256 
5257 	table->mc_overflow = false;
5258 	table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
5259 
5260 	i = 0;
5261 	netdev_for_each_mc_addr(mc, net_dev) {
5262 		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
5263 			table->mc_promisc = true;
5264 			table->mc_overflow = true;
5265 			break;
5266 		}
5267 		ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
5268 		i++;
5269 	}
5270 
5271 	table->dev_mc_count = i;
5272 }
5273 
5274 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5275 					    struct efx_ef10_filter_vlan *vlan,
5276 					    bool multicast, bool rollback)
5277 {
5278 	struct efx_ef10_filter_table *table = efx->filter_state;
5279 	struct efx_ef10_dev_addr *addr_list;
5280 	enum efx_filter_flags filter_flags;
5281 	struct efx_filter_spec spec;
5282 	u8 baddr[ETH_ALEN];
5283 	unsigned int i, j;
5284 	int addr_count;
5285 	u16 *ids;
5286 	int rc;
5287 
5288 	if (multicast) {
5289 		addr_list = table->dev_mc_list;
5290 		addr_count = table->dev_mc_count;
5291 		ids = vlan->mc;
5292 	} else {
5293 		addr_list = table->dev_uc_list;
5294 		addr_count = table->dev_uc_count;
5295 		ids = vlan->uc;
5296 	}
5297 
5298 	filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
5299 
5300 	/* Insert/renew filters */
5301 	for (i = 0; i < addr_count; i++) {
5302 		EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
5303 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5304 		efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
5305 		rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5306 		if (rc < 0) {
5307 			if (rollback) {
5308 				netif_info(efx, drv, efx->net_dev,
5309 					   "efx_ef10_filter_insert failed rc=%d\n",
5310 					   rc);
5311 				/* Fall back to promiscuous */
5312 				for (j = 0; j < i; j++) {
5313 					efx_ef10_filter_remove_unsafe(
5314 						efx, EFX_FILTER_PRI_AUTO,
5315 						ids[j]);
5316 					ids[j] = EFX_EF10_FILTER_ID_INVALID;
5317 				}
5318 				return rc;
5319 			} else {
5320 				/* keep invalid ID, and carry on */
5321 			}
5322 		} else {
5323 			ids[i] = efx_ef10_filter_get_unsafe_id(rc);
5324 		}
5325 	}
5326 
5327 	if (multicast && rollback) {
5328 		/* Also need an Ethernet broadcast filter */
5329 		EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
5330 				     EFX_EF10_FILTER_ID_INVALID);
5331 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5332 		eth_broadcast_addr(baddr);
5333 		efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5334 		rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5335 		if (rc < 0) {
5336 			netif_warn(efx, drv, efx->net_dev,
5337 				   "Broadcast filter insert failed rc=%d\n", rc);
5338 			/* Fall back to promiscuous */
5339 			for (j = 0; j < i; j++) {
5340 				efx_ef10_filter_remove_unsafe(
5341 					efx, EFX_FILTER_PRI_AUTO,
5342 					ids[j]);
5343 				ids[j] = EFX_EF10_FILTER_ID_INVALID;
5344 			}
5345 			return rc;
5346 		} else {
5347 			vlan->default_filters[EFX_EF10_BCAST] =
5348 				efx_ef10_filter_get_unsafe_id(rc);
5349 		}
5350 	}
5351 
5352 	return 0;
5353 }
5354 
5355 static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5356 				      struct efx_ef10_filter_vlan *vlan,
5357 				      enum efx_encap_type encap_type,
5358 				      bool multicast, bool rollback)
5359 {
5360 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
5361 	enum efx_filter_flags filter_flags;
5362 	struct efx_filter_spec spec;
5363 	u8 baddr[ETH_ALEN];
5364 	int rc;
5365 	u16 *id;
5366 
5367 	filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
5368 
5369 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5370 
5371 	if (multicast)
5372 		efx_filter_set_mc_def(&spec);
5373 	else
5374 		efx_filter_set_uc_def(&spec);
5375 
5376 	if (encap_type) {
5377 		if (nic_data->datapath_caps &
5378 		    (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
5379 			efx_filter_set_encap_type(&spec, encap_type);
5380 		else
5381 			/* don't insert encap filters on non-supporting
5382 			 * platforms. ID will be left as INVALID.
5383 			 */
5384 			return 0;
5385 	}
5386 
5387 	if (vlan->vid != EFX_FILTER_VID_UNSPEC)
5388 		efx_filter_set_eth_local(&spec, vlan->vid, NULL);
5389 
5390 	rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5391 	if (rc < 0) {
5392 		const char *um = multicast ? "Multicast" : "Unicast";
5393 		const char *encap_name = "";
5394 		const char *encap_ipv = "";
5395 
5396 		if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5397 		    EFX_ENCAP_TYPE_VXLAN)
5398 			encap_name = "VXLAN ";
5399 		else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5400 			 EFX_ENCAP_TYPE_NVGRE)
5401 			encap_name = "NVGRE ";
5402 		else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5403 			 EFX_ENCAP_TYPE_GENEVE)
5404 			encap_name = "GENEVE ";
5405 		if (encap_type & EFX_ENCAP_FLAG_IPV6)
5406 			encap_ipv = "IPv6 ";
5407 		else if (encap_type)
5408 			encap_ipv = "IPv4 ";
5409 
5410 		/* unprivileged functions can't insert mismatch filters
5411 		 * for encapsulated or unicast traffic, so downgrade
5412 		 * those warnings to debug.
5413 		 */
5414 		netif_cond_dbg(efx, drv, efx->net_dev,
5415 			       rc == -EPERM && (encap_type || !multicast), warn,
5416 			       "%s%s%s mismatch filter insert failed rc=%d\n",
5417 			       encap_name, encap_ipv, um, rc);
5418 	} else if (multicast) {
5419 		/* mapping from encap types to default filter IDs (multicast) */
5420 		static enum efx_ef10_default_filters map[] = {
5421 			[EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
5422 			[EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
5423 			[EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
5424 			[EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
5425 			[EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
5426 				EFX_EF10_VXLAN6_MCDEF,
5427 			[EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
5428 				EFX_EF10_NVGRE6_MCDEF,
5429 			[EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
5430 				EFX_EF10_GENEVE6_MCDEF,
5431 		};
5432 
5433 		/* quick bounds check (BCAST result impossible) */
5434 		BUILD_BUG_ON(EFX_EF10_BCAST != 0);
5435 		if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
5436 			WARN_ON(1);
5437 			return -EINVAL;
5438 		}
5439 		/* then follow map */
5440 		id = &vlan->default_filters[map[encap_type]];
5441 
5442 		EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
5443 		*id = efx_ef10_filter_get_unsafe_id(rc);
5444 		if (!nic_data->workaround_26807 && !encap_type) {
5445 			/* Also need an Ethernet broadcast filter */
5446 			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
5447 					   filter_flags, 0);
5448 			eth_broadcast_addr(baddr);
5449 			efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5450 			rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5451 			if (rc < 0) {
5452 				netif_warn(efx, drv, efx->net_dev,
5453 					   "Broadcast filter insert failed rc=%d\n",
5454 					   rc);
5455 				if (rollback) {
5456 					/* Roll back the mc_def filter */
5457 					efx_ef10_filter_remove_unsafe(
5458 							efx, EFX_FILTER_PRI_AUTO,
5459 							*id);
5460 					*id = EFX_EF10_FILTER_ID_INVALID;
5461 					return rc;
5462 				}
5463 			} else {
5464 				EFX_WARN_ON_PARANOID(
5465 					vlan->default_filters[EFX_EF10_BCAST] !=
5466 					EFX_EF10_FILTER_ID_INVALID);
5467 				vlan->default_filters[EFX_EF10_BCAST] =
5468 					efx_ef10_filter_get_unsafe_id(rc);
5469 			}
5470 		}
5471 		rc = 0;
5472 	} else {
5473 		/* mapping from encap types to default filter IDs (unicast) */
5474 		static enum efx_ef10_default_filters map[] = {
5475 			[EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
5476 			[EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
5477 			[EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
5478 			[EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
5479 			[EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
5480 				EFX_EF10_VXLAN6_UCDEF,
5481 			[EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
5482 				EFX_EF10_NVGRE6_UCDEF,
5483 			[EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
5484 				EFX_EF10_GENEVE6_UCDEF,
5485 		};
5486 
5487 		/* quick bounds check (BCAST result impossible) */
5488 		BUILD_BUG_ON(EFX_EF10_BCAST != 0);
5489 		if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
5490 			WARN_ON(1);
5491 			return -EINVAL;
5492 		}
5493 		/* then follow map */
5494 		id = &vlan->default_filters[map[encap_type]];
5495 		EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
5496 		*id = rc;
5497 		rc = 0;
5498 	}
5499 	return rc;
5500 }
5501 
5502 /* Remove filters that weren't renewed. */
5503 static void efx_ef10_filter_remove_old(struct efx_nic *efx)
5504 {
5505 	struct efx_ef10_filter_table *table = efx->filter_state;
5506 	int remove_failed = 0;
5507 	int remove_noent = 0;
5508 	int rc;
5509 	int i;
5510 
5511 	down_write(&table->lock);
5512 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
5513 		if (READ_ONCE(table->entry[i].spec) &
5514 		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
5515 			rc = efx_ef10_filter_remove_internal(efx,
5516 					1U << EFX_FILTER_PRI_AUTO, i, true);
5517 			if (rc == -ENOENT)
5518 				remove_noent++;
5519 			else if (rc)
5520 				remove_failed++;
5521 		}
5522 	}
5523 	up_write(&table->lock);
5524 
5525 	if (remove_failed)
5526 		netif_info(efx, drv, efx->net_dev,
5527 			   "%s: failed to remove %d filters\n",
5528 			   __func__, remove_failed);
5529 	if (remove_noent)
5530 		netif_info(efx, drv, efx->net_dev,
5531 			   "%s: failed to remove %d non-existent filters\n",
5532 			   __func__, remove_noent);
5533 }
5534 
5535 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
5536 {
5537 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
5538 	u8 mac_old[ETH_ALEN];
5539 	int rc, rc2;
5540 
5541 	/* Only reconfigure a PF-created vport */
5542 	if (is_zero_ether_addr(nic_data->vport_mac))
5543 		return 0;
5544 
5545 	efx_device_detach_sync(efx);
5546 	efx_net_stop(efx->net_dev);
5547 	down_write(&efx->filter_sem);
5548 	efx_ef10_filter_table_remove(efx);
5549 	up_write(&efx->filter_sem);
5550 
5551 	rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
5552 	if (rc)
5553 		goto restore_filters;
5554 
5555 	ether_addr_copy(mac_old, nic_data->vport_mac);
5556 	rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
5557 				    nic_data->vport_mac);
5558 	if (rc)
5559 		goto restore_vadaptor;
5560 
5561 	rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
5562 				    efx->net_dev->dev_addr);
5563 	if (!rc) {
5564 		ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
5565 	} else {
5566 		rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
5567 		if (rc2) {
5568 			/* Failed to add original MAC, so clear vport_mac */
5569 			eth_zero_addr(nic_data->vport_mac);
5570 			goto reset_nic;
5571 		}
5572 	}
5573 
5574 restore_vadaptor:
5575 	rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
5576 	if (rc2)
5577 		goto reset_nic;
5578 restore_filters:
5579 	down_write(&efx->filter_sem);
5580 	rc2 = efx_ef10_filter_table_probe(efx);
5581 	up_write(&efx->filter_sem);
5582 	if (rc2)
5583 		goto reset_nic;
5584 
5585 	rc2 = efx_net_open(efx->net_dev);
5586 	if (rc2)
5587 		goto reset_nic;
5588 
5589 	efx_device_attach_if_not_resetting(efx);
5590 
5591 	return rc;
5592 
5593 reset_nic:
5594 	netif_err(efx, drv, efx->net_dev,
5595 		  "Failed to restore when changing MAC address - scheduling reset\n");
5596 	efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
5597 
5598 	return rc ? rc : rc2;
5599 }
5600 
5601 /* Caller must hold efx->filter_sem for read if race against
5602  * efx_ef10_filter_table_remove() is possible
5603  */
5604 static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
5605 					      struct efx_ef10_filter_vlan *vlan)
5606 {
5607 	struct efx_ef10_filter_table *table = efx->filter_state;
5608 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
5609 
5610 	/* Do not install unspecified VID if VLAN filtering is enabled.
5611 	 * Do not install all specified VIDs if VLAN filtering is disabled.
5612 	 */
5613 	if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
5614 		return;
5615 
5616 	/* Insert/renew unicast filters */
5617 	if (table->uc_promisc) {
5618 		efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
5619 					   false, false);
5620 		efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
5621 	} else {
5622 		/* If any of the filters failed to insert, fall back to
5623 		 * promiscuous mode - add in the uc_def filter.  But keep
5624 		 * our individual unicast filters.
5625 		 */
5626 		if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
5627 			efx_ef10_filter_insert_def(efx, vlan,
5628 						   EFX_ENCAP_TYPE_NONE,
5629 						   false, false);
5630 	}
5631 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5632 				   false, false);
5633 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5634 					      EFX_ENCAP_FLAG_IPV6,
5635 				   false, false);
5636 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5637 				   false, false);
5638 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5639 					      EFX_ENCAP_FLAG_IPV6,
5640 				   false, false);
5641 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5642 				   false, false);
5643 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5644 					      EFX_ENCAP_FLAG_IPV6,
5645 				   false, false);
5646 
5647 	/* Insert/renew multicast filters */
5648 	/* If changing promiscuous state with cascaded multicast filters, remove
5649 	 * old filters first, so that packets are dropped rather than duplicated
5650 	 */
5651 	if (nic_data->workaround_26807 &&
5652 	    table->mc_promisc_last != table->mc_promisc)
5653 		efx_ef10_filter_remove_old(efx);
5654 	if (table->mc_promisc) {
5655 		if (nic_data->workaround_26807) {
5656 			/* If we failed to insert promiscuous filters, rollback
5657 			 * and fall back to individual multicast filters
5658 			 */
5659 			if (efx_ef10_filter_insert_def(efx, vlan,
5660 						       EFX_ENCAP_TYPE_NONE,
5661 						       true, true)) {
5662 				/* Changing promisc state, so remove old filters */
5663 				efx_ef10_filter_remove_old(efx);
5664 				efx_ef10_filter_insert_addr_list(efx, vlan,
5665 								 true, false);
5666 			}
5667 		} else {
5668 			/* If we failed to insert promiscuous filters, don't
5669 			 * rollback.  Regardless, also insert the mc_list,
5670 			 * unless it's incomplete due to overflow
5671 			 */
5672 			efx_ef10_filter_insert_def(efx, vlan,
5673 						   EFX_ENCAP_TYPE_NONE,
5674 						   true, false);
5675 			if (!table->mc_overflow)
5676 				efx_ef10_filter_insert_addr_list(efx, vlan,
5677 								 true, false);
5678 		}
5679 	} else {
5680 		/* If any filters failed to insert, rollback and fall back to
5681 		 * promiscuous mode - mc_def filter and maybe broadcast.  If
5682 		 * that fails, roll back again and insert as many of our
5683 		 * individual multicast filters as we can.
5684 		 */
5685 		if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
5686 			/* Changing promisc state, so remove old filters */
5687 			if (nic_data->workaround_26807)
5688 				efx_ef10_filter_remove_old(efx);
5689 			if (efx_ef10_filter_insert_def(efx, vlan,
5690 						       EFX_ENCAP_TYPE_NONE,
5691 						       true, true))
5692 				efx_ef10_filter_insert_addr_list(efx, vlan,
5693 								 true, false);
5694 		}
5695 	}
5696 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5697 				   true, false);
5698 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5699 					      EFX_ENCAP_FLAG_IPV6,
5700 				   true, false);
5701 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5702 				   true, false);
5703 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5704 					      EFX_ENCAP_FLAG_IPV6,
5705 				   true, false);
5706 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5707 				   true, false);
5708 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5709 					      EFX_ENCAP_FLAG_IPV6,
5710 				   true, false);
5711 }
5712 
5713 /* Caller must hold efx->filter_sem for read if race against
5714  * efx_ef10_filter_table_remove() is possible
5715  */
5716 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
5717 {
5718 	struct efx_ef10_filter_table *table = efx->filter_state;
5719 	struct net_device *net_dev = efx->net_dev;
5720 	struct efx_ef10_filter_vlan *vlan;
5721 	bool vlan_filter;
5722 
5723 	if (!efx_dev_registered(efx))
5724 		return;
5725 
5726 	if (!table)
5727 		return;
5728 
5729 	efx_ef10_filter_mark_old(efx);
5730 
5731 	/* Copy/convert the address lists; add the primary station
5732 	 * address and broadcast address
5733 	 */
5734 	netif_addr_lock_bh(net_dev);
5735 	efx_ef10_filter_uc_addr_list(efx);
5736 	efx_ef10_filter_mc_addr_list(efx);
5737 	netif_addr_unlock_bh(net_dev);
5738 
5739 	/* If VLAN filtering changes, all old filters are finally removed.
5740 	 * Do it in advance to avoid conflicts for unicast untagged and
5741 	 * VLAN 0 tagged filters.
5742 	 */
5743 	vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
5744 	if (table->vlan_filter != vlan_filter) {
5745 		table->vlan_filter = vlan_filter;
5746 		efx_ef10_filter_remove_old(efx);
5747 	}
5748 
5749 	list_for_each_entry(vlan, &table->vlan_list, list)
5750 		efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
5751 
5752 	efx_ef10_filter_remove_old(efx);
5753 	table->mc_promisc_last = table->mc_promisc;
5754 }
5755 
5756 static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
5757 {
5758 	struct efx_ef10_filter_table *table = efx->filter_state;
5759 	struct efx_ef10_filter_vlan *vlan;
5760 
5761 	WARN_ON(!rwsem_is_locked(&efx->filter_sem));
5762 
5763 	list_for_each_entry(vlan, &table->vlan_list, list) {
5764 		if (vlan->vid == vid)
5765 			return vlan;
5766 	}
5767 
5768 	return NULL;
5769 }
5770 
5771 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
5772 {
5773 	struct efx_ef10_filter_table *table = efx->filter_state;
5774 	struct efx_ef10_filter_vlan *vlan;
5775 	unsigned int i;
5776 
5777 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5778 		return -EINVAL;
5779 
5780 	vlan = efx_ef10_filter_find_vlan(efx, vid);
5781 	if (WARN_ON(vlan)) {
5782 		netif_err(efx, drv, efx->net_dev,
5783 			  "VLAN %u already added\n", vid);
5784 		return -EALREADY;
5785 	}
5786 
5787 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
5788 	if (!vlan)
5789 		return -ENOMEM;
5790 
5791 	vlan->vid = vid;
5792 
5793 	for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
5794 		vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
5795 	for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
5796 		vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
5797 	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5798 		vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
5799 
5800 	list_add_tail(&vlan->list, &table->vlan_list);
5801 
5802 	if (efx_dev_registered(efx))
5803 		efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
5804 
5805 	return 0;
5806 }
5807 
5808 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
5809 					      struct efx_ef10_filter_vlan *vlan)
5810 {
5811 	unsigned int i;
5812 
5813 	/* See comment in efx_ef10_filter_table_remove() */
5814 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5815 		return;
5816 
5817 	list_del(&vlan->list);
5818 
5819 	for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
5820 		efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5821 					      vlan->uc[i]);
5822 	for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
5823 		efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5824 					      vlan->mc[i]);
5825 	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5826 		if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
5827 			efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5828 						      vlan->default_filters[i]);
5829 
5830 	kfree(vlan);
5831 }
5832 
5833 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
5834 {
5835 	struct efx_ef10_filter_vlan *vlan;
5836 
5837 	/* See comment in efx_ef10_filter_table_remove() */
5838 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5839 		return;
5840 
5841 	vlan = efx_ef10_filter_find_vlan(efx, vid);
5842 	if (!vlan) {
5843 		netif_err(efx, drv, efx->net_dev,
5844 			  "VLAN %u not found in filter state\n", vid);
5845 		return;
5846 	}
5847 
5848 	efx_ef10_filter_del_vlan_internal(efx, vlan);
5849 }
5850 
5851 static int efx_ef10_set_mac_address(struct efx_nic *efx)
5852 {
5853 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
5854 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
5855 	bool was_enabled = efx->port_enabled;
5856 	int rc;
5857 
5858 	efx_device_detach_sync(efx);
5859 	efx_net_stop(efx->net_dev);
5860 
5861 	mutex_lock(&efx->mac_lock);
5862 	down_write(&efx->filter_sem);
5863 	efx_ef10_filter_table_remove(efx);
5864 
5865 	ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
5866 			efx->net_dev->dev_addr);
5867 	MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
5868 		       nic_data->vport_id);
5869 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
5870 				sizeof(inbuf), NULL, 0, NULL);
5871 
5872 	efx_ef10_filter_table_probe(efx);
5873 	up_write(&efx->filter_sem);
5874 	mutex_unlock(&efx->mac_lock);
5875 
5876 	if (was_enabled)
5877 		efx_net_open(efx->net_dev);
5878 	efx_device_attach_if_not_resetting(efx);
5879 
5880 #ifdef CONFIG_SFC_SRIOV
5881 	if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
5882 		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
5883 
5884 		if (rc == -EPERM) {
5885 			struct efx_nic *efx_pf;
5886 
5887 			/* Switch to PF and change MAC address on vport */
5888 			efx_pf = pci_get_drvdata(pci_dev_pf);
5889 
5890 			rc = efx_ef10_sriov_set_vf_mac(efx_pf,
5891 						       nic_data->vf_index,
5892 						       efx->net_dev->dev_addr);
5893 		} else if (!rc) {
5894 			struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
5895 			struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
5896 			unsigned int i;
5897 
5898 			/* MAC address successfully changed by VF (with MAC
5899 			 * spoofing) so update the parent PF if possible.
5900 			 */
5901 			for (i = 0; i < efx_pf->vf_count; ++i) {
5902 				struct ef10_vf *vf = nic_data->vf + i;
5903 
5904 				if (vf->efx == efx) {
5905 					ether_addr_copy(vf->mac,
5906 							efx->net_dev->dev_addr);
5907 					return 0;
5908 				}
5909 			}
5910 		}
5911 	} else
5912 #endif
5913 	if (rc == -EPERM) {
5914 		netif_err(efx, drv, efx->net_dev,
5915 			  "Cannot change MAC address; use sfboot to enable"
5916 			  " mac-spoofing on this interface\n");
5917 	} else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
5918 		/* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
5919 		 * fall-back to the method of changing the MAC address on the
5920 		 * vport.  This only applies to PFs because such versions of
5921 		 * MCFW do not support VFs.
5922 		 */
5923 		rc = efx_ef10_vport_set_mac_address(efx);
5924 	} else if (rc) {
5925 		efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
5926 				       sizeof(inbuf), NULL, 0, rc);
5927 	}
5928 
5929 	return rc;
5930 }
5931 
5932 static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
5933 {
5934 	efx_ef10_filter_sync_rx_mode(efx);
5935 
5936 	return efx_mcdi_set_mac(efx);
5937 }
5938 
5939 static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
5940 {
5941 	efx_ef10_filter_sync_rx_mode(efx);
5942 
5943 	return 0;
5944 }
5945 
5946 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
5947 {
5948 	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
5949 
5950 	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
5951 	return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
5952 			    NULL, 0, NULL);
5953 }
5954 
5955 /* MC BISTs follow a different poll mechanism to phy BISTs.
5956  * The BIST is done in the poll handler on the MC, and the MCDI command
5957  * will block until the BIST is done.
5958  */
5959 static int efx_ef10_poll_bist(struct efx_nic *efx)
5960 {
5961 	int rc;
5962 	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
5963 	size_t outlen;
5964 	u32 result;
5965 
5966 	rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
5967 			   outbuf, sizeof(outbuf), &outlen);
5968 	if (rc != 0)
5969 		return rc;
5970 
5971 	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
5972 		return -EIO;
5973 
5974 	result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
5975 	switch (result) {
5976 	case MC_CMD_POLL_BIST_PASSED:
5977 		netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
5978 		return 0;
5979 	case MC_CMD_POLL_BIST_TIMEOUT:
5980 		netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
5981 		return -EIO;
5982 	case MC_CMD_POLL_BIST_FAILED:
5983 		netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
5984 		return -EIO;
5985 	default:
5986 		netif_err(efx, hw, efx->net_dev,
5987 			  "BIST returned unknown result %u", result);
5988 		return -EIO;
5989 	}
5990 }
5991 
5992 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
5993 {
5994 	int rc;
5995 
5996 	netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
5997 
5998 	rc = efx_ef10_start_bist(efx, bist_type);
5999 	if (rc != 0)
6000 		return rc;
6001 
6002 	return efx_ef10_poll_bist(efx);
6003 }
6004 
6005 static int
6006 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
6007 {
6008 	int rc, rc2;
6009 
6010 	efx_reset_down(efx, RESET_TYPE_WORLD);
6011 
6012 	rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
6013 			  NULL, 0, NULL, 0, NULL);
6014 	if (rc != 0)
6015 		goto out;
6016 
6017 	tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
6018 	tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
6019 
6020 	rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
6021 
6022 out:
6023 	if (rc == -EPERM)
6024 		rc = 0;
6025 	rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
6026 	return rc ? rc : rc2;
6027 }
6028 
6029 #ifdef CONFIG_SFC_MTD
6030 
6031 struct efx_ef10_nvram_type_info {
6032 	u16 type, type_mask;
6033 	u8 port;
6034 	const char *name;
6035 };
6036 
6037 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
6038 	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   0,    0, "sfc_mcfw" },
6039 	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0,    0, "sfc_mcfw_backup" },
6040 	{ NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   0,    0, "sfc_exp_rom" },
6041 	{ NVRAM_PARTITION_TYPE_STATIC_CONFIG,	   0,    0, "sfc_static_cfg" },
6042 	{ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   0,    0, "sfc_dynamic_cfg" },
6043 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0,   0, "sfc_exp_rom_cfg" },
6044 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,   1, "sfc_exp_rom_cfg" },
6045 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,   2, "sfc_exp_rom_cfg" },
6046 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,   3, "sfc_exp_rom_cfg" },
6047 	{ NVRAM_PARTITION_TYPE_LICENSE,		   0,    0, "sfc_license" },
6048 	{ NVRAM_PARTITION_TYPE_PHY_MIN,		   0xff, 0, "sfc_phy_fw" },
6049 	{ NVRAM_PARTITION_TYPE_MUM_FIRMWARE,	   0,    0, "sfc_mumfw" },
6050 	{ NVRAM_PARTITION_TYPE_EXPANSION_UEFI,	   0,    0, "sfc_uefi" },
6051 	{ NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0,    0, "sfc_dynamic_cfg_dflt" },
6052 	{ NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0,    0, "sfc_exp_rom_cfg_dflt" },
6053 	{ NVRAM_PARTITION_TYPE_STATUS,		   0,    0, "sfc_status" },
6054 	{ NVRAM_PARTITION_TYPE_BUNDLE,		   0,    0, "sfc_bundle" },
6055 	{ NVRAM_PARTITION_TYPE_BUNDLE_METADATA,	   0,    0, "sfc_bundle_metadata" },
6056 };
6057 #define EF10_NVRAM_PARTITION_COUNT	ARRAY_SIZE(efx_ef10_nvram_types)
6058 
6059 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6060 					struct efx_mcdi_mtd_partition *part,
6061 					unsigned int type,
6062 					unsigned long *found)
6063 {
6064 	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
6065 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
6066 	const struct efx_ef10_nvram_type_info *info;
6067 	size_t size, erase_size, outlen;
6068 	int type_idx = 0;
6069 	bool protected;
6070 	int rc;
6071 
6072 	for (type_idx = 0; ; type_idx++) {
6073 		if (type_idx == EF10_NVRAM_PARTITION_COUNT)
6074 			return -ENODEV;
6075 		info = efx_ef10_nvram_types + type_idx;
6076 		if ((type & ~info->type_mask) == info->type)
6077 			break;
6078 	}
6079 	if (info->port != efx_port_num(efx))
6080 		return -ENODEV;
6081 
6082 	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
6083 	if (rc)
6084 		return rc;
6085 	if (protected &&
6086 	    (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS &&
6087 	     type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS))
6088 		/* Hide protected partitions that don't provide defaults. */
6089 		return -ENODEV;
6090 
6091 	if (protected)
6092 		/* Protected partitions are read only. */
6093 		erase_size = 0;
6094 
6095 	/* If we've already exposed a partition of this type, hide this
6096 	 * duplicate.  All operations on MTDs are keyed by the type anyway,
6097 	 * so we can't act on the duplicate.
6098 	 */
6099 	if (__test_and_set_bit(type_idx, found))
6100 		return -EEXIST;
6101 
6102 	part->nvram_type = type;
6103 
6104 	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
6105 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
6106 			  outbuf, sizeof(outbuf), &outlen);
6107 	if (rc)
6108 		return rc;
6109 	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
6110 		return -EIO;
6111 	if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
6112 	    (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
6113 		part->fw_subtype = MCDI_DWORD(outbuf,
6114 					      NVRAM_METADATA_OUT_SUBTYPE);
6115 
6116 	part->common.dev_type_name = "EF10 NVRAM manager";
6117 	part->common.type_name = info->name;
6118 
6119 	part->common.mtd.type = MTD_NORFLASH;
6120 	part->common.mtd.flags = MTD_CAP_NORFLASH;
6121 	part->common.mtd.size = size;
6122 	part->common.mtd.erasesize = erase_size;
6123 	/* sfc_status is read-only */
6124 	if (!erase_size)
6125 		part->common.mtd.flags |= MTD_NO_ERASE;
6126 
6127 	return 0;
6128 }
6129 
6130 static int efx_ef10_mtd_probe(struct efx_nic *efx)
6131 {
6132 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
6133 	DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
6134 	struct efx_mcdi_mtd_partition *parts;
6135 	size_t outlen, n_parts_total, i, n_parts;
6136 	unsigned int type;
6137 	int rc;
6138 
6139 	ASSERT_RTNL();
6140 
6141 	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
6142 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
6143 			  outbuf, sizeof(outbuf), &outlen);
6144 	if (rc)
6145 		return rc;
6146 	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
6147 		return -EIO;
6148 
6149 	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
6150 	if (n_parts_total >
6151 	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
6152 		return -EIO;
6153 
6154 	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
6155 	if (!parts)
6156 		return -ENOMEM;
6157 
6158 	n_parts = 0;
6159 	for (i = 0; i < n_parts_total; i++) {
6160 		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
6161 					i);
6162 		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
6163 						  found);
6164 		if (rc == -EEXIST || rc == -ENODEV)
6165 			continue;
6166 		if (rc)
6167 			goto fail;
6168 		n_parts++;
6169 	}
6170 
6171 	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
6172 fail:
6173 	if (rc)
6174 		kfree(parts);
6175 	return rc;
6176 }
6177 
6178 #endif /* CONFIG_SFC_MTD */
6179 
6180 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
6181 {
6182 	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
6183 }
6184 
6185 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
6186 					    u32 host_time) {}
6187 
6188 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
6189 					   bool temp)
6190 {
6191 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
6192 	int rc;
6193 
6194 	if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
6195 	    channel->sync_events_state == SYNC_EVENTS_VALID ||
6196 	    (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
6197 		return 0;
6198 	channel->sync_events_state = SYNC_EVENTS_REQUESTED;
6199 
6200 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
6201 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6202 	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
6203 		       channel->channel);
6204 
6205 	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6206 			  inbuf, sizeof(inbuf), NULL, 0, NULL);
6207 
6208 	if (rc != 0)
6209 		channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6210 						    SYNC_EVENTS_DISABLED;
6211 
6212 	return rc;
6213 }
6214 
6215 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
6216 					    bool temp)
6217 {
6218 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
6219 	int rc;
6220 
6221 	if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
6222 	    (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
6223 		return 0;
6224 	if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
6225 		channel->sync_events_state = SYNC_EVENTS_DISABLED;
6226 		return 0;
6227 	}
6228 	channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6229 					    SYNC_EVENTS_DISABLED;
6230 
6231 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
6232 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6233 	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
6234 		       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
6235 	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
6236 		       channel->channel);
6237 
6238 	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6239 			  inbuf, sizeof(inbuf), NULL, 0, NULL);
6240 
6241 	return rc;
6242 }
6243 
6244 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
6245 					   bool temp)
6246 {
6247 	int (*set)(struct efx_channel *channel, bool temp);
6248 	struct efx_channel *channel;
6249 
6250 	set = en ?
6251 	      efx_ef10_rx_enable_timestamping :
6252 	      efx_ef10_rx_disable_timestamping;
6253 
6254 	channel = efx_ptp_channel(efx);
6255 	if (channel) {
6256 		int rc = set(channel, temp);
6257 		if (en && rc != 0) {
6258 			efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
6259 			return rc;
6260 		}
6261 	}
6262 
6263 	return 0;
6264 }
6265 
6266 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
6267 					 struct hwtstamp_config *init)
6268 {
6269 	return -EOPNOTSUPP;
6270 }
6271 
6272 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
6273 				      struct hwtstamp_config *init)
6274 {
6275 	int rc;
6276 
6277 	switch (init->rx_filter) {
6278 	case HWTSTAMP_FILTER_NONE:
6279 		efx_ef10_ptp_set_ts_sync_events(efx, false, false);
6280 		/* if TX timestamping is still requested then leave PTP on */
6281 		return efx_ptp_change_mode(efx,
6282 					   init->tx_type != HWTSTAMP_TX_OFF, 0);
6283 	case HWTSTAMP_FILTER_ALL:
6284 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6285 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
6286 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
6287 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6288 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6289 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6290 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6291 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6292 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6293 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
6294 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
6295 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
6296 	case HWTSTAMP_FILTER_NTP_ALL:
6297 		init->rx_filter = HWTSTAMP_FILTER_ALL;
6298 		rc = efx_ptp_change_mode(efx, true, 0);
6299 		if (!rc)
6300 			rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
6301 		if (rc)
6302 			efx_ptp_change_mode(efx, false, 0);
6303 		return rc;
6304 	default:
6305 		return -ERANGE;
6306 	}
6307 }
6308 
6309 static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
6310 				     struct netdev_phys_item_id *ppid)
6311 {
6312 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6313 
6314 	if (!is_valid_ether_addr(nic_data->port_id))
6315 		return -EOPNOTSUPP;
6316 
6317 	ppid->id_len = ETH_ALEN;
6318 	memcpy(ppid->id, nic_data->port_id, ppid->id_len);
6319 
6320 	return 0;
6321 }
6322 
6323 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6324 {
6325 	if (proto != htons(ETH_P_8021Q))
6326 		return -EINVAL;
6327 
6328 	return efx_ef10_add_vlan(efx, vid);
6329 }
6330 
6331 static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6332 {
6333 	if (proto != htons(ETH_P_8021Q))
6334 		return -EINVAL;
6335 
6336 	return efx_ef10_del_vlan(efx, vid);
6337 }
6338 
6339 /* We rely on the MCDI wiping out our TX rings if it made any changes to the
6340  * ports table, ensuring that any TSO descriptors that were made on a now-
6341  * removed tunnel port will be blown away and won't break things when we try
6342  * to transmit them using the new ports table.
6343  */
6344 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
6345 {
6346 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6347 	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
6348 	MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
6349 	bool will_reset = false;
6350 	size_t num_entries = 0;
6351 	size_t inlen, outlen;
6352 	size_t i;
6353 	int rc;
6354 	efx_dword_t flags_and_num_entries;
6355 
6356 	WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));
6357 
6358 	nic_data->udp_tunnels_dirty = false;
6359 
6360 	if (!(nic_data->datapath_caps &
6361 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
6362 		efx_device_attach_if_not_resetting(efx);
6363 		return 0;
6364 	}
6365 
6366 	BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
6367 		     MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
6368 
6369 	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6370 		if (nic_data->udp_tunnels[i].count &&
6371 		    nic_data->udp_tunnels[i].port) {
6372 			efx_dword_t entry;
6373 
6374 			EFX_POPULATE_DWORD_2(entry,
6375 				TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
6376 					ntohs(nic_data->udp_tunnels[i].port),
6377 				TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
6378 					nic_data->udp_tunnels[i].type);
6379 			*_MCDI_ARRAY_DWORD(inbuf,
6380 				SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
6381 				num_entries++) = entry;
6382 		}
6383 	}
6384 
6385 	BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
6386 		      MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
6387 		     EFX_WORD_1_LBN);
6388 	BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
6389 		     EFX_WORD_1_WIDTH);
6390 	EFX_POPULATE_DWORD_2(flags_and_num_entries,
6391 			     MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
6392 				!!unloading,
6393 			     EFX_WORD_1, num_entries);
6394 	*_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
6395 		flags_and_num_entries;
6396 
6397 	inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);
6398 
6399 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
6400 				inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
6401 	if (rc == -EIO) {
6402 		/* Most likely the MC rebooted due to another function also
6403 		 * setting its tunnel port list. Mark the tunnel port list as
6404 		 * dirty, so it will be pushed upon coming up from the reboot.
6405 		 */
6406 		nic_data->udp_tunnels_dirty = true;
6407 		return 0;
6408 	}
6409 
6410 	if (rc) {
6411 		/* expected not available on unprivileged functions */
6412 		if (rc != -EPERM)
6413 			netif_warn(efx, drv, efx->net_dev,
6414 				   "Unable to set UDP tunnel ports; rc=%d.\n", rc);
6415 	} else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
6416 		   (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
6417 		netif_info(efx, drv, efx->net_dev,
6418 			   "Rebooting MC due to UDP tunnel port list change\n");
6419 		will_reset = true;
6420 		if (unloading)
6421 			/* Delay for the MC reset to complete. This will make
6422 			 * unloading other functions a bit smoother. This is a
6423 			 * race, but the other unload will work whichever way
6424 			 * it goes, this just avoids an unnecessary error
6425 			 * message.
6426 			 */
6427 			msleep(100);
6428 	}
6429 	if (!will_reset && !unloading) {
6430 		/* The caller will have detached, relying on the MC reset to
6431 		 * trigger a re-attach.  Since there won't be an MC reset, we
6432 		 * have to do the attach ourselves.
6433 		 */
6434 		efx_device_attach_if_not_resetting(efx);
6435 	}
6436 
6437 	return rc;
6438 }
6439 
6440 static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
6441 {
6442 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6443 	int rc = 0;
6444 
6445 	mutex_lock(&nic_data->udp_tunnels_lock);
6446 	if (nic_data->udp_tunnels_dirty) {
6447 		/* Make sure all TX are stopped while we modify the table, else
6448 		 * we might race against an efx_features_check().
6449 		 */
6450 		efx_device_detach_sync(efx);
6451 		rc = efx_ef10_set_udp_tnl_ports(efx, false);
6452 	}
6453 	mutex_unlock(&nic_data->udp_tunnels_lock);
6454 	return rc;
6455 }
6456 
6457 static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx,
6458 							     __be16 port)
6459 {
6460 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6461 	size_t i;
6462 
6463 	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6464 		if (!nic_data->udp_tunnels[i].count)
6465 			continue;
6466 		if (nic_data->udp_tunnels[i].port == port)
6467 			return &nic_data->udp_tunnels[i];
6468 	}
6469 	return NULL;
6470 }
6471 
6472 static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx,
6473 				     struct efx_udp_tunnel tnl)
6474 {
6475 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6476 	struct efx_udp_tunnel *match;
6477 	char typebuf[8];
6478 	size_t i;
6479 	int rc;
6480 
6481 	if (!(nic_data->datapath_caps &
6482 	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6483 		return 0;
6484 
6485 	efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
6486 	netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n",
6487 		  typebuf, ntohs(tnl.port));
6488 
6489 	mutex_lock(&nic_data->udp_tunnels_lock);
6490 	/* Make sure all TX are stopped while we add to the table, else we
6491 	 * might race against an efx_features_check().
6492 	 */
6493 	efx_device_detach_sync(efx);
6494 
6495 	match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
6496 	if (match != NULL) {
6497 		if (match->type == tnl.type) {
6498 			netif_dbg(efx, drv, efx->net_dev,
6499 				  "Referencing existing tunnel entry\n");
6500 			match->count++;
6501 			/* No need to cause an MCDI update */
6502 			rc = 0;
6503 			goto unlock_out;
6504 		}
6505 		efx_get_udp_tunnel_type_name(match->type,
6506 					     typebuf, sizeof(typebuf));
6507 		netif_dbg(efx, drv, efx->net_dev,
6508 			  "UDP port %d is already in use by %s\n",
6509 			  ntohs(tnl.port), typebuf);
6510 		rc = -EEXIST;
6511 		goto unlock_out;
6512 	}
6513 
6514 	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
6515 		if (!nic_data->udp_tunnels[i].count) {
6516 			nic_data->udp_tunnels[i] = tnl;
6517 			nic_data->udp_tunnels[i].count = 1;
6518 			rc = efx_ef10_set_udp_tnl_ports(efx, false);
6519 			goto unlock_out;
6520 		}
6521 
6522 	netif_dbg(efx, drv, efx->net_dev,
6523 		  "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n",
6524 		  typebuf, ntohs(tnl.port));
6525 
6526 	rc = -ENOMEM;
6527 
6528 unlock_out:
6529 	mutex_unlock(&nic_data->udp_tunnels_lock);
6530 	return rc;
6531 }
6532 
6533 /* Called under the TX lock with the TX queue running, hence no-one can be
6534  * in the middle of updating the UDP tunnels table.  However, they could
6535  * have tried and failed the MCDI, in which case they'll have set the dirty
6536  * flag before dropping their locks.
6537  */
6538 static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
6539 {
6540 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6541 
6542 	if (!(nic_data->datapath_caps &
6543 	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6544 		return false;
6545 
6546 	if (nic_data->udp_tunnels_dirty)
6547 		/* SW table may not match HW state, so just assume we can't
6548 		 * use any UDP tunnel offloads.
6549 		 */
6550 		return false;
6551 
6552 	return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL;
6553 }
6554 
6555 static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx,
6556 				     struct efx_udp_tunnel tnl)
6557 {
6558 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6559 	struct efx_udp_tunnel *match;
6560 	char typebuf[8];
6561 	int rc;
6562 
6563 	if (!(nic_data->datapath_caps &
6564 	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6565 		return 0;
6566 
6567 	efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
6568 	netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n",
6569 		  typebuf, ntohs(tnl.port));
6570 
6571 	mutex_lock(&nic_data->udp_tunnels_lock);
6572 	/* Make sure all TX are stopped while we remove from the table, else we
6573 	 * might race against an efx_features_check().
6574 	 */
6575 	efx_device_detach_sync(efx);
6576 
6577 	match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
6578 	if (match != NULL) {
6579 		if (match->type == tnl.type) {
6580 			if (--match->count) {
6581 				/* Port is still in use, so nothing to do */
6582 				netif_dbg(efx, drv, efx->net_dev,
6583 					  "UDP tunnel port %d remains active\n",
6584 					  ntohs(tnl.port));
6585 				rc = 0;
6586 				goto out_unlock;
6587 			}
6588 			rc = efx_ef10_set_udp_tnl_ports(efx, false);
6589 			goto out_unlock;
6590 		}
6591 		efx_get_udp_tunnel_type_name(match->type,
6592 					     typebuf, sizeof(typebuf));
6593 		netif_warn(efx, drv, efx->net_dev,
6594 			   "UDP port %d is actually in use by %s, not removing\n",
6595 			   ntohs(tnl.port), typebuf);
6596 	}
6597 	rc = -ENOENT;
6598 
6599 out_unlock:
6600 	mutex_unlock(&nic_data->udp_tunnels_lock);
6601 	return rc;
6602 }
6603 
6604 #define EF10_OFFLOAD_FEATURES		\
6605 	(NETIF_F_IP_CSUM |		\
6606 	 NETIF_F_HW_VLAN_CTAG_FILTER |	\
6607 	 NETIF_F_IPV6_CSUM |		\
6608 	 NETIF_F_RXHASH |		\
6609 	 NETIF_F_NTUPLE)
6610 
6611 const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
6612 	.is_vf = true,
6613 	.mem_bar = efx_ef10_vf_mem_bar,
6614 	.mem_map_size = efx_ef10_mem_map_size,
6615 	.probe = efx_ef10_probe_vf,
6616 	.remove = efx_ef10_remove,
6617 	.dimension_resources = efx_ef10_dimension_resources,
6618 	.init = efx_ef10_init_nic,
6619 	.fini = efx_port_dummy_op_void,
6620 	.map_reset_reason = efx_ef10_map_reset_reason,
6621 	.map_reset_flags = efx_ef10_map_reset_flags,
6622 	.reset = efx_ef10_reset,
6623 	.probe_port = efx_mcdi_port_probe,
6624 	.remove_port = efx_mcdi_port_remove,
6625 	.fini_dmaq = efx_ef10_fini_dmaq,
6626 	.prepare_flr = efx_ef10_prepare_flr,
6627 	.finish_flr = efx_port_dummy_op_void,
6628 	.describe_stats = efx_ef10_describe_stats,
6629 	.update_stats = efx_ef10_update_stats_vf,
6630 	.start_stats = efx_port_dummy_op_void,
6631 	.pull_stats = efx_port_dummy_op_void,
6632 	.stop_stats = efx_port_dummy_op_void,
6633 	.set_id_led = efx_mcdi_set_id_led,
6634 	.push_irq_moderation = efx_ef10_push_irq_moderation,
6635 	.reconfigure_mac = efx_ef10_mac_reconfigure_vf,
6636 	.check_mac_fault = efx_mcdi_mac_check_fault,
6637 	.reconfigure_port = efx_mcdi_port_reconfigure,
6638 	.get_wol = efx_ef10_get_wol_vf,
6639 	.set_wol = efx_ef10_set_wol_vf,
6640 	.resume_wol = efx_port_dummy_op_void,
6641 	.mcdi_request = efx_ef10_mcdi_request,
6642 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
6643 	.mcdi_read_response = efx_ef10_mcdi_read_response,
6644 	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
6645 	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
6646 	.irq_enable_master = efx_port_dummy_op_void,
6647 	.irq_test_generate = efx_ef10_irq_test_generate,
6648 	.irq_disable_non_ev = efx_port_dummy_op_void,
6649 	.irq_handle_msi = efx_ef10_msi_interrupt,
6650 	.irq_handle_legacy = efx_ef10_legacy_interrupt,
6651 	.tx_probe = efx_ef10_tx_probe,
6652 	.tx_init = efx_ef10_tx_init,
6653 	.tx_remove = efx_ef10_tx_remove,
6654 	.tx_write = efx_ef10_tx_write,
6655 	.tx_limit_len = efx_ef10_tx_limit_len,
6656 	.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
6657 	.rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
6658 	.rx_probe = efx_ef10_rx_probe,
6659 	.rx_init = efx_ef10_rx_init,
6660 	.rx_remove = efx_ef10_rx_remove,
6661 	.rx_write = efx_ef10_rx_write,
6662 	.rx_defer_refill = efx_ef10_rx_defer_refill,
6663 	.ev_probe = efx_ef10_ev_probe,
6664 	.ev_init = efx_ef10_ev_init,
6665 	.ev_fini = efx_ef10_ev_fini,
6666 	.ev_remove = efx_ef10_ev_remove,
6667 	.ev_process = efx_ef10_ev_process,
6668 	.ev_read_ack = efx_ef10_ev_read_ack,
6669 	.ev_test_generate = efx_ef10_ev_test_generate,
6670 	.filter_table_probe = efx_ef10_filter_table_probe,
6671 	.filter_table_restore = efx_ef10_filter_table_restore,
6672 	.filter_table_remove = efx_ef10_filter_table_remove,
6673 	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
6674 	.filter_insert = efx_ef10_filter_insert,
6675 	.filter_remove_safe = efx_ef10_filter_remove_safe,
6676 	.filter_get_safe = efx_ef10_filter_get_safe,
6677 	.filter_clear_rx = efx_ef10_filter_clear_rx,
6678 	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
6679 	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
6680 	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
6681 #ifdef CONFIG_RFS_ACCEL
6682 	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
6683 #endif
6684 #ifdef CONFIG_SFC_MTD
6685 	.mtd_probe = efx_port_dummy_op_int,
6686 #endif
6687 	.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
6688 	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
6689 	.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
6690 	.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
6691 #ifdef CONFIG_SFC_SRIOV
6692 	.vswitching_probe = efx_ef10_vswitching_probe_vf,
6693 	.vswitching_restore = efx_ef10_vswitching_restore_vf,
6694 	.vswitching_remove = efx_ef10_vswitching_remove_vf,
6695 #endif
6696 	.get_mac_address = efx_ef10_get_mac_address_vf,
6697 	.set_mac_address = efx_ef10_set_mac_address,
6698 
6699 	.get_phys_port_id = efx_ef10_get_phys_port_id,
6700 	.revision = EFX_REV_HUNT_A0,
6701 	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
6702 	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
6703 	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
6704 	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
6705 	.can_rx_scatter = true,
6706 	.always_rx_scatter = true,
6707 	.min_interrupt_mode = EFX_INT_MODE_MSIX,
6708 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
6709 	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
6710 	.offload_features = EF10_OFFLOAD_FEATURES,
6711 	.mcdi_max_ver = 2,
6712 	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
6713 	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
6714 			    1 << HWTSTAMP_FILTER_ALL,
6715 	.rx_hash_key_size = 40,
6716 };
6717 
6718 const struct efx_nic_type efx_hunt_a0_nic_type = {
6719 	.is_vf = false,
6720 	.mem_bar = efx_ef10_pf_mem_bar,
6721 	.mem_map_size = efx_ef10_mem_map_size,
6722 	.probe = efx_ef10_probe_pf,
6723 	.remove = efx_ef10_remove,
6724 	.dimension_resources = efx_ef10_dimension_resources,
6725 	.init = efx_ef10_init_nic,
6726 	.fini = efx_port_dummy_op_void,
6727 	.map_reset_reason = efx_ef10_map_reset_reason,
6728 	.map_reset_flags = efx_ef10_map_reset_flags,
6729 	.reset = efx_ef10_reset,
6730 	.probe_port = efx_mcdi_port_probe,
6731 	.remove_port = efx_mcdi_port_remove,
6732 	.fini_dmaq = efx_ef10_fini_dmaq,
6733 	.prepare_flr = efx_ef10_prepare_flr,
6734 	.finish_flr = efx_port_dummy_op_void,
6735 	.describe_stats = efx_ef10_describe_stats,
6736 	.update_stats = efx_ef10_update_stats_pf,
6737 	.start_stats = efx_mcdi_mac_start_stats,
6738 	.pull_stats = efx_mcdi_mac_pull_stats,
6739 	.stop_stats = efx_mcdi_mac_stop_stats,
6740 	.set_id_led = efx_mcdi_set_id_led,
6741 	.push_irq_moderation = efx_ef10_push_irq_moderation,
6742 	.reconfigure_mac = efx_ef10_mac_reconfigure,
6743 	.check_mac_fault = efx_mcdi_mac_check_fault,
6744 	.reconfigure_port = efx_mcdi_port_reconfigure,
6745 	.get_wol = efx_ef10_get_wol,
6746 	.set_wol = efx_ef10_set_wol,
6747 	.resume_wol = efx_port_dummy_op_void,
6748 	.test_chip = efx_ef10_test_chip,
6749 	.test_nvram = efx_mcdi_nvram_test_all,
6750 	.mcdi_request = efx_ef10_mcdi_request,
6751 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
6752 	.mcdi_read_response = efx_ef10_mcdi_read_response,
6753 	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
6754 	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
6755 	.irq_enable_master = efx_port_dummy_op_void,
6756 	.irq_test_generate = efx_ef10_irq_test_generate,
6757 	.irq_disable_non_ev = efx_port_dummy_op_void,
6758 	.irq_handle_msi = efx_ef10_msi_interrupt,
6759 	.irq_handle_legacy = efx_ef10_legacy_interrupt,
6760 	.tx_probe = efx_ef10_tx_probe,
6761 	.tx_init = efx_ef10_tx_init,
6762 	.tx_remove = efx_ef10_tx_remove,
6763 	.tx_write = efx_ef10_tx_write,
6764 	.tx_limit_len = efx_ef10_tx_limit_len,
6765 	.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
6766 	.rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
6767 	.rx_push_rss_context_config = efx_ef10_rx_push_rss_context_config,
6768 	.rx_pull_rss_context_config = efx_ef10_rx_pull_rss_context_config,
6769 	.rx_restore_rss_contexts = efx_ef10_rx_restore_rss_contexts,
6770 	.rx_probe = efx_ef10_rx_probe,
6771 	.rx_init = efx_ef10_rx_init,
6772 	.rx_remove = efx_ef10_rx_remove,
6773 	.rx_write = efx_ef10_rx_write,
6774 	.rx_defer_refill = efx_ef10_rx_defer_refill,
6775 	.ev_probe = efx_ef10_ev_probe,
6776 	.ev_init = efx_ef10_ev_init,
6777 	.ev_fini = efx_ef10_ev_fini,
6778 	.ev_remove = efx_ef10_ev_remove,
6779 	.ev_process = efx_ef10_ev_process,
6780 	.ev_read_ack = efx_ef10_ev_read_ack,
6781 	.ev_test_generate = efx_ef10_ev_test_generate,
6782 	.filter_table_probe = efx_ef10_filter_table_probe,
6783 	.filter_table_restore = efx_ef10_filter_table_restore,
6784 	.filter_table_remove = efx_ef10_filter_table_remove,
6785 	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
6786 	.filter_insert = efx_ef10_filter_insert,
6787 	.filter_remove_safe = efx_ef10_filter_remove_safe,
6788 	.filter_get_safe = efx_ef10_filter_get_safe,
6789 	.filter_clear_rx = efx_ef10_filter_clear_rx,
6790 	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
6791 	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
6792 	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
6793 #ifdef CONFIG_RFS_ACCEL
6794 	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
6795 #endif
6796 #ifdef CONFIG_SFC_MTD
6797 	.mtd_probe = efx_ef10_mtd_probe,
6798 	.mtd_rename = efx_mcdi_mtd_rename,
6799 	.mtd_read = efx_mcdi_mtd_read,
6800 	.mtd_erase = efx_mcdi_mtd_erase,
6801 	.mtd_write = efx_mcdi_mtd_write,
6802 	.mtd_sync = efx_mcdi_mtd_sync,
6803 #endif
6804 	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
6805 	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
6806 	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
6807 	.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
6808 	.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
6809 	.udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
6810 	.udp_tnl_add_port = efx_ef10_udp_tnl_add_port,
6811 	.udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
6812 	.udp_tnl_del_port = efx_ef10_udp_tnl_del_port,
6813 #ifdef CONFIG_SFC_SRIOV
6814 	.sriov_configure = efx_ef10_sriov_configure,
6815 	.sriov_init = efx_ef10_sriov_init,
6816 	.sriov_fini = efx_ef10_sriov_fini,
6817 	.sriov_wanted = efx_ef10_sriov_wanted,
6818 	.sriov_reset = efx_ef10_sriov_reset,
6819 	.sriov_flr = efx_ef10_sriov_flr,
6820 	.sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
6821 	.sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
6822 	.sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
6823 	.sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
6824 	.sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
6825 	.vswitching_probe = efx_ef10_vswitching_probe_pf,
6826 	.vswitching_restore = efx_ef10_vswitching_restore_pf,
6827 	.vswitching_remove = efx_ef10_vswitching_remove_pf,
6828 #endif
6829 	.get_mac_address = efx_ef10_get_mac_address_pf,
6830 	.set_mac_address = efx_ef10_set_mac_address,
6831 	.tso_versions = efx_ef10_tso_versions,
6832 
6833 	.get_phys_port_id = efx_ef10_get_phys_port_id,
6834 	.revision = EFX_REV_HUNT_A0,
6835 	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
6836 	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
6837 	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
6838 	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
6839 	.can_rx_scatter = true,
6840 	.always_rx_scatter = true,
6841 	.option_descriptors = true,
6842 	.min_interrupt_mode = EFX_INT_MODE_LEGACY,
6843 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
6844 	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
6845 	.offload_features = EF10_OFFLOAD_FEATURES,
6846 	.mcdi_max_ver = 2,
6847 	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
6848 	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
6849 			    1 << HWTSTAMP_FILTER_ALL,
6850 	.rx_hash_key_size = 40,
6851 };
6852