/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

#define EFX_EF10_FILTER_ID_INVALID 0xffff

#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256

/* VLAN list entry */
struct efx_ef10_vlan {
	struct list_head list;
	u16 vid;
};

enum efx_ef10_default_filters {
	EFX_EF10_BCAST,
	EFX_EF10_UCDEF,
	EFX_EF10_MCDEF,
	EFX_EF10_VXLAN4_UCDEF,
	EFX_EF10_VXLAN4_MCDEF,
	EFX_EF10_VXLAN6_UCDEF,
	EFX_EF10_VXLAN6_MCDEF,
	EFX_EF10_NVGRE4_UCDEF,
	EFX_EF10_NVGRE4_MCDEF,
	EFX_EF10_NVGRE6_UCDEF,
	EFX_EF10_NVGRE6_MCDEF,
	EFX_EF10_GENEVE4_UCDEF,
	EFX_EF10_GENEVE4_MCDEF,
	EFX_EF10_GENEVE6_UCDEF,
	EFX_EF10_GENEVE6_MCDEF,

	EFX_EF10_NUM_DEFAULT_FILTERS
};

/* Per-VLAN filters information */
struct efx_ef10_filter_vlan {
	struct list_head list;
	u16 vid;
	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
	u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
};

struct efx_ef10_dev_addr {
	u8 addr[ETH_ALEN];
};

struct efx_ef10_filter_table {
/* The MCDI match masks supported by this fw & hw, in order of priority */
	u32 rx_match_mcdi_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
	unsigned int rx_match_count;

	struct rw_semaphore lock; /* Protects entries */
	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
/* unused flag	1UL */
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
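/* The low two bits of 'spec' are these flag bits; the spec pointer itself
 * can be recovered by masking them off, e.g.
 * (struct efx_filter_spec *)(entry->spec & ~EFX_EF10_FILTER_FLAGS).
 */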
		u64 handle;		/* firmware handle */
	} *entry;
/* Shadow of net_device address lists, guarded by mac_lock */
	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;
	int dev_mc_count;
	bool uc_promisc;
	bool mc_promisc;
/* Whether in multicast promiscuous mode when last changed */
	bool mc_promisc_last;
	bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
	bool vlan_filter;
	struct list_head vlan_list;
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
					      struct efx_ef10_filter_vlan *vlan);
static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);

static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
{
	WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
	return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
}

static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
{
	return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
}

static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
{
	return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
}
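
/* Worked example: with HUNT_FILTER_TBL_ROWS == 8192, a filter at table
 * index 0x123 with priority 1 gets ID 1 * 8192 * 2 + 0x123 = 0x4123;
 * efx_ef10_filter_get_unsafe_id() recovers 0x4123 & 0x1fff = 0x123 and
 * efx_ef10_filter_get_unsafe_pri() recovers 0x4123 / 0x4000 = 1.
 */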

static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

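	/* Once the MC has booted, it sets EFX_WORD_1 of this register to the
	 * magic value 0xb007 ("boot"); EFX_WORD_0 then holds the warm boot
	 * count.  Any other value means the MC is not ready yet.
	 */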
	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
 * I/O space and BAR 2(&3) for memory.  On SFC9250 (Medford2), there is no I/O
 * bar; PFs use BAR 0/1 for memory.
 */
static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
{
	switch (efx->pci_dev->device) {
	case 0x0b03: /* SFC9250 PF */
		return 0;
	default:
		return 2;
	}
}

/* All VFs use BAR 0/1 for memory */
static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
{
	return 0;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar(efx);
	return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
	return efx->type->is_vf;
}

static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
		nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
				GET_CAPABILITIES_V2_OUT_FLAGS2);
		nic_data->piobuf_size = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
	} else {
		nic_data->datapath_caps2 = 0;
		nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
	}

	/* Record the DPCPU firmware IDs to determine VEB vswitching support. */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
		u8 vi_window_mode = MCDI_BYTE(outbuf,
				GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);

		switch (vi_window_mode) {
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
			efx->vi_stride = 8192;
			break;
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
			efx->vi_stride = 16384;
			break;
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
			efx->vi_stride = 65536;
			break;
		default:
			netif_err(efx, probe, efx->net_dev,
				  "Unrecognised VI window mode %d\n",
				  vi_window_mode);
			return -EIO;
		}
		netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
			  efx->vi_stride);
	} else {
		/* keep default VI stride */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report VI window mode, assuming vi_stride = %u\n",
			  efx->vi_stride);
	}
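
	/* The VI stride is the spacing of the per-VI register windows within
	 * the memory BAR: VI n's registers start at byte offset
	 * n * efx->vi_stride.
	 */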

	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
		efx->num_mac_stats = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware reports num_mac_stats = %u\n",
			  efx->num_mac_stats);
	} else {
		/* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report num_mac_stats, assuming %u\n",
			  efx->num_mac_stats);
	}

	return 0;
}

static void efx_ef10_read_licensed_features(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
		       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
		return;

	nic_data->licensed_features = MCDI_QWORD(outbuf,
					 LICENSING_V3_OUT_LICENSED_FEATURES);
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int implemented;
	unsigned int enabled;
	int rc;

	nic_data->workaround_35388 = false;
	nic_data->workaround_61265 = false;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);

	if (rc == -ENOSYS) {
		/* Firmware without GET_WORKAROUNDS - not a problem. */
		rc = 0;
	} else if (rc == 0) {
		/* Bug61265 workaround is always enabled if implemented. */
		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
			nic_data->workaround_61265 = true;

		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			nic_data->workaround_35388 = true;
		} else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			/* Workaround is implemented but not enabled.
			 * Try to enable it.
			 */
			rc = efx_mcdi_set_workaround(efx,
						     MC_CMD_WORKAROUND_BUG35388,
						     true, NULL);
			if (rc == 0)
				nic_data->workaround_35388 = true;
			/* If we failed to set the workaround just carry on. */
			rc = 0;
		}
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 61265 is %sabled\n",
		  nic_data->workaround_61265 ? "en" : "dis");

	return rc;
}

static void efx_ef10_process_timer_config(struct efx_nic *efx,
					  const efx_dword_t *data)
{
	unsigned int max_count;

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
		efx->timer_max_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	} else {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "got timer properties from MC: quantum %u ns; max %u ns\n",
		  efx->timer_quantum_ns, efx->timer_max_ns);
}

static int efx_ef10_get_timer_config(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
	int rc;

	rc = efx_ef10_get_timer_workarounds(efx);
	if (rc)
		return rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
				outbuf, sizeof(outbuf), NULL);

	if (rc == 0) {
		efx_ef10_process_timer_config(efx, outbuf);
	} else if (rc == -ENOSYS || rc == -EPERM) {
		/* Not available - fall back to Huntington defaults. */
		unsigned int quantum;

		rc = efx_ef10_get_sysclk_freq(efx);
		if (rc < 0)
			return rc;

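		/* GET_CLOCK reports the system clock in MHz, so 1536 cycles
		 * take 1536000 / freq nanoseconds; e.g. an 800 MHz sysclk
		 * would give a 1920 ns quantum.
		 */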
		quantum = 1536000 / rc; /* 1536 cycles */
		efx->timer_quantum_ns = quantum;
		efx->timer_max_ns = efx->type->timer_period_max * quantum;
		rc = 0;
	} else {
		efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
				       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
				       NULL, 0, rc);
	}

	return rc;
}

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (vlan) {
		/* We add VID 0 on init.  8021q adds it on module init
		 * for all interfaces with the VLAN filtering feature.
		 */
		if (vid == 0)
			goto done_unlock;
		netif_warn(efx, drv, efx->net_dev,
			   "VLAN %u already added\n", vid);
		rc = -EALREADY;
		goto fail_exist;
	}

	rc = -ENOMEM;
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		goto fail_alloc;

	vlan->vid = vid;

	list_add_tail(&vlan->list, &nic_data->vlan_list);

	if (efx->filter_state) {
		mutex_lock(&efx->mac_lock);
		down_write(&efx->filter_sem);
		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
		mutex_unlock(&efx->mac_lock);
		if (rc)
			goto fail_filter_add_vlan;
	}

done_unlock:
	mutex_unlock(&nic_data->vlan_lock);
	return 0;

fail_filter_add_vlan:
	list_del(&vlan->list);
	kfree(vlan);
fail_alloc:
fail_exist:
	mutex_unlock(&nic_data->vlan_lock);
	return rc;
}

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
				       struct efx_ef10_vlan *vlan)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	if (efx->filter_state) {
		down_write(&efx->filter_sem);
		efx_ef10_filter_del_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
	}

	list_del(&vlan->list);
	kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc = 0;

	/* 8021q removes VID 0 on module unload for all interfaces with the
	 * VLAN filtering feature.  We need to keep it to receive untagged
	 * traffic.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u to be deleted not found\n", vid);
		rc = -ENOENT;
	} else {
		efx_ef10_del_vlan_internal(efx, vlan);
	}

	mutex_unlock(&nic_data->vlan_lock);

	return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan, *next_vlan;

	mutex_lock(&nic_data->vlan_lock);
	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
		efx_ef10_del_vlan_internal(efx, vlan);
	mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	mutex_init(&nic_data->udp_tunnels_lock);

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_ef10_get_pf_index(efx);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx_ef10_read_licensed_features(efx);

	/* We can have one VI for each vi_stride-byte region.
	 * However, until we use TX option descriptors we need two TX queues
	 * per channel.
	 */
	efx->max_channels = min_t(unsigned int,
				  EFX_MAX_CHANNELS,
				  efx_ef10_mem_map_size(efx) /
				  (efx->vi_stride * EFX_TXQ_TYPES));
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0)) {
		rc = -EIO;
		goto fail5;
	}

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
		efx->net_dev->hw_features |= NETIF_F_RXFCS;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_timer_config(efx);
	if (rc < 0)
		goto fail5;

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	efx_ptp_defer_probe_with_channel(efx);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	INIT_LIST_HEAD(&nic_data->vlan_list);
	mutex_init(&nic_data->vlan_lock);

	/* Add unspecified VID to support VLAN filtering being disabled */
	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		goto fail_add_vid_unspec;

	/* If VLAN filtering is enabled, we need VID 0 to get untagged
	 * traffic.  It is added automatically if the 8021q module is loaded,
	 * but we can't rely on that since the module may not be loaded.
	 */
	rc = efx_ef10_add_vlan(efx, 0);
	if (rc)
		goto fail_add_vid_0;

	return 0;

fail_add_vid_0:
	efx_ef10_cleanup_vlans(efx);
fail_add_vid_unspec:
	mutex_destroy(&nic_data->vlan_lock);
	efx_ptp_remove(efx);
	efx_mcdi_mon_remove(efx);
fail5:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
	efx_mcdi_detach(efx);

	mutex_lock(&nic_data->udp_tunnels_lock);
	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);
	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}

#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
					outbuf, sizeof(outbuf), &outlen);
		if (rc) {
			/* Don't display the MC error if we didn't have space
			 * for a VF.
			 */
			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
						       0, outbuf, outlen, rc);
			break;
		}
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		/* Extra channels, even those with TXQs (PTP), do not require
		 * PIO resources.
		 */
		if (!channel->type->want_pio)
			continue;
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / nic_data->piobuf_size;
			offset = offset % nic_data->piobuf_size;
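
			/* Illustration (hypothetical sizes): if
			 * nic_data->piobuf_size were 2048 and
			 * efx_piobuf_size 256, eight consecutive queues
			 * would share each PIO buffer, each taking a
			 * distinct 256-byte chunk of it.
			 */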

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * efx->vi_stride + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	/* inbuf was defined for MC_CMD_LINK_PIOBUF.  We can use the same
	 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
	 */
	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}
#endif

	efx_ef10_cleanup_vlans(efx);
	mutex_destroy(&nic_data->vlan_lock);

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_detach(efx);

	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	mutex_lock(&nic_data->udp_tunnels_lock);
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);

	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
			    u32 *port_flags, u32 *vadaptor_flags,
			    unsigned int *vlan_tags)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
	size_t outlen;
	int rc;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
			       port_id);

		rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;

		if (outlen < sizeof(outbuf)) {
			rc = -EIO;
			return rc;
		}
	}

	if (port_flags)
		*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
	if (vadaptor_flags)
		*vadaptor_flags =
			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
	if (vlan_tags)
		*vlan_tags =
			MCDI_DWORD(outbuf,
				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);

	return 0;
}

int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe.  The VF needs to be re-created.  This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis = max(EFX_TXQ_TYPES,
				   efx_separate_tx_channels ? 2 : 1);
	unsigned int channel_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	channel_vis = max(efx->n_channels,
			  (efx->n_tx_channels + efx->n_extra_tx_channels) *
			  EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     nic_data->piobuf_size / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc == -ENOSPC)
			netif_dbg(efx, probe, efx->net_dev,
				  "out of PIO buffers; cannot allocate more\n");
		else if (rc == -EPERM)
			netif_dbg(efx, probe, efx->net_dev,
				  "not permitted to allocate PIO buffers\n");
		else if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (channel_vis - 1) complete VIs and the
	 * first 4K of the next VI.  Then the WC mapping begins with
	 * the remainder of this last VI.
	 */
	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
				     ER_DZ_TX_PIOBUF);
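	/* For example, with 4K pages, vi_stride = 8192 and channel_vis = 32,
	 * this is PAGE_ALIGN(31 * 8192 + ER_DZ_TX_PIOBUF): 31 complete VIs
	 * plus the first 4K of the 32nd, as described above.
	 */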
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      efx->vi_stride) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = channel_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	if (nic_data->n_allocated_vis < channel_vis) {
		netif_info(efx, drv, efx->net_dev,
			   "Could not allocate enough VIs to satisfy RSS"
			   " requirements. Performance may not be optimal.\n");
		/* We didn't get the VIs to populate our channels.
		 * We could keep what we got but then we'd have more
		 * interrupts than we need.
		 * Instead calculate new max_channels and restart
		 */
		efx->max_channels = nic_data->n_allocated_vis;
		efx->max_tx_channels =
			nic_data->n_allocated_vis / EFX_TXQ_TYPES;

		efx_ef10_free_vis(efx);
		return -EAGAIN;
	}

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal.
		 * Permission errors are less important - we've presumably
		 * had the PIO buffer licence removed.
		 */
		if (rc == -EPERM)
			netif_dbg(efx, drv, efx->net_dev,
				  "not permitted to restore PIO buffers\n");
		else if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	/* don't fail init if RSS setup doesn't work */
	rc = efx->type->rx_push_rss_config(efx, false,
					   efx->rss_context.rx_indir_table, NULL);

	return 0;
}

static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
	unsigned int i;
#endif

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_rss_contexts = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	efx_ef10_forget_old_piobufs(efx);
	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
	if (nic_data->vf)
		for (i = 0; i < efx->vf_count; i++)
			nic_data->vf[i].vport_id = 0;
#endif
}

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};
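
	/* For example, an ethtool reset request with all of the shared DMA,
	 * filter, offload, MAC, PHY and MGMT bits set consumes those bits
	 * and is performed as RESET_TYPE_WORLD; shared MAC + PHY alone maps
	 * to RESET_TYPE_ALL.
	 */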

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* Unprivileged functions return -EPERM, but need to return success
	 * here so that the datapath is brought back up.
	 */
	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
		rc = 0;

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_reset_mc_allocations(efx);
	return rc;
}

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
#define GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
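
/* For example, EF10_DMA_STAT(port_tx_bytes, TX_BYTES) expands to
 *	[EF10_STAT_port_tx_bytes] =
 *		{ "port_tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES }
 * i.e. a 64-bit statistic located 8 * MC_CMD_MAC_TX_BYTES bytes into the
 * DMA statistics buffer.
 */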

static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(port_rx_good_bytes),
	EF10_OTHER_STAT(port_rx_bad_bytes),
	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
	EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
	EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
	EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
	EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
	EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
	EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
	EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
	EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
	EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
	EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
	EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
	EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
	EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
	EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
	EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
	EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
	EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
	EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
	EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
	EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
	EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
	EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
	EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
	EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
	EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
};

#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
			       (1ULL << EF10_STAT_port_tx_packets) |	\
			       (1ULL << EF10_STAT_port_tx_pause) |	\
			       (1ULL << EF10_STAT_port_tx_unicast) |	\
			       (1ULL << EF10_STAT_port_tx_multicast) |	\
			       (1ULL << EF10_STAT_port_tx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_bytes) |	\
			       (1ULL <<                                 \
				EF10_STAT_port_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_port_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_packets) |	\
			       (1ULL << EF10_STAT_port_rx_good) |	\
			       (1ULL << EF10_STAT_port_rx_bad) |	\
			       (1ULL << EF10_STAT_port_rx_pause) |	\
			       (1ULL << EF10_STAT_port_rx_control) |	\
			       (1ULL << EF10_STAT_port_rx_unicast) |	\
			       (1ULL << EF10_STAT_port_rx_multicast) |	\
			       (1ULL << EF10_STAT_port_rx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_lt64) |	\
			       (1ULL << EF10_STAT_port_rx_64) |		\
			       (1ULL << EF10_STAT_port_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_port_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_port_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_port_rx_512_to_1023) |\
			       (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
			       (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
			       (1ULL << EF10_STAT_port_rx_gtjumbo) |	\
			       (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
			       (1ULL << EF10_STAT_port_rx_overflow) |	\
			       (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
			       (1ULL << GENERIC_STAT_rx_nodesc_trunc) |	\
			       (1ULL << GENERIC_STAT_rx_noskb_drops))

/* On 7000 series NICs, these statistics are only provided by the 10G MAC.
 * For a 10G/40G switchable port we do not expose these because they might
 * not include all the packets they should.
 * On 8000 series NICs these statistics are always provided.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |	\
				 (1ULL << EF10_STAT_port_tx_lt64) |	\
				 (1ULL << EF10_STAT_port_tx_64) |	\
				 (1ULL << EF10_STAT_port_tx_65_to_127) |\
				 (1ULL << EF10_STAT_port_tx_128_to_255) |\
				 (1ULL << EF10_STAT_port_tx_256_to_511) |\
				 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
				 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
				 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))

/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * be silent.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
				  (1ULL << EF10_STAT_port_rx_length_error))

/* These statistics are only provided if the firmware supports the
 * capability PM_AND_RXDP_COUNTERS.
 */
#define HUNT_PM_AND_RXDP_STAT_MASK (					\
	(1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_mapping) |		\
	(1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_streaming_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))

/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
1761  * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
1762  * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1763  * These bits are in the second u64 of the raw mask.
1764  */
1765 #define EF10_FEC_STAT_MASK (						\
1766 	(1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) |		\
1767 	(1ULL << (EF10_STAT_fec_corrected_errors - 64)) |		\
1768 	(1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) |	\
1769 	(1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) |	\
1770 	(1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) |	\
1771 	(1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
1772 
1773 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
1774  * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
1775  * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1776  * These bits are in the second u64 of the raw mask.
1777  */
1778 #define EF10_CTPIO_STAT_MASK (						\
1779 	(1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) |		\
1780 	(1ULL << (EF10_STAT_ctpio_long_write_success - 64)) |		\
1781 	(1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) |		\
1782 	(1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) |		\
1783 	(1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) |		\
1784 	(1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) |			\
1785 	(1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) |		\
1786 	(1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) |		\
1787 	(1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) |		\
1788 	(1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) |		\
1789 	(1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) |		\
1790 	(1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) |		\
1791 	(1ULL << (EF10_STAT_ctpio_success - 64)) |			\
1792 	(1ULL << (EF10_STAT_ctpio_fallback - 64)) |			\
1793 	(1ULL << (EF10_STAT_ctpio_poison - 64)) |			\
1794 	(1ULL << (EF10_STAT_ctpio_erase - 64)))
1795 
1796 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
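/* Build the low 64 bits of the statistics mask from the port
 * capabilities.  Only the link-control function owns the port
 * statistics, so every other function reports none of them.  Ports
 * capable of 40G gain the extra 40G MAC error counters; the TX size
 * bins are included either when the port cannot do 40G or when the
 * 8000-series capability bit says they are valid at 40G as well.
 */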
1797 {
1798 	u64 raw_mask = HUNT_COMMON_STAT_MASK;
1799 	u32 port_caps = efx_mcdi_phy_get_caps(efx);
1800 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1801 
1802 	if (!(efx->mcdi->fn_flags &
1803 	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
1804 		return 0;
1805 
1806 	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
1807 		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
1808 		/* 8000 series have everything even at 40G */
1809 		if (nic_data->datapath_caps2 &
1810 		    (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
1811 			raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1812 	} else {
1813 		raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1814 	}
1815 
1816 	if (nic_data->datapath_caps &
1817 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
1818 		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
1819 
1820 	return raw_mask;
1821 }
1822 
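/* Convert the two raw u64 mask words into the unsigned-long bitmap
 * used by the generic statistics code.  On 64-bit hosts this is a
 * direct copy; on 32-bit hosts raw_mask[0] is split into mask[0]
 * (low 32 bits) and mask[1] (high 32 bits), and the low half of
 * raw_mask[1] becomes mask[2].  The high half of raw_mask[1] is
 * known to be unused because there are no more than 96 statistics
 * (see the BUILD_BUG_ON below).
 */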
1823 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
1824 {
1825 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1826 	u64 raw_mask[2];
1827 
1828 	raw_mask[0] = efx_ef10_raw_stat_mask(efx);
1829 
1830 	/* Only show vadaptor stats when EVB capability is present */
1831 	if (nic_data->datapath_caps &
1832 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
1833 		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
1834 		raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
1835 	} else {
1836 		raw_mask[1] = 0;
1837 	}
1838 	/* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
1839 	if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
1840 		raw_mask[1] |= EF10_FEC_STAT_MASK;
1841 
1842 	/* CTPIO stats appear in V3. Only show them on devices that actually
1843 	 * support CTPIO. Although this driver doesn't use CTPIO, others might,
1844 	 * and we may be reporting the stats for the underlying port.
1845 	 */
1846 	if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
1847 	    (nic_data->datapath_caps2 &
1848 	     (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
1849 		raw_mask[1] |= EF10_CTPIO_STAT_MASK;
1850 
1851 #if BITS_PER_LONG == 64
1852 	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
1853 	mask[0] = raw_mask[0];
1854 	mask[1] = raw_mask[1];
1855 #else
1856 	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
1857 	mask[0] = raw_mask[0] & 0xffffffff;
1858 	mask[1] = raw_mask[0] >> 32;
1859 	mask[2] = raw_mask[1] & 0xffffffff;
1860 #endif
1861 }
1862 
1863 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
1864 {
1865 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1866 
1867 	efx_ef10_get_stat_mask(efx, mask);
1868 	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
1869 				      mask, names);
1870 }
1871 
1872 static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
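/* Copy out the accumulated statistics.  full_stats (if given)
 * receives every enabled statistic in mask order, matching the names
 * emitted by efx_ef10_describe_stats().  core_stats (if given) is
 * filled from the vadaptor counters when the EVB capability is
 * present, because the port counters can then include traffic for
 * other functions, and from the port counters otherwise.
 */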
1873 					   struct rtnl_link_stats64 *core_stats)
1874 {
1875 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1876 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1877 	u64 *stats = nic_data->stats;
1878 	size_t stats_count = 0, index;
1879 
1880 	efx_ef10_get_stat_mask(efx, mask);
1881 
1882 	if (full_stats) {
1883 		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
1884 			if (efx_ef10_stat_desc[index].name) {
1885 				*full_stats++ = stats[index];
1886 				++stats_count;
1887 			}
1888 		}
1889 	}
1890 
1891 	if (!core_stats)
1892 		return stats_count;
1893 
1894 	if (nic_data->datapath_caps &
1895 			1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
1896 		/* Use vadaptor stats. */
1897 		core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
1898 					 stats[EF10_STAT_rx_multicast] +
1899 					 stats[EF10_STAT_rx_broadcast];
1900 		core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
1901 					 stats[EF10_STAT_tx_multicast] +
1902 					 stats[EF10_STAT_tx_broadcast];
1903 		core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
1904 				       stats[EF10_STAT_rx_multicast_bytes] +
1905 				       stats[EF10_STAT_rx_broadcast_bytes];
1906 		core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
1907 				       stats[EF10_STAT_tx_multicast_bytes] +
1908 				       stats[EF10_STAT_tx_broadcast_bytes];
1909 		core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
1910 					 stats[GENERIC_STAT_rx_noskb_drops];
1911 		core_stats->multicast = stats[EF10_STAT_rx_multicast];
1912 		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
1913 		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
1914 		core_stats->rx_errors = core_stats->rx_crc_errors;
1915 		core_stats->tx_errors = stats[EF10_STAT_tx_bad];
1916 	} else {
1917 		/* Use port stats. */
1918 		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
1919 		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
1920 		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
1921 		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
1922 		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
1923 					 stats[GENERIC_STAT_rx_nodesc_trunc] +
1924 					 stats[GENERIC_STAT_rx_noskb_drops];
1925 		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
1926 		core_stats->rx_length_errors =
1927 				stats[EF10_STAT_port_rx_gtjumbo] +
1928 				stats[EF10_STAT_port_rx_length_error];
1929 		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
1930 		core_stats->rx_frame_errors =
1931 				stats[EF10_STAT_port_rx_align_error];
1932 		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
1933 		core_stats->rx_errors = (core_stats->rx_length_errors +
1934 					 core_stats->rx_crc_errors +
1935 					 core_stats->rx_frame_errors);
1936 	}
1937 
1938 	return stats_count;
1939 }
1940 
1941 static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
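/* The MC DMAs the MAC statistics into stats_buffer and brackets each
 * update with a generation count: one copy at index
 * MC_CMD_MAC_GENERATION_START and one in the final word of the
 * buffer.  We read the end marker, copy the statistics out, then read
 * the start marker; if the two differ, the MC was mid-update and our
 * copy may be torn, so return -EAGAIN and let the caller retry.  The
 * rmb() calls keep the marker reads ordered against the copy.
 */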
1942 {
1943 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1944 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1945 	__le64 generation_start, generation_end;
1946 	u64 *stats = nic_data->stats;
1947 	__le64 *dma_stats;
1948 
1949 	efx_ef10_get_stat_mask(efx, mask);
1950 
1951 	dma_stats = efx->stats_buffer.addr;
1952 
1953 	generation_end = dma_stats[efx->num_mac_stats - 1];
1954 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
1955 		return 0;
1956 	rmb();
1957 	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
1958 			     stats, efx->stats_buffer.addr, false);
1959 	rmb();
1960 	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1961 	if (generation_end != generation_start)
1962 		return -EAGAIN;
1963 
1964 	/* Update derived statistics */
1965 	efx_nic_fix_nodesc_drop_stat(efx,
1966 				     &stats[EF10_STAT_port_rx_nodesc_drops]);
1967 	stats[EF10_STAT_port_rx_good_bytes] =
1968 		stats[EF10_STAT_port_rx_bytes] -
1969 		stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
1970 	efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
1971 			     stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
1972 	efx_update_sw_stats(efx, stats);
1973 	return 0;
1974 }
1975 
1977 static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
1978 				       struct rtnl_link_stats64 *core_stats)
1979 {
1980 	int retry;
1981 
1982 	/* If we're unlucky enough to read statistics during the DMA, wait
1983 	 * up to 10ms for it to finish (typically takes <500us)
1984 	 */
1985 	for (retry = 0; retry < 100; ++retry) {
1986 		if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
1987 			break;
1988 		udelay(100);
1989 	}
1990 
1991 	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1992 }
1993 
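/* A VF cannot read the port-level MAC statistics used on the PF, so
 * it instead requests a one-shot MC_CMD_MAC_STATS DMA of its vadaptor
 * statistics into a temporary buffer.  The blocking MCDI call cannot
 * be made while holding efx->stats_lock or from atomic context; in
 * the latter case only the software-maintained statistics can be
 * refreshed.
 */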
1994 static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
1995 {
1996 	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
1997 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1998 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1999 	__le64 generation_start, generation_end;
2000 	u64 *stats = nic_data->stats;
2001 	u32 dma_len = efx->num_mac_stats * sizeof(u64);
2002 	struct efx_buffer stats_buf;
2003 	__le64 *dma_stats;
2004 	int rc;
2005 
2006 	spin_unlock_bh(&efx->stats_lock);
2007 
2008 	if (in_interrupt()) {
2009 		/* In interrupt or softirq context we cannot make a blocking
2010 		 * MCDI call to update the stats.  Just update the software
2011 		 * stats and return so the caller can continue.
2012 		 */
2012 		spin_lock_bh(&efx->stats_lock);
2013 		efx_update_sw_stats(efx, stats);
2014 		return 0;
2015 	}
2016 
2017 	efx_ef10_get_stat_mask(efx, mask);
2018 
2019 	rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
2020 	if (rc) {
2021 		spin_lock_bh(&efx->stats_lock);
2022 		return rc;
2023 	}
2024 
2025 	dma_stats = stats_buf.addr;
2026 	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
2027 
2028 	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
2029 	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
2030 			      MAC_STATS_IN_DMA, 1);
2031 	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
2032 	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
2033 
2034 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
2035 				NULL, 0, NULL);
2036 	spin_lock_bh(&efx->stats_lock);
2037 	if (rc) {
2038 		/* Expect ENOENT if DMA queues have not been set up */
2039 		if (rc != -ENOENT || atomic_read(&efx->active_queues))
2040 			efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
2041 					       sizeof(inbuf), NULL, 0, rc);
2042 		goto out;
2043 	}
2044 
2045 	generation_end = dma_stats[efx->num_mac_stats - 1];
2046 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
2047 		WARN_ON_ONCE(1);
2048 		goto out;
2049 	}
2050 	rmb();
2051 	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
2052 			     stats, stats_buf.addr, false);
2053 	rmb();
2054 	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
2055 	if (generation_end != generation_start) {
2056 		rc = -EAGAIN;
2057 		goto out;
2058 	}
2059 
2060 	efx_update_sw_stats(efx, stats);
2061 out:
2062 	efx_nic_free_buffer(efx, &stats_buf);
2063 	return rc;
2064 }
2065 
2066 static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
2067 				       struct rtnl_link_stats64 *core_stats)
2068 {
2069 	if (efx_ef10_try_update_nic_stats_vf(efx))
2070 		return 0;
2071 
2072 	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
2073 }
2074 
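/* Program the event-queue timer for a channel.  Mode 3 is the
 * interrupt hold-off timer mode and mode 0 disables the timer.  The
 * mechanism used depends on firmware workarounds: bug 61265 requires
 * the MCDI MC_CMD_SET_EVQ_TMR call, bug 35388 requires the indirect
 * ER_DD_EVQ_INDIRECT register, and otherwise ER_DZ_EVQ_TMR is
 * written directly.
 */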
2075 static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
2076 {
2077 	struct efx_nic *efx = channel->efx;
2078 	unsigned int mode, usecs;
2079 	efx_dword_t timer_cmd;
2080 
2081 	if (channel->irq_moderation_us) {
2082 		mode = 3;
2083 		usecs = channel->irq_moderation_us;
2084 	} else {
2085 		mode = 0;
2086 		usecs = 0;
2087 	}
2088 
2089 	if (EFX_EF10_WORKAROUND_61265(efx)) {
2090 		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
2091 		unsigned int ns = usecs * 1000;
2092 
2093 		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
2094 			       channel->channel);
2095 		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
2096 		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
2097 		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);
2098 
2099 		efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
2100 				   inbuf, sizeof(inbuf), 0, NULL, 0);
2101 	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
2102 		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
2103 
2104 		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
2105 				     EFE_DD_EVQ_IND_TIMER_FLAGS,
2106 				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
2107 				     ERF_DD_EVQ_IND_TIMER_VAL, ticks);
2108 		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
2109 				channel->channel);
2110 	} else {
2111 		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
2112 
2113 		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
2114 				     ERF_DZ_TC_TIMER_VAL, ticks,
2115 				     ERF_FZ_TC_TMR_REL_VAL, ticks);
2116 		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
2117 				channel->channel);
2118 	}
2119 }
2120 
2121 static void efx_ef10_get_wol_vf(struct efx_nic *efx,
2122 				struct ethtool_wolinfo *wol) {}
2123 
2124 static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
2125 {
2126 	return -EOPNOTSUPP;
2127 }
2128 
2129 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
2130 {
2131 	wol->supported = 0;
2132 	wol->wolopts = 0;
2133 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2134 }
2135 
2136 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
2137 {
2138 	if (type != 0)
2139 		return -EINVAL;
2140 	return 0;
2141 }
2142 
2143 static void efx_ef10_mcdi_request(struct efx_nic *efx,
2144 				  const efx_dword_t *hdr, size_t hdr_len,
2145 				  const efx_dword_t *sdu, size_t sdu_len)
2146 {
2147 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2148 	u8 *pdu = nic_data->mcdi_buf.addr;
2149 
2150 	memcpy(pdu, hdr, hdr_len);
2151 	memcpy(pdu + hdr_len, sdu, sdu_len);
2152 	wmb();
2153 
2154 	/* The hardware provides 'low' and 'high' (doorbell) registers
2155 	 * for passing the 64-bit address of an MCDI request to
2156 	 * firmware.  However, the dwords are swapped by the firmware.  The
2157 	 * least significant bits of the doorbell are then 0 for all
2158 	 * MCDI requests due to alignment.
2159 	 */
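	/* For example, with the MCDI buffer at DMA address
	 * 0x0000001234567800 we write 0x00000012 to the LWRD register
	 * and 0x34567800 to the HWRD register; after the firmware's
	 * swap the MC sees the correct 64-bit address, and the HWRD
	 * write acts as the doorbell that starts processing.
	 */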
2160 	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
2161 		    ER_DZ_MC_DB_LWRD);
2162 	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
2163 		    ER_DZ_MC_DB_HWRD);
2164 }
2165 
2166 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
2167 {
2168 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2169 	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
2170 
2171 	rmb();
2172 	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
2173 }
2174 
2175 static void
2176 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
2177 			    size_t offset, size_t outlen)
2178 {
2179 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2180 	const u8 *pdu = nic_data->mcdi_buf.addr;
2181 
2182 	memcpy(outbuf, pdu + offset, outlen);
2183 }
2184 
2185 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
2186 {
2187 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2188 
2189 	/* All our allocations have been reset */
2190 	efx_ef10_reset_mc_allocations(efx);
2191 
2192 	/* The datapath firmware might have been changed */
2193 	nic_data->must_check_datapath_caps = true;
2194 
2195 	/* MAC statistics have been cleared on the NIC; clear the local
2196 	 * statistic that we update with efx_update_diff_stat().
2197 	 */
2198 	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
2199 }
2200 
2201 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
2202 {
2203 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2204 	int rc;
2205 
2206 	rc = efx_ef10_get_warm_boot_count(efx);
2207 	if (rc < 0) {
2208 		/* The firmware is presumably in the process of
2209 		 * rebooting.  However, we are supposed to report each
2210 		 * reboot just once, so we must only do that once we
2211 		 * can read and store the updated warm boot count.
2212 		 */
2213 		return 0;
2214 	}
2215 
2216 	if (rc == nic_data->warm_boot_count)
2217 		return 0;
2218 
2219 	nic_data->warm_boot_count = rc;
2220 	efx_ef10_mcdi_reboot_detected(efx);
2221 
2222 	return -EIO;
2223 }
2224 
2225 /* Handle an MSI interrupt
2226  *
2227  * Handle an MSI hardware interrupt.  This routine schedules event
2228  * queue processing.  No interrupt acknowledgement cycle is necessary.
2229  * Also, we never need to check that the interrupt is for us, since
2230  * MSI interrupts cannot be shared.
2231  */
2232 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
2233 {
2234 	struct efx_msi_context *context = dev_id;
2235 	struct efx_nic *efx = context->efx;
2236 
2237 	netif_vdbg(efx, intr, efx->net_dev,
2238 		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
2239 
2240 	if (likely(READ_ONCE(efx->irq_soft_enabled))) {
2241 		/* Note test interrupts */
2242 		if (context->index == efx->irq_level)
2243 			efx->last_irq_cpu = raw_smp_processor_id();
2244 
2245 		/* Schedule processing of the channel */
2246 		efx_schedule_channel_irq(efx->channel[context->index]);
2247 	}
2248 
2249 	return IRQ_HANDLED;
2250 }
2251 
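/* Handle a legacy (INTx) interrupt.  Reading the ISR acknowledges the
 * interrupt and returns a bitmap with one bit per event queue; since
 * the line may be shared with another device, report IRQ_NONE when no
 * bits are set.
 */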
2252 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
2253 {
2254 	struct efx_nic *efx = dev_id;
2255 	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
2256 	struct efx_channel *channel;
2257 	efx_dword_t reg;
2258 	u32 queues;
2259 
2260 	/* Read the ISR which also ACKs the interrupts */
2261 	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
2262 	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
2263 
2264 	if (queues == 0)
2265 		return IRQ_NONE;
2266 
2267 	if (likely(soft_enabled)) {
2268 		/* Note test interrupts */
2269 		if (queues & (1U << efx->irq_level))
2270 			efx->last_irq_cpu = raw_smp_processor_id();
2271 
2272 		efx_for_each_channel(channel, efx) {
2273 			if (queues & 1)
2274 				efx_schedule_channel_irq(channel);
2275 			queues >>= 1;
2276 		}
2277 	}
2278 
2279 	netif_vdbg(efx, intr, efx->net_dev,
2280 		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
2281 		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
2282 
2283 	return IRQ_HANDLED;
2284 }
2285 
2286 static int efx_ef10_irq_test_generate(struct efx_nic *efx)
2287 {
2288 	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
2289 
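	/* Firmware that accepts the bug 41750 workaround is taken to be
	 * firmware on which MC_CMD_TRIGGER_INTERRUPT does not work, so
	 * report the interrupt self-test as unsupported rather than
	 * letting it fail.
	 */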
2290 	if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
2291 				    NULL) == 0)
2292 		return -ENOTSUPP;
2293 
2294 	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
2295 
2296 	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
2297 	return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
2298 			    inbuf, sizeof(inbuf), NULL, 0, NULL);
2299 }
2300 
2301 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
2302 {
2303 	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
2304 				    (tx_queue->ptr_mask + 1) *
2305 				    sizeof(efx_qword_t),
2306 				    GFP_KERNEL);
2307 }
2308 
2309 /* This writes to the TX_DESC_WPTR and also pushes data */
2310 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
2311 					 const efx_qword_t *txd)
2312 {
2313 	unsigned int write_ptr;
2314 	efx_oword_t reg;
2315 
2316 	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2317 	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
2318 	reg.qword[0] = *txd;
2319 	efx_writeo_page(tx_queue->efx, &reg,
2320 			ER_DZ_TX_DESC_UPD, tx_queue->queue);
2321 }
2322 
2323 /* Add Firmware-Assisted TSO v2 option descriptors to a queue.
2324  */
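/* TSOv2 prefixes each packet with two option descriptors: FATSO2A
 * carries the initial IPv4 ID and TCP sequence number, and FATSO2B
 * carries the MSS.  The IP total length/payload length and the IPv4
 * checksum are zeroed here because the hardware regenerates them for
 * every segment.
 */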
2325 static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
2326 				struct sk_buff *skb,
2327 				bool *data_mapped)
2328 {
2329 	struct efx_tx_buffer *buffer;
2330 	struct tcphdr *tcp;
2331 	struct iphdr *ip;
2332 
2333 	u16 ipv4_id;
2334 	u32 seqnum;
2335 	u32 mss;
2336 
2337 	EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2);
2338 
2339 	mss = skb_shinfo(skb)->gso_size;
2340 
2341 	if (unlikely(mss < 4)) {
2342 		WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
2343 		return -EINVAL;
2344 	}
2345 
2346 	ip = ip_hdr(skb);
2347 	if (ip->version == 4) {
2348 		/* Modify IPv4 header if needed. */
2349 		ip->tot_len = 0;
2350 		ip->check = 0;
2351 		ipv4_id = ntohs(ip->id);
2352 	} else {
2353 		/* Modify IPv6 header if needed. */
2354 		struct ipv6hdr *ipv6 = ipv6_hdr(skb);
2355 
2356 		ipv6->payload_len = 0;
2357 		ipv4_id = 0;
2358 	}
2359 
2360 	tcp = tcp_hdr(skb);
2361 	seqnum = ntohl(tcp->seq);
2362 
2363 	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2364 
2365 	buffer->flags = EFX_TX_BUF_OPTION;
2366 	buffer->len = 0;
2367 	buffer->unmap_len = 0;
2368 	EFX_POPULATE_QWORD_5(buffer->option,
2369 			ESF_DZ_TX_DESC_IS_OPT, 1,
2370 			ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2371 			ESF_DZ_TX_TSO_OPTION_TYPE,
2372 			ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
2373 			ESF_DZ_TX_TSO_IP_ID, ipv4_id,
2374 			ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
2375 			);
2376 	++tx_queue->insert_count;
2377 
2378 	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2379 
2380 	buffer->flags = EFX_TX_BUF_OPTION;
2381 	buffer->len = 0;
2382 	buffer->unmap_len = 0;
2383 	EFX_POPULATE_QWORD_4(buffer->option,
2384 			ESF_DZ_TX_DESC_IS_OPT, 1,
2385 			ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2386 			ESF_DZ_TX_TSO_OPTION_TYPE,
2387 			ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
2388 			ESF_DZ_TX_TSO_TCP_MSS, mss
2389 			);
2390 	++tx_queue->insert_count;
2391 
2392 	return 0;
2393 }
2394 
2395 static u32 efx_ef10_tso_versions(struct efx_nic *efx)
2396 {
2397 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2398 	u32 tso_versions = 0;
2399 
2400 	if (nic_data->datapath_caps &
2401 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
2402 		tso_versions |= BIT(1);
2403 	if (nic_data->datapath_caps2 &
2404 	    (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
2405 		tso_versions |= BIT(2);
2406 	return tso_versions;
2407 }
2408 
2409 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
2410 {
2411 	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2412 						       EFX_BUF_SIZE));
2413 	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
2414 	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
2415 	struct efx_channel *channel = tx_queue->channel;
2416 	struct efx_nic *efx = tx_queue->efx;
2417 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2418 	bool tso_v2 = false;
2419 	size_t inlen;
2420 	dma_addr_t dma_addr;
2421 	efx_qword_t *txd;
2422 	int rc;
2423 	int i;
2424 	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
2425 
2426 	/* Only attempt to enable TX timestamping if we have the license for it,
2427 	 * otherwise TXQ init will fail
2428 	 */
2429 	if (!(nic_data->licensed_features &
2430 	      (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
2431 		tx_queue->timestamping = false;
2432 		/* Disable sync events on this channel. */
2433 		if (efx->type->ptp_set_ts_sync_events)
2434 			efx->type->ptp_set_ts_sync_events(efx, false, false);
2435 	}
2436 
2437 	/* TSOv2 is a scarce resource that can only be configured on a limited
2438 	 * number of queues.  TSO without checksum offload is not really a
2439 	 * thing, so we only enable TSOv2 for queues that do checksum offload.
2440 	 * TSOv2 also cannot be used together with hardware timestamping.
2441 	 */
2442 	if (csum_offload && (nic_data->datapath_caps2 &
2443 			(1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
2444 	    !tx_queue->timestamping) {
2445 		tso_v2 = true;
2446 		netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
2447 				channel->channel);
2448 	}
2449 
2450 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
2451 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
2452 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
2453 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
2454 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
2455 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
2456 
2457 	dma_addr = tx_queue->txd.buf.dma_addr;
2458 
2459 	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
2460 		  tx_queue->queue, entries, (u64)dma_addr);
2461 
2462 	for (i = 0; i < entries; ++i) {
2463 		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
2464 		dma_addr += EFX_BUF_SIZE;
2465 	}
2466 
2467 	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
2468 
2469 	do {
2470 		MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
2471 				/* This flag was removed from mcdi_pcol.h for
2472 				 * the non-_EXT version of INIT_TXQ.  However,
2473 				 * firmware still honours it.
2474 				 */
2475 				INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
2476 				INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
2477 				INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
2478 				INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
2479 						tx_queue->timestamping);
2480 
2481 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
2482 					NULL, 0, NULL);
2483 		if (rc == -ENOSPC && tso_v2) {
2484 			/* Retry without TSOv2 if we're short on contexts. */
2485 			tso_v2 = false;
2486 			netif_warn(efx, probe, efx->net_dev,
2487 				   "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
2488 		} else if (rc) {
2489 			efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
2490 					       MC_CMD_INIT_TXQ_EXT_IN_LEN,
2491 					       NULL, 0, rc);
2492 			goto fail;
2493 		}
2494 	} while (rc);
2495 
2496 	/* A previous user of this TX queue might have left a bomb behind
2497 	 * by writing a descriptor to the TX push collector but never
2498 	 * ringing the doorbell.  (Each collector belongs to a port, not a
2499 	 * queue or function, so it cannot easily be reset.)  We must
2500 	 * attempt to push a no-op descriptor in its place.
2501 	 */
2502 	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
2503 	tx_queue->insert_count = 1;
2504 	txd = efx_tx_desc(tx_queue, 0);
2505 	EFX_POPULATE_QWORD_5(*txd,
2506 			     ESF_DZ_TX_DESC_IS_OPT, true,
2507 			     ESF_DZ_TX_OPTION_TYPE,
2508 			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
2509 			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
2510 			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
2511 			     ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
2512 	tx_queue->write_count = 1;
2513 
2514 	if (tso_v2) {
2515 		tx_queue->handle_tso = efx_ef10_tx_tso_desc;
2516 		tx_queue->tso_version = 2;
2517 	} else if (nic_data->datapath_caps &
2518 			(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
2519 		tx_queue->tso_version = 1;
2520 	}
2521 
2522 	wmb();
2523 	efx_ef10_push_tx_desc(tx_queue, txd);
2524 
2525 	return;
2526 
2527 fail:
2528 	netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
2529 		    tx_queue->queue);
2530 }
2531 
2532 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
2533 {
2534 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
2535 	MCDI_DECLARE_BUF_ERR(outbuf);
2536 	struct efx_nic *efx = tx_queue->efx;
2537 	size_t outlen;
2538 	int rc;
2539 
2540 	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
2541 		       tx_queue->queue);
2542 
2543 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
2544 			  outbuf, sizeof(outbuf), &outlen);
2545 
2546 	if (rc && rc != -EALREADY)
2547 		goto fail;
2548 
2549 	return;
2550 
2551 fail:
2552 	efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
2553 			       outbuf, outlen, rc);
2554 }
2555 
2556 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
2557 {
2558 	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
2559 }
2560 
2561 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
2562 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
2563 {
2564 	unsigned int write_ptr;
2565 	efx_dword_t reg;
2566 
2567 	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2568 	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
2569 	efx_writed_page(tx_queue->efx, &reg,
2570 			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
2571 }
2572 
2573 #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff
2574 
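/* Clip a DMA descriptor so that it never exceeds the 14-bit length
 * limit and, when split, always ends on a page boundary.  For
 * example, assuming a 4KiB EFX_PAGE_SIZE, a 0x6000-byte fragment at
 * dma_addr 0x11ff0 has end 0x15fef and is clipped to
 * 0x15000 - 0x11ff0 = 0x3010 bytes, so the next descriptor starts
 * page-aligned at 0x15000.
 */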
2575 static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
2576 					  dma_addr_t dma_addr, unsigned int len)
2577 {
2578 	if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
2579 		/* If we need to break across multiple descriptors we should
2580 		 * stop at a page boundary. This assumes the length limit is
2581 		 * greater than the page size.
2582 		 */
2583 		dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;
2584 
2585 		BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
2586 		len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
2587 	}
2588 
2589 	return len;
2590 }
2591 
2592 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
2593 {
2594 	unsigned int old_write_count = tx_queue->write_count;
2595 	struct efx_tx_buffer *buffer;
2596 	unsigned int write_ptr;
2597 	efx_qword_t *txd;
2598 
2599 	tx_queue->xmit_more_available = false;
2600 	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
2601 		return;
2602 
2603 	do {
2604 		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2605 		buffer = &tx_queue->buffer[write_ptr];
2606 		txd = efx_tx_desc(tx_queue, write_ptr);
2607 		++tx_queue->write_count;
2608 
2609 		/* Create TX descriptor ring entry */
2610 		if (buffer->flags & EFX_TX_BUF_OPTION) {
2611 			*txd = buffer->option;
2612 			if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
2613 				/* PIO descriptor */
2614 				tx_queue->packet_write_count = tx_queue->write_count;
2615 		} else {
2616 			tx_queue->packet_write_count = tx_queue->write_count;
2617 			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
2618 			EFX_POPULATE_QWORD_3(
2619 				*txd,
2620 				ESF_DZ_TX_KER_CONT,
2621 				buffer->flags & EFX_TX_BUF_CONT,
2622 				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
2623 				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
2624 		}
2625 	} while (tx_queue->write_count != tx_queue->insert_count);
2626 
2627 	wmb(); /* Ensure descriptors are written before they are fetched */
2628 
2629 	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
2630 		txd = efx_tx_desc(tx_queue,
2631 				  old_write_count & tx_queue->ptr_mask);
2632 		efx_ef10_push_tx_desc(tx_queue, txd);
2633 		++tx_queue->pushes;
2634 	} else {
2635 		efx_ef10_notify_tx_desc(tx_queue);
2636 	}
2637 }
2638 
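/* Each traffic class (TCP/UDP/other, IPv4/IPv6) has a 4-bit RSS mode
 * field in the context flags word, assembled from the source/dest
 * address and port bits above.  The default value programs 4-tuple
 * hashing for TCP and 2-tuple (addresses only) hashing for UDP and
 * other IP traffic, alongside the legacy per-protocol Toeplitz enable
 * bits.
 */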
2639 #define RSS_MODE_HASH_ADDRS	(1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
2640 				 1 << RSS_MODE_HASH_DST_ADDR_LBN)
2641 #define RSS_MODE_HASH_PORTS	(1 << RSS_MODE_HASH_SRC_PORT_LBN |\
2642 				 1 << RSS_MODE_HASH_DST_PORT_LBN)
2643 #define RSS_CONTEXT_FLAGS_DEFAULT	(1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
2644 					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
2645 					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
2646 					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
2647 					 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
2648 					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
2649 					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
2650 					 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
2651 					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
2652 					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
2653 
2654 static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags)
2655 {
2656 	/* Firmware had a bug (sfc bug 61952) where it would not actually
2657 	 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
2658 	 * This meant that it would always contain whatever was previously
2659 	 * in the MCDI buffer.  Fortunately, all firmware versions with
2660 	 * this bug have the same default flags value for a newly-allocated
2661 	 * RSS context, and the only time we want to get the flags is just
2662 	 * after allocating.  Moreover, the response has a 32-bit hole
2663 	 * where the context ID would be in the request, so we can use an
2664 	 * overlength buffer in the request and pre-fill the flags field
2665 	 * with what we believe the default to be.  Thus if the firmware
2666 	 * has the bug, it will leave our pre-filled value in the flags
2667 	 * field of the response, and we will get the right answer.
2668 	 *
2669 	 * However, this does mean that this function should NOT be used if
2670 	 * the RSS context flags might not be their defaults - it is ONLY
2671 	 * reliably correct for a newly-allocated RSS context.
2672 	 */
2673 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2674 	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2675 	size_t outlen;
2676 	int rc;
2677 
2678 	/* Check we have a hole for the context ID */
2679 	BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
2680 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
2681 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
2682 		       RSS_CONTEXT_FLAGS_DEFAULT);
2683 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
2684 			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
2685 	if (rc == 0) {
2686 		if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
2687 			rc = -EIO;
2688 		else
2689 			*flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
2690 	}
2691 	return rc;
2692 }
2693 
2694 /* Attempt to enable 4-tuple UDP hashing on the specified RSS context.
2695  * If we fail, we just leave the RSS context at its default hash settings,
2696  * which is safe but may slightly reduce performance.
2697  * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
2698  * just need to set the UDP port-hashing flags (for both IP versions).
2699  */
2700 static void efx_ef10_set_rss_flags(struct efx_nic *efx,
2701 				   struct efx_rss_context *ctx)
2702 {
2703 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
2704 	u32 flags;
2705 
2706 	BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
2707 
2708 	if (efx_ef10_get_rss_flags(efx, ctx->context_id, &flags) != 0)
2709 		return;
2710 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
2711 		       ctx->context_id);
2712 	flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
2713 	flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
2714 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
2715 	if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
2716 			  NULL, 0, NULL))
2717 		/* Succeeded, so UDP 4-tuple is now enabled */
2718 		ctx->rx_hash_udp_4tuple = true;
2719 }
2720 
2721 static int efx_ef10_alloc_rss_context(struct efx_nic *efx, bool exclusive,
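/* Allocate an RSS context of one of two kinds: an EXCLUSIVE context
 * sized to efx->rss_spread that we may later reprogram, or a SHARED
 * context whose size must be a power of two and is capped at
 * EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE.  A shared context of size 1
 * would achieve nothing, so that case succeeds without allocating.
 */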
2722 				      struct efx_rss_context *ctx,
2723 				      unsigned *context_size)
2724 {
2725 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
2726 	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
2727 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2728 	size_t outlen;
2729 	int rc;
2730 	u32 alloc_type = exclusive ?
2731 				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
2732 				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
2733 	unsigned rss_spread = exclusive ?
2734 				efx->rss_spread :
2735 				min(rounddown_pow_of_two(efx->rss_spread),
2736 				    EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
2737 
2738 	if (!exclusive && rss_spread == 1) {
2739 		ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
2740 		if (context_size)
2741 			*context_size = 1;
2742 		return 0;
2743 	}
2744 
2745 	if (nic_data->datapath_caps &
2746 	    1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
2747 		return -EOPNOTSUPP;
2748 
2749 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
2750 		       nic_data->vport_id);
2751 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
2752 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
2753 
2754 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
2755 		outbuf, sizeof(outbuf), &outlen);
2756 	if (rc != 0)
2757 		return rc;
2758 
2759 	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
2760 		return -EIO;
2761 
2762 	ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
2763 
2764 	if (context_size)
2765 		*context_size = rss_spread;
2766 
2767 	if (nic_data->datapath_caps &
2768 	    1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
2769 		efx_ef10_set_rss_flags(efx, ctx);
2770 
2771 	return 0;
2772 }
2773 
2774 static int efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
2775 {
2776 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
2777 
2778 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
2779 		       context);
2780 	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
2781 			    NULL, 0, NULL);
2782 }
2783 
2784 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
2785 				       const u32 *rx_indir_table, const u8 *key)
2786 {
2787 	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
2788 	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
2789 	int i, rc;
2790 
2791 	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
2792 		       context);
2793 	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
2794 		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
2795 
2796 	/* This iterates over the length of efx->rss_context.rx_indir_table, but
2797 	 * copies bytes from rx_indir_table.  That's because the latter is a
2798 	 * pointer rather than an array, but should have the same length.
2799 	 * The efx->rss_context.rx_hash_key loop below is similar.
2800 	 */
2801 	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
2802 		MCDI_PTR(tablebuf,
2803 			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
2804 				(u8) rx_indir_table[i];
2805 
2806 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
2807 			  sizeof(tablebuf), NULL, 0, NULL);
2808 	if (rc != 0)
2809 		return rc;
2810 
2811 	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
2812 		       context);
2813 	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
2814 		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2815 	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
2816 		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
2817 
2818 	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
2819 			    sizeof(keybuf), NULL, 0, NULL);
2820 }
2821 
2822 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
2823 {
2824 	int rc;
2825 
2826 	if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) {
2827 		rc = efx_ef10_free_rss_context(efx, efx->rss_context.context_id);
2828 		WARN_ON(rc != 0);
2829 	}
2830 	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
2831 }
2832 
2833 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
2834 					      unsigned *context_size)
2835 {
2836 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2837 	int rc = efx_ef10_alloc_rss_context(efx, false, &efx->rss_context,
2838 					    context_size);
2839 
2840 	if (rc != 0)
2841 		return rc;
2842 
2843 	nic_data->rx_rss_context_exclusive = false;
2844 	efx_set_default_rx_indir_table(efx, &efx->rss_context);
2845 	return 0;
2846 }
2847 
2848 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
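/* Switch the default RSS context to an exclusive one.  A fresh
 * context is allocated first and the old one freed only after the new
 * table and key have been programmed, so that on failure we can fall
 * back to the previous context and keep the datapath working.
 */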
2849 						 const u32 *rx_indir_table,
2850 						 const u8 *key)
2851 {
2852 	u32 old_rx_rss_context = efx->rss_context.context_id;
2853 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2854 	int rc;
2855 
2856 	if (efx->rss_context.context_id == EFX_EF10_RSS_CONTEXT_INVALID ||
2857 	    !nic_data->rx_rss_context_exclusive) {
2858 		rc = efx_ef10_alloc_rss_context(efx, true, &efx->rss_context,
2859 						NULL);
2860 		if (rc == -EOPNOTSUPP)
2861 			return rc;
2862 		else if (rc != 0)
2863 			goto fail1;
2864 	}
2865 
2866 	rc = efx_ef10_populate_rss_table(efx, efx->rss_context.context_id,
2867 					 rx_indir_table, key);
2868 	if (rc != 0)
2869 		goto fail2;
2870 
2871 	if (efx->rss_context.context_id != old_rx_rss_context &&
2872 	    old_rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2873 		WARN_ON(efx_ef10_free_rss_context(efx, old_rx_rss_context) != 0);
2874 	nic_data->rx_rss_context_exclusive = true;
2875 	if (rx_indir_table != efx->rss_context.rx_indir_table)
2876 		memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
2877 		       sizeof(efx->rss_context.rx_indir_table));
2878 	if (key != efx->rss_context.rx_hash_key)
2879 		memcpy(efx->rss_context.rx_hash_key, key,
2880 		       efx->type->rx_hash_key_size);
2881 
2882 	return 0;
2883 
2884 fail2:
2885 	if (old_rx_rss_context != efx->rss_context.context_id) {
2886 		WARN_ON(efx_ef10_free_rss_context(efx, efx->rss_context.context_id) != 0);
2887 		efx->rss_context.context_id = old_rx_rss_context;
2888 	}
2889 fail1:
2890 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2891 	return rc;
2892 }
2893 
2894 static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx,
2895 					       struct efx_rss_context *ctx,
2896 					       const u32 *rx_indir_table,
2897 					       const u8 *key)
2898 {
2899 	int rc;
2900 
2901 	WARN_ON(!mutex_is_locked(&efx->rss_lock));
2902 
2903 	if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
2904 		rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL);
2905 		if (rc)
2906 			return rc;
2907 	}
2908 
2909 	if (!rx_indir_table) /* Delete this context */
2910 		return efx_ef10_free_rss_context(efx, ctx->context_id);
2911 
2912 	rc = efx_ef10_populate_rss_table(efx, ctx->context_id,
2913 					 rx_indir_table, key);
2914 	if (rc)
2915 		return rc;
2916 
2917 	memcpy(ctx->rx_indir_table, rx_indir_table,
2918 	       sizeof(efx->rss_context.rx_indir_table));
2919 	memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
2920 
2921 	return 0;
2922 }
2923 
2924 static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx,
2925 					       struct efx_rss_context *ctx)
2926 {
2927 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
2928 	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
2929 	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
2930 	size_t outlen;
2931 	int rc, i;
2932 
2933 	WARN_ON(!mutex_is_locked(&efx->rss_lock));
2934 
2935 	BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
2936 		     MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
2937 
2938 	if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
2939 		return -ENOENT;
2940 
2941 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
2942 		       ctx->context_id);
2943 	BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
2944 		     MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
2945 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
2946 			  tablebuf, sizeof(tablebuf), &outlen);
2947 	if (rc != 0)
2948 		return rc;
2949 
2950 	if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
2951 		return -EIO;
2952 
2953 	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
2954 		ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
2955 				RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
2956 
2957 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
2958 		       ctx->context_id);
2959 	BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
2960 		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2961 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
2962 			  keybuf, sizeof(keybuf), &outlen);
2963 	if (rc != 0)
2964 		return rc;
2965 
2966 	if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
2967 		return -EIO;
2968 
2969 	for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
2970 		ctx->rx_hash_key[i] = MCDI_PTR(
2971 				keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
2972 
2973 	return 0;
2974 }
2975 
2976 static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
2977 {
2978 	int rc;
2979 
2980 	mutex_lock(&efx->rss_lock);
2981 	rc = efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context);
2982 	mutex_unlock(&efx->rss_lock);
2983 	return rc;
2984 }
2985 
2986 static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx)
2987 {
2988 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2989 	struct efx_rss_context *ctx;
2990 	int rc;
2991 
2992 	WARN_ON(!mutex_is_locked(&efx->rss_lock));
2993 
2994 	if (!nic_data->must_restore_rss_contexts)
2995 		return;
2996 
2997 	list_for_each_entry(ctx, &efx->rss_context.list, list) {
2998 		/* previous NIC RSS context is gone */
2999 		ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
3000 		/* so try to allocate a new one */
3001 		rc = efx_ef10_rx_push_rss_context_config(efx, ctx,
3002 							 ctx->rx_indir_table,
3003 							 ctx->rx_hash_key);
3004 		if (rc)
3005 			netif_warn(efx, probe, efx->net_dev,
3006 				   "failed to restore RSS context %u, rc=%d"
3007 				   "; RSS filters may fail to be applied\n",
3008 				   ctx->user_id, rc);
3009 	}
3010 	nic_data->must_restore_rss_contexts = false;
3011 }
3012 
3013 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
3014 					  const u32 *rx_indir_table,
3015 					  const u8 *key)
3016 {
3017 	int rc;
3018 
3019 	if (efx->rss_spread == 1)
3020 		return 0;
3021 
3022 	if (!key)
3023 		key = efx->rss_context.rx_hash_key;
3024 
3025 	rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
3026 
3027 	if (rc == -ENOBUFS && !user) {
3028 		unsigned context_size;
3029 		bool mismatch = false;
3030 		size_t i;
3031 
3032 		for (i = 0;
3033 		     i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
3034 		     i++)
3035 			mismatch = rx_indir_table[i] !=
3036 				ethtool_rxfh_indir_default(i, efx->rss_spread);
3037 
3038 		rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
3039 		if (rc == 0) {
3040 			if (context_size != efx->rss_spread)
3041 				netif_warn(efx, probe, efx->net_dev,
3042 					   "Could not allocate an exclusive RSS"
3043 					   " context; allocated a shared one of"
3044 					   " different size."
3045 					   " Wanted %u, got %u.\n",
3046 					   efx->rss_spread, context_size);
3047 			else if (mismatch)
3048 				netif_warn(efx, probe, efx->net_dev,
3049 					   "Could not allocate an exclusive RSS"
3050 					   " context; allocated a shared one but"
3051 					   " could not apply custom"
3052 					   " indirection.\n");
3053 			else
3054 				netif_info(efx, probe, efx->net_dev,
3055 					   "Could not allocate an exclusive RSS"
3056 					   " context; allocated a shared one.\n");
3057 		}
3058 	}
3059 	return rc;
3060 }
3061 
3062 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
3063 					  const u32 *rx_indir_table
3064 					  __attribute__ ((unused)),
3065 					  const u8 *key
3066 					  __attribute__ ((unused)))
3067 {
3068 	if (user)
3069 		return -EOPNOTSUPP;
3070 	if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID)
3071 		return 0;
3072 	return efx_ef10_rx_push_shared_rss_config(efx, NULL);
3073 }
3074 
3075 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
3076 {
3077 	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
3078 				    (rx_queue->ptr_mask + 1) *
3079 				    sizeof(efx_qword_t),
3080 				    GFP_KERNEL);
3081 }
3082 
3083 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
3084 {
3085 	MCDI_DECLARE_BUF(inbuf,
3086 			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
3087 						EFX_BUF_SIZE));
3088 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
3089 	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
3090 	struct efx_nic *efx = rx_queue->efx;
3091 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
3092 	size_t inlen;
3093 	dma_addr_t dma_addr;
3094 	int rc;
3095 	int i;
3096 	BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
3097 
3098 	rx_queue->scatter_n = 0;
3099 	rx_queue->scatter_len = 0;
3100 
3101 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
3102 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
3103 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
3104 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
3105 		       efx_rx_queue_index(rx_queue));
3106 	MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
3107 			      INIT_RXQ_IN_FLAG_PREFIX, 1,
3108 			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
3109 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
3110 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
3111 
3112 	dma_addr = rx_queue->rxd.buf.dma_addr;
3113 
3114 	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
3115 		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
3116 
3117 	for (i = 0; i < entries; ++i) {
3118 		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
3119 		dma_addr += EFX_BUF_SIZE;
3120 	}
3121 
3122 	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
3123 
3124 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
3125 			  NULL, 0, NULL);
3126 	if (rc)
3127 		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
3128 			    efx_rx_queue_index(rx_queue));
3129 }
3130 
3131 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
3132 {
3133 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
3134 	MCDI_DECLARE_BUF_ERR(outbuf);
3135 	struct efx_nic *efx = rx_queue->efx;
3136 	size_t outlen;
3137 	int rc;
3138 
3139 	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
3140 		       efx_rx_queue_index(rx_queue));
3141 
3142 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
3143 			  outbuf, sizeof(outbuf), &outlen);
3144 
3145 	if (rc && rc != -EALREADY)
3146 		goto fail;
3147 
3148 	return;
3149 
3150 fail:
3151 	efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
3152 			       outbuf, outlen, rc);
3153 }
3154 
3155 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
3156 {
3157 	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
3158 }
3159 
3160 /* This creates an entry in the RX descriptor queue */
3161 static inline void
3162 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
3163 {
3164 	struct efx_rx_buffer *rx_buf;
3165 	efx_qword_t *rxd;
3166 
3167 	rxd = efx_rx_desc(rx_queue, index);
3168 	rx_buf = efx_rx_buffer(rx_queue, index);
3169 	EFX_POPULATE_QWORD_2(*rxd,
3170 			     ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
3171 			     ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
3172 }
3173 
3174 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
3175 {
3176 	struct efx_nic *efx = rx_queue->efx;
3177 	unsigned int write_count;
3178 	efx_dword_t reg;
3179 
3180 	/* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
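	/* e.g. with added_count == 21 only 16 descriptors are pushed
	 * now; the remaining 5 are picked up by a later call.
	 */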
3181 	write_count = rx_queue->added_count & ~7;
3182 	if (rx_queue->notified_count == write_count)
3183 		return;
3184 
3185 	do {
3186 		efx_ef10_build_rx_desc(rx_queue,
3187 				       rx_queue->notified_count &
3188 				       rx_queue->ptr_mask);
3189 	} while (++rx_queue->notified_count != write_count);
3190 
3191 	wmb();
3192 	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
3193 			     write_count & rx_queue->ptr_mask);
3194 	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
3195 			efx_rx_queue_index(rx_queue));
3196 }
3197 
3198 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
3199 
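/* Ask the MC to post a driver-generated event on this channel's event
 * queue.  The event carries the EFX_EF10_REFILL code, so the actual
 * refill then runs in the channel's event-processing (NAPI) context
 * rather than in the context that noticed the empty queue.
 */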
3200 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
3201 {
3202 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
3203 	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3204 	efx_qword_t event;
3205 
3206 	EFX_POPULATE_QWORD_2(event,
3207 			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3208 			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);
3209 
3210 	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3211 
3212 	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3213 	 * already swapped the data to little-endian order.
3214 	 */
3215 	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3216 	       sizeof(efx_qword_t));
3217 
3218 	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
3219 			   inbuf, sizeof(inbuf), 0,
3220 			   efx_ef10_rx_defer_refill_complete, 0);
3221 }
3222 
3223 static void
3224 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
3225 				  int rc, efx_dword_t *outbuf,
3226 				  size_t outlen_actual)
3227 {
3228 	/* nothing to do */
3229 }
3230 
3231 static int efx_ef10_ev_probe(struct efx_channel *channel)
3232 {
3233 	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
3234 				    (channel->eventq_mask + 1) *
3235 				    sizeof(efx_qword_t),
3236 				    GFP_KERNEL);
3237 }
3238 
3239 static void efx_ef10_ev_fini(struct efx_channel *channel)
3240 {
3241 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
3242 	MCDI_DECLARE_BUF_ERR(outbuf);
3243 	struct efx_nic *efx = channel->efx;
3244 	size_t outlen;
3245 	int rc;
3246 
3247 	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
3248 
3249 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
3250 				outbuf, sizeof(outbuf), &outlen);
3251 
3252 	if (rc && rc != -EALREADY)
3253 		goto fail;
3254 
3255 	return;
3256 
3257 fail:
3258 	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
3259 			       outbuf, outlen, rc);
3260 }
3261 
3262 static int efx_ef10_ev_init(struct efx_channel *channel)
3263 {
3264 	MCDI_DECLARE_BUF(inbuf,
3265 			 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
3266 						   EFX_BUF_SIZE));
3267 	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
3268 	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
3269 	struct efx_nic *efx = channel->efx;
3270 	struct efx_ef10_nic_data *nic_data;
3271 	size_t inlen, outlen;
3272 	unsigned int enabled, implemented;
3273 	dma_addr_t dma_addr;
3274 	int rc;
3275 	int i;
3276 
3277 	nic_data = efx->nic_data;
3278 
3279 	/* Fill event queue with all ones (i.e. empty events) */
3280 	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
3281 
3282 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
3283 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
3284 	/* INIT_EVQ expects index in vector table, not absolute */
3285 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
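	/* Timer-based and event-count based interrupt moderation are both
	 * initially disabled.
	 */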
3286 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
3287 		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
3288 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
3289 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
3290 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
3291 		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
3292 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
3293 
3294 	if (nic_data->datapath_caps2 &
3295 	    1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
3296 		/* Use the new generic approach to specifying event queue
3297 		 * configuration, requesting lower latency or higher throughput.
3298 		 * The options that actually get used appear in the output.
3299 		 */
3300 		MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
3301 				      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
3302 				      INIT_EVQ_V2_IN_FLAG_TYPE,
3303 				      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
3304 	} else {
3305 		bool cut_thru = !(nic_data->datapath_caps &
3306 			1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
3307 
3308 		MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
3309 				      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
3310 				      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
3311 				      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
3312 				      INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
3313 	}
3314 
3315 	dma_addr = channel->eventq.buf.dma_addr;
3316 	for (i = 0; i < entries; ++i) {
3317 		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
3318 		dma_addr += EFX_BUF_SIZE;
3319 	}
3320 
3321 	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
3322 
3323 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
3324 			  outbuf, sizeof(outbuf), &outlen);
3325 
3326 	if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
3327 		netif_dbg(efx, drv, efx->net_dev,
3328 			  "Channel %d using event queue flags %08x\n",
3329 			  channel->channel,
3330 			  MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
3331 
3332 	/* IRQ return is ignored */
3333 	if (channel->channel || rc)
3334 		return rc;
3335 
3336 	/* Successfully created event queue on channel 0 */
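	/* On channel 0 only, check whether workaround 26807 (firmware
	 * multicast filter chaining) is implemented and enable it if
	 * possible.  Enabling it can reset the other functions on the NIC.
	 */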
3337 	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
3338 	if (rc == -ENOSYS) {
3339 		/* GET_WORKAROUNDS predates workaround 26807, so firmware that
3340 		 * lacks GET_WORKAROUNDS cannot have the workaround either.
3341 		 */
3342 		nic_data->workaround_26807 = false;
3343 		rc = 0;
3344 	} else if (rc) {
3345 		goto fail;
3346 	} else {
3347 		nic_data->workaround_26807 =
3348 			!!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
3349 
3350 		if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
3351 		    !nic_data->workaround_26807) {
3352 			unsigned int flags;
3353 
3354 			rc = efx_mcdi_set_workaround(efx,
3355 						     MC_CMD_WORKAROUND_BUG26807,
3356 						     true, &flags);
3357 
3358 			if (!rc) {
3359 				if (flags &
3360 				    1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
3361 					netif_info(efx, drv, efx->net_dev,
3362 						   "other functions on NIC have been reset\n");
3363 
3364 					/* With MCFW v4.6.x and earlier, the
3365 					 * boot count will have incremented,
3366 					 * so re-read the warm_boot_count
3367 					 * value now to ensure this function
3368 					 * doesn't think it has changed next
3369 					 * time it checks.
3370 					 */
3371 					rc = efx_ef10_get_warm_boot_count(efx);
3372 					if (rc >= 0) {
3373 						nic_data->warm_boot_count = rc;
3374 						rc = 0;
3375 					}
3376 				}
3377 				nic_data->workaround_26807 = true;
3378 			} else if (rc == -EPERM) {
3379 				rc = 0;
3380 			}
3381 		}
3382 	}
3383 
3384 	if (!rc)
3385 		return 0;
3386 
3387 fail:
3388 	efx_ef10_ev_fini(channel);
3389 	return rc;
3390 }
3391 
3392 static void efx_ef10_ev_remove(struct efx_channel *channel)
3393 {
3394 	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
3395 }
3396 
3397 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
3398 					   unsigned int rx_queue_label)
3399 {
3400 	struct efx_nic *efx = rx_queue->efx;
3401 
3402 	netif_info(efx, hw, efx->net_dev,
3403 		   "rx event arrived on queue %d labeled as queue %u\n",
3404 		   efx_rx_queue_index(rx_queue), rx_queue_label);
3405 
3406 	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3407 }
3408 
3409 static void
3410 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
3411 			     unsigned int actual, unsigned int expected)
3412 {
3413 	unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
3414 	struct efx_nic *efx = rx_queue->efx;
3415 
3416 	netif_info(efx, hw, efx->net_dev,
3417 		   "dropped %d events (index=%d expected=%d)\n",
3418 		   dropped, actual, expected);
3419 
3420 	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3421 }
3422 
3423 /* A partially received packet was aborted; clean up the queue state. */
3424 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
3425 {
3426 	unsigned int rx_desc_ptr;
3427 
3428 	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
3429 		  "scattered RX aborted (dropping %u buffers)\n",
3430 		  rx_queue->scatter_n);
3431 
3432 	rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
3433 
3434 	efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
3435 		      0, EFX_RX_PKT_DISCARD);
3436 
3437 	rx_queue->removed_count += rx_queue->scatter_n;
3438 	rx_queue->scatter_n = 0;
3439 	rx_queue->scatter_len = 0;
3440 	++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
3441 }
3442 
3443 static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
3444 					   unsigned int n_packets,
3445 					   unsigned int rx_encap_hdr,
3446 					   unsigned int rx_l3_class,
3447 					   unsigned int rx_l4_class,
3448 					   const efx_qword_t *event)
3449 {
3450 	struct efx_nic *efx = channel->efx;
3451 	bool handled = false;
3452 
3453 	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
3454 		if (!(efx->net_dev->features & NETIF_F_RXALL)) {
3455 			if (!efx->loopback_selftest)
3456 				channel->n_rx_eth_crc_err += n_packets;
3457 			return EFX_RX_PKT_DISCARD;
3458 		}
3459 		handled = true;
3460 	}
3461 	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
3462 		if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3463 			     rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3464 			     rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3465 			     rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3466 			     rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3467 			netdev_WARN(efx->net_dev,
3468 				    "invalid class for RX_IPCKSUM_ERR: event="
3469 				    EFX_QWORD_FMT "\n",
3470 				    EFX_QWORD_VAL(*event));
3471 		if (!efx->loopback_selftest)
3472 			*(rx_encap_hdr ?
3473 			  &channel->n_rx_outer_ip_hdr_chksum_err :
3474 			  &channel->n_rx_ip_hdr_chksum_err) += n_packets;
3475 		return 0;
3476 	}
3477 	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
3478 		if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3479 			     ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3480 			       rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
3481 			      (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
3482 			       rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
3483 			netdev_WARN(efx->net_dev,
3484 				    "invalid class for RX_TCPUDP_CKSUM_ERR: event="
3485 				    EFX_QWORD_FMT "\n",
3486 				    EFX_QWORD_VAL(*event));
3487 		if (!efx->loopback_selftest)
3488 			*(rx_encap_hdr ?
3489 			  &channel->n_rx_outer_tcp_udp_chksum_err :
3490 			  &channel->n_rx_tcp_udp_chksum_err) += n_packets;
3491 		return 0;
3492 	}
3493 	if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
3494 		if (unlikely(!rx_encap_hdr))
3495 			netdev_WARN(efx->net_dev,
3496 				    "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
3497 				    EFX_QWORD_FMT "\n",
3498 				    EFX_QWORD_VAL(*event));
3499 		else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3500 				  rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3501 				  rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3502 				  rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3503 			netdev_WARN(efx->net_dev,
3504 				    "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
3505 				    EFX_QWORD_FMT "\n",
3506 				    EFX_QWORD_VAL(*event));
3507 		if (!efx->loopback_selftest)
3508 			channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
3509 		return 0;
3510 	}
3511 	if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
3512 		if (unlikely(!rx_encap_hdr))
3513 			netdev_WARN(efx->net_dev,
3514 				    "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3515 				    EFX_QWORD_FMT "\n",
3516 				    EFX_QWORD_VAL(*event));
3517 		else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3518 				   rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
3519 				  (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
3520 				   rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
3521 			netdev_WARN(efx->net_dev,
3522 				    "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3523 				    EFX_QWORD_FMT "\n",
3524 				    EFX_QWORD_VAL(*event));
3525 		if (!efx->loopback_selftest)
3526 			channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
3527 		return 0;
3528 	}
3529 
3530 	WARN_ON(!handled); /* No error bits were recognised */
3531 	return 0;
3532 }
3533 
3534 static int efx_ef10_handle_rx_event(struct efx_channel *channel,
3535 				    const efx_qword_t *event)
3536 {
3537 	unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
3538 	unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
3539 	unsigned int n_descs, n_packets, i;
3540 	struct efx_nic *efx = channel->efx;
3541 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
3542 	struct efx_rx_queue *rx_queue;
3543 	efx_qword_t errors;
3544 	bool rx_cont;
3545 	u16 flags = 0;
3546 
3547 	if (unlikely(READ_ONCE(efx->reset_pending)))
3548 		return 0;
3549 
3550 	/* Basic packet information */
3551 	rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
3552 	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
3553 	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
3554 	rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
3555 	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
3556 	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
3557 	rx_encap_hdr =
3558 		nic_data->datapath_caps &
3559 			(1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
3560 		EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
3561 		ESE_EZ_ENCAP_HDR_NONE;
3562 
3563 	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
3564 		netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
3565 			    EFX_QWORD_FMT "\n",
3566 			    EFX_QWORD_VAL(*event));
3567 
3568 	rx_queue = efx_channel_get_rx_queue(channel);
3569 
3570 	if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
3571 		efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
3572 
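	/* The event carries only the low bits of the descriptor pointer, so
	 * the descriptor count is computed modulo
	 * 2^ESF_DZ_RX_DSC_PTR_LBITS_WIDTH.
	 */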
3573 	n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
3574 		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3575 
3576 	if (n_descs != rx_queue->scatter_n + 1) {
3577 		struct efx_ef10_nic_data *nic_data = efx->nic_data;
3578 
3579 		/* detect rx abort */
3580 		if (unlikely(n_descs == rx_queue->scatter_n)) {
3581 			if (rx_queue->scatter_n == 0 || rx_bytes != 0)
3582 				netdev_WARN(efx->net_dev,
3583 					    "invalid RX abort: scatter_n=%u event="
3584 					    EFX_QWORD_FMT "\n",
3585 					    rx_queue->scatter_n,
3586 					    EFX_QWORD_VAL(*event));
3587 			efx_ef10_handle_rx_abort(rx_queue);
3588 			return 0;
3589 		}
3590 
3591 		/* Check that RX completion merging is valid, i.e.
3592 		 * the current firmware supports it and this is a
3593 		 * non-scattered packet.
3594 		 */
3595 		if (!(nic_data->datapath_caps &
3596 		      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
3597 		    rx_queue->scatter_n != 0 || rx_cont) {
3598 			efx_ef10_handle_rx_bad_lbits(
3599 				rx_queue, next_ptr_lbits,
3600 				(rx_queue->removed_count +
3601 				 rx_queue->scatter_n + 1) &
3602 				((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3603 			return 0;
3604 		}
3605 
3606 		/* Merged completion for multiple non-scattered packets */
3607 		rx_queue->scatter_n = 1;
3608 		rx_queue->scatter_len = 0;
3609 		n_packets = n_descs;
3610 		++channel->n_rx_merge_events;
3611 		channel->n_rx_merge_packets += n_packets;
3612 		flags |= EFX_RX_PKT_PREFIX_LEN;
3613 	} else {
3614 		++rx_queue->scatter_n;
3615 		rx_queue->scatter_len += rx_bytes;
3616 		if (rx_cont)
3617 			return 0;
3618 		n_packets = 1;
3619 	}
3620 
3621 	EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
3622 				     ESF_DZ_RX_IPCKSUM_ERR, 1,
3623 				     ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
3624 				     ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
3625 				     ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
3626 	EFX_AND_QWORD(errors, *event, errors);
3627 	if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
3628 		flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
3629 							 rx_encap_hdr,
3630 							 rx_l3_class, rx_l4_class,
3631 							 event);
3632 	} else {
3633 		bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
3634 			      rx_l4_class == ESE_FZ_L4_CLASS_UDP;
3635 
3636 		switch (rx_encap_hdr) {
3637 		case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
3638 			flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
3639 			if (tcpudp)
3640 				flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
3641 			break;
3642 		case ESE_EZ_ENCAP_HDR_GRE:
3643 		case ESE_EZ_ENCAP_HDR_NONE:
3644 			if (tcpudp)
3645 				flags |= EFX_RX_PKT_CSUMMED;
3646 			break;
3647 		default:
3648 			netdev_WARN(efx->net_dev,
3649 				    "unknown encapsulation type: event="
3650 				    EFX_QWORD_FMT "\n",
3651 				    EFX_QWORD_VAL(*event));
3652 		}
3653 	}
3654 
3655 	if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
3656 		flags |= EFX_RX_PKT_TCP;
3657 
3658 	channel->irq_mod_score += 2 * n_packets;
3659 
3660 	/* Handle received packet(s) */
3661 	for (i = 0; i < n_packets; i++) {
3662 		efx_rx_packet(rx_queue,
3663 			      rx_queue->removed_count & rx_queue->ptr_mask,
3664 			      rx_queue->scatter_n, rx_queue->scatter_len,
3665 			      flags);
3666 		rx_queue->removed_count += rx_queue->scatter_n;
3667 	}
3668 
3669 	rx_queue->scatter_n = 0;
3670 	rx_queue->scatter_len = 0;
3671 
3672 	return n_packets;
3673 }
3674 
3675 static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
3676 {
3677 	u32 tstamp;
3678 
3679 	tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
3680 	tstamp <<= 16;
3681 	tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
3682 
3683 	return tstamp;
3684 }
3685 
3686 static void
3687 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
3688 {
3689 	struct efx_nic *efx = channel->efx;
3690 	struct efx_tx_queue *tx_queue;
3691 	unsigned int tx_ev_desc_ptr;
3692 	unsigned int tx_ev_q_label;
3693 	unsigned int tx_ev_type;
3694 	u64 ts_part;
3695 
3696 	if (unlikely(READ_ONCE(efx->reset_pending)))
3697 		return;
3698 
3699 	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
3700 		return;
3701 
3702 	/* Get the transmit queue */
3703 	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
3704 	tx_queue = efx_channel_get_tx_queue(channel,
3705 					    tx_ev_q_label % EFX_TXQ_TYPES);
3706 
3707 	if (!tx_queue->timestamping) {
3708 		/* Transmit completion */
3709 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
3710 		efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
3711 		return;
3712 	}
3713 
3714 	/* Transmit timestamps are only available for 8XXX series. They result
3715 	 * in three events per packet. These occur in order, and are:
3716 	 *  - the normal completion event
3717 	 *  - the low part of the timestamp
3718 	 *  - the high part of the timestamp
3719 	 *
3720 	 * Each part of the timestamp is itself split across two 16 bit
3721 	 * fields in the event.
3722 	 */
3723 	tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
3724 
3725 	switch (tx_ev_type) {
3726 	case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
3727 		/* In the case of a queue flush or FLR, we might have
3728 		 * received the previous TX completion event but not the
3729 		 * timestamp events.
3730 		 */
3731 		if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
3732 			efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
3733 
3734 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
3735 						 ESF_DZ_TX_DESCR_INDX);
3736 		tx_queue->completed_desc_ptr =
3737 					tx_ev_desc_ptr & tx_queue->ptr_mask;
3738 		break;
3739 
3740 	case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
3741 		ts_part = efx_ef10_extract_event_ts(event);
3742 		tx_queue->completed_timestamp_minor = ts_part;
3743 		break;
3744 
3745 	case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
3746 		ts_part = efx_ef10_extract_event_ts(event);
3747 		tx_queue->completed_timestamp_major = ts_part;
3748 
3749 		efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
3750 		tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
3751 		break;
3752 
3753 	default:
3754 		netif_err(efx, hw, efx->net_dev,
3755 			  "channel %d unknown tx event type %d (data "
3756 			  EFX_QWORD_FMT ")\n",
3757 			  channel->channel, tx_ev_type,
3758 			  EFX_QWORD_VAL(*event));
3759 		break;
3760 	}
3761 }
3762 
3763 static void
3764 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
3765 {
3766 	struct efx_nic *efx = channel->efx;
3767 	int subcode;
3768 
3769 	subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
3770 
3771 	switch (subcode) {
3772 	case ESE_DZ_DRV_TIMER_EV:
3773 	case ESE_DZ_DRV_WAKE_UP_EV:
3774 		break;
3775 	case ESE_DZ_DRV_START_UP_EV:
3776 		/* Event queue initialisation complete; nothing to do. */
3777 		break;
3778 	default:
3779 		netif_err(efx, hw, efx->net_dev,
3780 			  "channel %d unknown driver event type %d"
3781 			  " (data " EFX_QWORD_FMT ")\n",
3782 			  channel->channel, subcode,
3783 			  EFX_QWORD_VAL(*event));
3784 
3785 	}
3786 }
3787 
3788 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
3789 						   efx_qword_t *event)
3790 {
3791 	struct efx_nic *efx = channel->efx;
3792 	u32 subcode;
3793 
3794 	subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
3795 
3796 	switch (subcode) {
3797 	case EFX_EF10_TEST:
3798 		channel->event_test_cpu = raw_smp_processor_id();
3799 		break;
3800 	case EFX_EF10_REFILL:
3801 		/* The queue must be empty, so we won't receive any RX
3802 		 * events and efx_process_channel() won't refill the
3803 		 * queue.  Refill it here.
3804 		 */
3805 		efx_fast_push_rx_descriptors(&channel->rx_queue, true);
3806 		break;
3807 	default:
3808 		netif_err(efx, hw, efx->net_dev,
3809 			  "channel %d unknown driver event type %u"
3810 			  " (data " EFX_QWORD_FMT ")\n",
3811 			  channel->channel, (unsigned) subcode,
3812 			  EFX_QWORD_VAL(*event));
3813 	}
3814 }
3815 
3816 static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
3817 {
3818 	struct efx_nic *efx = channel->efx;
3819 	efx_qword_t event, *p_event;
3820 	unsigned int read_ptr;
3821 	int ev_code;
3822 	int spent = 0;
3823 
3824 	if (quota <= 0)
3825 		return spent;
3826 
3827 	read_ptr = channel->eventq_read_ptr;
3828 
3829 	for (;;) {
3830 		p_event = efx_event(channel, read_ptr);
3831 		event = *p_event;
3832 
3833 		if (!efx_event_present(&event))
3834 			break;
3835 
3836 		EFX_SET_QWORD(*p_event);
3837 
3838 		++read_ptr;
3839 
3840 		ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
3841 
3842 		netif_vdbg(efx, drv, efx->net_dev,
3843 			   "processing event on %d " EFX_QWORD_FMT "\n",
3844 			   channel->channel, EFX_QWORD_VAL(event));
3845 
3846 		switch (ev_code) {
3847 		case ESE_DZ_EV_CODE_MCDI_EV:
3848 			efx_mcdi_process_event(channel, &event);
3849 			break;
3850 		case ESE_DZ_EV_CODE_RX_EV:
3851 			spent += efx_ef10_handle_rx_event(channel, &event);
3852 			if (spent >= quota) {
3853 				/* XXX can we split a merged event to
3854 				 * avoid going over-quota?
3855 				 */
3856 				spent = quota;
3857 				goto out;
3858 			}
3859 			break;
3860 		case ESE_DZ_EV_CODE_TX_EV:
3861 			efx_ef10_handle_tx_event(channel, &event);
3862 			break;
3863 		case ESE_DZ_EV_CODE_DRIVER_EV:
3864 			efx_ef10_handle_driver_event(channel, &event);
3865 			if (++spent == quota)
3866 				goto out;
3867 			break;
3868 		case EFX_EF10_DRVGEN_EV:
3869 			efx_ef10_handle_driver_generated_event(channel, &event);
3870 			break;
3871 		default:
3872 			netif_err(efx, hw, efx->net_dev,
3873 				  "channel %d unknown event type %d"
3874 				  " (data " EFX_QWORD_FMT ")\n",
3875 				  channel->channel, ev_code,
3876 				  EFX_QWORD_VAL(event));
3877 		}
3878 	}
3879 
3880 out:
3881 	channel->eventq_read_ptr = read_ptr;
3882 	return spent;
3883 }
3884 
3885 static void efx_ef10_ev_read_ack(struct efx_channel *channel)
3886 {
3887 	struct efx_nic *efx = channel->efx;
3888 	efx_dword_t rptr;
3889 
3890 	if (EFX_EF10_WORKAROUND_35388(efx)) {
3891 		BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
3892 			     (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
3893 		BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
3894 			     (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
3895 
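		/* With workaround 35388 the read pointer is written via the
		 * indirect event queue register in two halves, high bits
		 * first and then low bits.
		 */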
3896 		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3897 				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
3898 				     ERF_DD_EVQ_IND_RPTR,
3899 				     (channel->eventq_read_ptr &
3900 				      channel->eventq_mask) >>
3901 				     ERF_DD_EVQ_IND_RPTR_WIDTH);
3902 		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3903 				channel->channel);
3904 		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3905 				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
3906 				     ERF_DD_EVQ_IND_RPTR,
3907 				     channel->eventq_read_ptr &
3908 				     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
3909 		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3910 				channel->channel);
3911 	} else {
3912 		EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
3913 				     channel->eventq_read_ptr &
3914 				     channel->eventq_mask);
3915 		efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
3916 	}
3917 }
3918 
3919 static void efx_ef10_ev_test_generate(struct efx_channel *channel)
3920 {
3921 	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3922 	struct efx_nic *efx = channel->efx;
3923 	efx_qword_t event;
3924 	int rc;
3925 
3926 	EFX_POPULATE_QWORD_2(event,
3927 			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3928 			     ESF_DZ_EV_DATA, EFX_EF10_TEST);
3929 
3930 	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3931 
3932 	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3933 	 * already swapped the data to little-endian order.
3934 	 */
3935 	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3936 	       sizeof(efx_qword_t));
3937 
3938 	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
3939 			  NULL, 0, NULL);
3940 	if (rc != 0)
3941 		goto fail;
3942 
3943 	return;
3944 
3945 fail:
3946 	WARN_ON(true);
3947 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
3948 }
3949 
3950 void efx_ef10_handle_drain_event(struct efx_nic *efx)
3951 {
3952 	if (atomic_dec_and_test(&efx->active_queues))
3953 		wake_up(&efx->flush_wq);
3954 
3955 	WARN_ON(atomic_read(&efx->active_queues) < 0);
3956 }
3957 
3958 static int efx_ef10_fini_dmaq(struct efx_nic *efx)
3959 {
3960 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
3961 	struct efx_channel *channel;
3962 	struct efx_tx_queue *tx_queue;
3963 	struct efx_rx_queue *rx_queue;
3964 	int pending;
3965 
3966 	/* If the MC has just rebooted, the TX/RX queues will have already been
3967 	 * torn down, but efx->active_queues needs to be set to zero.
3968 	 */
3969 	if (nic_data->must_realloc_vis) {
3970 		atomic_set(&efx->active_queues, 0);
3971 		return 0;
3972 	}
3973 
3974 	/* Do not attempt to write to the NIC during EEH recovery */
3975 	if (efx->state != STATE_RECOVERY) {
3976 		efx_for_each_channel(channel, efx) {
3977 			efx_for_each_channel_rx_queue(rx_queue, channel)
3978 				efx_ef10_rx_fini(rx_queue);
3979 			efx_for_each_channel_tx_queue(tx_queue, channel)
3980 				efx_ef10_tx_fini(tx_queue);
3981 		}
3982 
3983 		wait_event_timeout(efx->flush_wq,
3984 				   atomic_read(&efx->active_queues) == 0,
3985 				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
3986 		pending = atomic_read(&efx->active_queues);
3987 		if (pending) {
3988 			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
3989 				  pending);
3990 			return -ETIMEDOUT;
3991 		}
3992 	}
3993 
3994 	return 0;
3995 }
3996 
3997 static void efx_ef10_prepare_flr(struct efx_nic *efx)
3998 {
3999 	atomic_set(&efx->active_queues, 0);
4000 }
4001 
4002 /* Decide whether a filter should be exclusive or else should allow
4003  * delivery to additional recipients.  Currently we decide that
4004  * filters for specific local unicast MAC and IP addresses are
4005  * exclusive.
4006  */
4007 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
4008 {
4009 	if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
4010 	    !is_multicast_ether_addr(spec->loc_mac))
4011 		return true;
4012 
4013 	if ((spec->match_flags &
4014 	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
4015 	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
4016 		if (spec->ether_type == htons(ETH_P_IP) &&
4017 		    !ipv4_is_multicast(spec->loc_host[0]))
4018 			return true;
4019 		if (spec->ether_type == htons(ETH_P_IPV6) &&
4020 		    ((const u8 *)spec->loc_host)[0] != 0xff)
4021 			return true;
4022 	}
4023 
4024 	return false;
4025 }
4026 
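/* entry->spec stores the spec pointer with EFX_EF10_FILTER_FLAGS packed into
 * its low bits; mask the flags off to recover the pointer.
 */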
4027 static struct efx_filter_spec *
4028 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
4029 			   unsigned int filter_idx)
4030 {
4031 	return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
4032 					  ~EFX_EF10_FILTER_FLAGS);
4033 }
4034 
4035 static unsigned int
4036 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
4037 			   unsigned int filter_idx)
4038 {
4039 	return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
4040 }
4041 
4042 static void
4043 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
4044 			  unsigned int filter_idx,
4045 			  const struct efx_filter_spec *spec,
4046 			  unsigned int flags)
4047 {
4048 	table->entry[filter_idx].spec = (unsigned long)spec | flags;
4049 }
4050 
4051 static void
4052 efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
4053 					   const struct efx_filter_spec *spec,
4054 					   efx_dword_t *inbuf)
4055 {
4056 	enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
4057 	u32 match_fields = 0, uc_match, mc_match;
4058 
4059 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4060 		       efx_ef10_filter_is_exclusive(spec) ?
4061 		       MC_CMD_FILTER_OP_IN_OP_INSERT :
4062 		       MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
4063 
4064 	/* Convert match flags and values.  Unlike almost
4065 	 * everything else in MCDI, these fields are in
4066 	 * network byte order.
4067 	 */
4068 #define COPY_VALUE(value, mcdi_field)					     \
4069 	do {							     \
4070 		match_fields |=					     \
4071 			1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	     \
4072 			mcdi_field ## _LBN;			     \
4073 		BUILD_BUG_ON(					     \
4074 			MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
4075 			sizeof(value));				     \
4076 		memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ##	mcdi_field), \
4077 		       &value, sizeof(value));			     \
4078 	} while (0)
4079 #define COPY_FIELD(gen_flag, gen_field, mcdi_field)			     \
4080 	if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
4081 		COPY_VALUE(spec->gen_field, mcdi_field);	     \
4082 	}
4083 	/* Handle encap filters first.  They will always be mismatch
4084 	 * (unknown UC or MC) filters.
4085 	 */
4086 	if (encap_type) {
4087 		/* ether_type and outer_ip_proto need to be variables
4088 		 * because COPY_VALUE wants to memcpy them
4089 		 */
4090 		__be16 ether_type =
4091 			htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
4092 			      ETH_P_IPV6 : ETH_P_IP);
4093 		u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
4094 		u8 outer_ip_proto;
4095 
4096 		switch (encap_type & EFX_ENCAP_TYPES_MASK) {
4097 		case EFX_ENCAP_TYPE_VXLAN:
4098 			vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
4099 			/* fallthrough */
4100 		case EFX_ENCAP_TYPE_GENEVE:
4101 			COPY_VALUE(ether_type, ETHER_TYPE);
4102 			outer_ip_proto = IPPROTO_UDP;
4103 			COPY_VALUE(outer_ip_proto, IP_PROTO);
4104 			/* We always need to set the type field, even
4105 			 * though we're not matching on the TNI.
4106 			 */
4107 			MCDI_POPULATE_DWORD_1(inbuf,
4108 				FILTER_OP_EXT_IN_VNI_OR_VSID,
4109 				FILTER_OP_EXT_IN_VNI_TYPE,
4110 				vni_type);
4111 			break;
4112 		case EFX_ENCAP_TYPE_NVGRE:
4113 			COPY_VALUE(ether_type, ETHER_TYPE);
4114 			outer_ip_proto = IPPROTO_GRE;
4115 			COPY_VALUE(outer_ip_proto, IP_PROTO);
4116 			break;
4117 		default:
4118 			WARN_ON(1);
4119 		}
4120 
4121 		uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
4122 		mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
4123 	} else {
4124 		uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
4125 		mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
4126 	}
4127 
4128 	if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
4129 		match_fields |=
4130 			is_multicast_ether_addr(spec->loc_mac) ?
4131 			1 << mc_match :
4132 			1 << uc_match;
4133 	COPY_FIELD(REM_HOST, rem_host, SRC_IP);
4134 	COPY_FIELD(LOC_HOST, loc_host, DST_IP);
4135 	COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
4136 	COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
4137 	COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
4138 	COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
4139 	COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
4140 	COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
4141 	COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
4142 	COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
4143 #undef COPY_FIELD
4144 #undef COPY_VALUE
4145 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
4146 		       match_fields);
4147 }
4148 
4149 static void efx_ef10_filter_push_prep(struct efx_nic *efx,
4150 				      const struct efx_filter_spec *spec,
4151 				      efx_dword_t *inbuf, u64 handle,
4152 				      struct efx_rss_context *ctx,
4153 				      bool replacing)
4154 {
4155 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
4156 	u32 flags = spec->flags;
4157 
4158 	memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
4159 
4160 	/* If this is an RSS filter, the caller must have given us an RSS context */
4161 	if (flags & EFX_FILTER_FLAG_RX_RSS) {
4162 		/* We don't have the ability to return an error, so we'll just
4163 		 * log a warning and disable RSS for the filter.
4164 		 */
4165 		if (WARN_ON_ONCE(!ctx))
4166 			flags &= ~EFX_FILTER_FLAG_RX_RSS;
4167 		else if (WARN_ON_ONCE(ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID))
4168 			flags &= ~EFX_FILTER_FLAG_RX_RSS;
4169 	}
4170 
4171 	if (replacing) {
4172 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4173 			       MC_CMD_FILTER_OP_IN_OP_REPLACE);
4174 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
4175 	} else {
4176 		efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
4177 	}
4178 
4179 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
4180 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
4181 		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
4182 		       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
4183 		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
4184 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
4185 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
4186 		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
4187 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
4188 		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
4189 		       0 : spec->dmaq_id);
4190 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
4191 		       (flags & EFX_FILTER_FLAG_RX_RSS) ?
4192 		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
4193 		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
4194 	if (flags & EFX_FILTER_FLAG_RX_RSS)
4195 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
4196 }
4197 
4198 static int efx_ef10_filter_push(struct efx_nic *efx,
4199 				const struct efx_filter_spec *spec, u64 *handle,
4200 				struct efx_rss_context *ctx, bool replacing)
4201 {
4202 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4203 	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
4204 	int rc;
4205 
4206 	efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
4207 	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
4208 			  outbuf, sizeof(outbuf), NULL);
4209 	if (rc == 0)
4210 		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
4211 	if (rc == -ENOSPC)
4212 		rc = -EBUSY; /* to match efx_farch_filter_insert() */
4213 	return rc;
4214 }
4215 
4216 static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
4217 {
4218 	enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
4219 	unsigned int match_flags = spec->match_flags;
4220 	unsigned int uc_match, mc_match;
4221 	u32 mcdi_flags = 0;
4222 
4223 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) {		\
4224 		unsigned int  old_match_flags = match_flags;		\
4225 		match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag;		\
4226 		if (match_flags != old_match_flags)			\
4227 			mcdi_flags |=					\
4228 				(1 << ((encap) ?			\
4229 				       MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
4230 				       mcdi_field ## _LBN :		\
4231 				       MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
4232 				       mcdi_field ## _LBN));		\
4233 	}
4234 	/* inner or outer based on encap type */
4235 	MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
4236 	MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
4237 	MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
4238 	MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
4239 	MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
4240 	MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
4241 	MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
4242 	MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
4243 	/* always outer */
4244 	MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
4245 	MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
4246 #undef MAP_FILTER_TO_MCDI_FLAG
4247 
4248 	/* special handling for encap type, and mismatch */
4249 	if (encap_type) {
4250 		match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
4251 		mcdi_flags |=
4252 			(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
4253 		mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
4254 
4255 		uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
4256 		mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
4257 	} else {
4258 		uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
4259 		mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
4260 	}
4261 
4262 	if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
4263 		match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
4264 		mcdi_flags |=
4265 			is_multicast_ether_addr(spec->loc_mac) ?
4266 			1 << mc_match :
4267 			1 << uc_match;
4268 	}
4269 
4270 	/* Did we map them all? */
4271 	WARN_ON_ONCE(match_flags);
4272 
4273 	return mcdi_flags;
4274 }
4275 
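/* Map a filter spec to the index of its match combination in the table's
 * list of firmware-supported matches, or return -EPROTONOSUPPORT if the
 * firmware does not support that combination.
 */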
4276 static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
4277 			       const struct efx_filter_spec *spec)
4278 {
4279 	u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
4280 	unsigned int match_pri;
4281 
4282 	for (match_pri = 0;
4283 	     match_pri < table->rx_match_count;
4284 	     match_pri++)
4285 		if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
4286 			return match_pri;
4287 
4288 	return -EPROTONOSUPPORT;
4289 }
4290 
4291 static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
4292 					 struct efx_filter_spec *spec,
4293 					 bool replace_equal)
4294 {
4295 	DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
4296 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
4297 	struct efx_ef10_filter_table *table;
4298 	struct efx_filter_spec *saved_spec;
4299 	struct efx_rss_context *ctx = NULL;
4300 	unsigned int match_pri, hash;
4301 	unsigned int priv_flags;
4302 	bool rss_locked = false;
4303 	bool replacing = false;
4304 	unsigned int depth, i;
4305 	int ins_index = -1;
4306 	DEFINE_WAIT(wait);
4307 	bool is_mc_recip;
4308 	s32 rc;
4309 
4310 	WARN_ON(!rwsem_is_locked(&efx->filter_sem));
4311 	table = efx->filter_state;
4312 	down_write(&table->lock);
4313 
4314 	/* For now, only support RX filters */
4315 	if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
4316 	    EFX_FILTER_FLAG_RX) {
4317 		rc = -EINVAL;
4318 		goto out_unlock;
4319 	}
4320 
4321 	rc = efx_ef10_filter_pri(table, spec);
4322 	if (rc < 0)
4323 		goto out_unlock;
4324 	match_pri = rc;
4325 
4326 	hash = efx_filter_spec_hash(spec);
4327 	is_mc_recip = efx_filter_is_mc_recipient(spec);
4328 	if (is_mc_recip)
4329 		bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
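	/* Bits set in mc_rem_map mark the probe depths of existing
	 * equal-match filters that a successful insertion will supersede
	 * and remove.
	 */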
4330 
4331 	if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
4332 		mutex_lock(&efx->rss_lock);
4333 		rss_locked = true;
4334 		if (spec->rss_context)
4335 			ctx = efx_find_rss_context_entry(efx, spec->rss_context);
4336 		else
4337 			ctx = &efx->rss_context;
4338 		if (!ctx) {
4339 			rc = -ENOENT;
4340 			goto out_unlock;
4341 		}
4342 		if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
4343 			rc = -EOPNOTSUPP;
4344 			goto out_unlock;
4345 		}
4346 	}
4347 
4348 	/* Find any existing filters with the same match tuple or
4349 	 * else a free slot to insert at.
4350 	 */
4351 	for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
4352 		i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4353 		saved_spec = efx_ef10_filter_entry_spec(table, i);
4354 
4355 		if (!saved_spec) {
4356 			if (ins_index < 0)
4357 				ins_index = i;
4358 		} else if (efx_filter_spec_equal(spec, saved_spec)) {
4359 			if (spec->priority < saved_spec->priority &&
4360 			    spec->priority != EFX_FILTER_PRI_AUTO) {
4361 				rc = -EPERM;
4362 				goto out_unlock;
4363 			}
4364 			if (!is_mc_recip) {
4365 				/* This is the only one */
4366 				if (spec->priority ==
4367 				    saved_spec->priority &&
4368 				    !replace_equal) {
4369 					rc = -EEXIST;
4370 					goto out_unlock;
4371 				}
4372 				ins_index = i;
4373 				break;
4374 			} else if (spec->priority >
4375 				   saved_spec->priority ||
4376 				   (spec->priority ==
4377 				    saved_spec->priority &&
4378 				    replace_equal)) {
4379 				if (ins_index < 0)
4380 					ins_index = i;
4381 				else
4382 					__set_bit(depth, mc_rem_map);
4383 			}
4384 		}
4385 	}
4386 
4387 	/* Once we reach the maximum search depth, use the first suitable
4388 	 * slot, or return -EBUSY if there was none
4389 	 */
4390 	if (ins_index < 0) {
4391 		rc = -EBUSY;
4392 		goto out_unlock;
4393 	}
4394 
4395 	/* Create a software table entry if necessary. */
4396 	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
4397 	if (saved_spec) {
4398 		if (spec->priority == EFX_FILTER_PRI_AUTO &&
4399 		    saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
4400 			/* Just make sure it won't be removed */
4401 			if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
4402 				saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
4403 			table->entry[ins_index].spec &=
4404 				~EFX_EF10_FILTER_FLAG_AUTO_OLD;
4405 			rc = ins_index;
4406 			goto out_unlock;
4407 		}
4408 		replacing = true;
4409 		priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
4410 	} else {
4411 		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
4412 		if (!saved_spec) {
4413 			rc = -ENOMEM;
4414 			goto out_unlock;
4415 		}
4416 		*saved_spec = *spec;
4417 		priv_flags = 0;
4418 	}
4419 	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
4420 
4421 	/* Actually insert the filter on the HW */
4422 	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
4423 				  ctx, replacing);
4424 
4425 	if (rc == -EINVAL && nic_data->must_realloc_vis)
4426 		/* The MC rebooted under us, causing it to reject our filter
4427 		 * insertion as pointing to an invalid VI (spec->dmaq_id).
4428 		 */
4429 		rc = -EAGAIN;
4430 
4431 	/* Finalise the software table entry */
4432 	if (rc == 0) {
4433 		if (replacing) {
4434 			/* Update the fields that may differ */
4435 			if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
4436 				saved_spec->flags |=
4437 					EFX_FILTER_FLAG_RX_OVER_AUTO;
4438 			saved_spec->priority = spec->priority;
4439 			saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
4440 			saved_spec->flags |= spec->flags;
4441 			saved_spec->rss_context = spec->rss_context;
4442 			saved_spec->dmaq_id = spec->dmaq_id;
4443 		}
4444 	} else if (!replacing) {
4445 		kfree(saved_spec);
4446 		saved_spec = NULL;
4447 	} else {
4448 		/* We failed to replace, so the old filter is still present.
4449 		 * Roll back the software table to reflect this.  In fact the
4450 		 * efx_ef10_filter_set_entry() call below will do the right
4451 		 * thing, so nothing extra is needed here.
4452 		 */
4453 	}
4454 	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
4455 
4456 	/* Remove and finalise entries for lower-priority multicast
4457 	 * recipients
4458 	 */
4459 	if (is_mc_recip) {
4460 		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4461 		unsigned int depth, i;
4462 
4463 		memset(inbuf, 0, sizeof(inbuf));
4464 
4465 		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
4466 			if (!test_bit(depth, mc_rem_map))
4467 				continue;
4468 
4469 			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4470 			saved_spec = efx_ef10_filter_entry_spec(table, i);
4471 			priv_flags = efx_ef10_filter_entry_flags(table, i);
4472 
4473 			if (rc == 0) {
4474 				MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4475 					       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4476 				MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4477 					       table->entry[i].handle);
4478 				rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
4479 						  inbuf, sizeof(inbuf),
4480 						  NULL, 0, NULL);
4481 			}
4482 
4483 			if (rc == 0) {
4484 				kfree(saved_spec);
4485 				saved_spec = NULL;
4486 				priv_flags = 0;
4487 			}
4488 			efx_ef10_filter_set_entry(table, i, saved_spec,
4489 						  priv_flags);
4490 		}
4491 	}
4492 
4493 	/* If successful, return the inserted filter ID */
4494 	if (rc == 0)
4495 		rc = efx_ef10_make_filter_id(match_pri, ins_index);
4496 
4497 out_unlock:
4498 	if (rss_locked)
4499 		mutex_unlock(&efx->rss_lock);
4500 	up_write(&table->lock);
4501 	return rc;
4502 }
4503 
4504 static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4505 				  struct efx_filter_spec *spec,
4506 				  bool replace_equal)
4507 {
4508 	s32 ret;
4509 
4510 	down_read(&efx->filter_sem);
4511 	ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
4512 	up_read(&efx->filter_sem);
4513 
4514 	return ret;
4515 }
4516 
4517 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
4518 {
4519 	/* no need to do anything here on EF10 */
4520 }
4521 
4522 /* Remove a filter.
4523  * If by_index is false, remove by filter ID; otherwise remove by
4524  * table index.
4525  * The filter ID may come from userland and must be range-checked.
4526  * The caller must hold efx->filter_sem for read, and
4527  * efx->filter_state->lock for write.
4528  */
4529 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
4530 					   unsigned int priority_mask,
4531 					   u32 filter_id, bool by_index)
4532 {
4533 	unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
4534 	struct efx_ef10_filter_table *table = efx->filter_state;
4535 	MCDI_DECLARE_BUF(inbuf,
4536 			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
4537 			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
4538 	struct efx_filter_spec *spec;
4539 	DEFINE_WAIT(wait);
4540 	int rc;
4541 
4542 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
4543 	if (!spec ||
4544 	    (!by_index &&
4545 	     efx_ef10_filter_pri(table, spec) !=
4546 	     efx_ef10_filter_get_unsafe_pri(filter_id)))
4547 		return -ENOENT;
4548 
4549 	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
4550 	    priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
4551 		/* Just remove flags */
4552 		spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
4553 		table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
4554 		return 0;
4555 	}
4556 
4557 	if (!(priority_mask & (1U << spec->priority)))
4558 		return -ENOENT;
4559 
4560 	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
4561 		/* Reset to an automatic filter */
4562 
4563 		struct efx_filter_spec new_spec = *spec;
4564 
4565 		new_spec.priority = EFX_FILTER_PRI_AUTO;
4566 		new_spec.flags = (EFX_FILTER_FLAG_RX |
4567 				  (efx_rss_active(&efx->rss_context) ?
4568 				   EFX_FILTER_FLAG_RX_RSS : 0));
4569 		new_spec.dmaq_id = 0;
4570 		new_spec.rss_context = 0;
4571 		rc = efx_ef10_filter_push(efx, &new_spec,
4572 					  &table->entry[filter_idx].handle,
4573 					  &efx->rss_context,
4574 					  true);
4575 
4576 		if (rc == 0)
4577 			*spec = new_spec;
4578 	} else {
4579 		/* Really remove the filter */
4580 
4581 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4582 			       efx_ef10_filter_is_exclusive(spec) ?
4583 			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
4584 			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4585 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4586 			       table->entry[filter_idx].handle);
4587 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
4588 					inbuf, sizeof(inbuf), NULL, 0, NULL);
4589 
4590 		if ((rc == 0) || (rc == -ENOENT)) {
4591 			/* Filter removed OK or didn't actually exist */
4592 			kfree(spec);
4593 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4594 		} else {
4595 			efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
4596 					       MC_CMD_FILTER_OP_EXT_IN_LEN,
4597 					       NULL, 0, rc);
4598 		}
4599 	}
4600 
4601 	return rc;
4602 }
4603 
4604 static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
4605 				       enum efx_filter_priority priority,
4606 				       u32 filter_id)
4607 {
4608 	struct efx_ef10_filter_table *table;
4609 	int rc;
4610 
4611 	down_read(&efx->filter_sem);
4612 	table = efx->filter_state;
4613 	down_write(&table->lock);
4614 	rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
4615 					     false);
4616 	up_write(&table->lock);
4617 	up_read(&efx->filter_sem);
4618 	return rc;
4619 }
4620 
4621 /* Caller must hold efx->filter_sem for read */
4622 static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
4623 					  enum efx_filter_priority priority,
4624 					  u32 filter_id)
4625 {
4626 	struct efx_ef10_filter_table *table = efx->filter_state;
4627 
4628 	if (filter_id == EFX_EF10_FILTER_ID_INVALID)
4629 		return;
4630 
4631 	down_write(&table->lock);
4632 	efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
4633 					true);
4634 	up_write(&table->lock);
4635 }
4636 
4637 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
4638 				    enum efx_filter_priority priority,
4639 				    u32 filter_id, struct efx_filter_spec *spec)
4640 {
4641 	unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
4642 	const struct efx_filter_spec *saved_spec;
4643 	struct efx_ef10_filter_table *table;
4644 	int rc;
4645 
4646 	down_read(&efx->filter_sem);
4647 	table = efx->filter_state;
4648 	down_read(&table->lock);
4649 	saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
4650 	if (saved_spec && saved_spec->priority == priority &&
4651 	    efx_ef10_filter_pri(table, saved_spec) ==
4652 	    efx_ef10_filter_get_unsafe_pri(filter_id)) {
4653 		*spec = *saved_spec;
4654 		rc = 0;
4655 	} else {
4656 		rc = -ENOENT;
4657 	}
4658 	up_read(&table->lock);
4659 	up_read(&efx->filter_sem);
4660 	return rc;
4661 }
4662 
4663 static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
4664 				    enum efx_filter_priority priority)
4665 {
4666 	struct efx_ef10_filter_table *table;
4667 	unsigned int priority_mask;
4668 	unsigned int i;
4669 	int rc;
4670 
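	/* Match every priority up to and including the one requested, but
	 * never the automatic filters.
	 */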
4671 	priority_mask = (((1U << (priority + 1)) - 1) &
4672 			 ~(1U << EFX_FILTER_PRI_AUTO));
4673 
4674 	down_read(&efx->filter_sem);
4675 	table = efx->filter_state;
4676 	down_write(&table->lock);
4677 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
4678 		rc = efx_ef10_filter_remove_internal(efx, priority_mask,
4679 						     i, true);
4680 		if (rc && rc != -ENOENT)
4681 			break;
4682 		rc = 0;
4683 	}
4684 
4685 	up_write(&table->lock);
4686 	up_read(&efx->filter_sem);
4687 	return rc;
4688 }
4689 
4690 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
4691 					 enum efx_filter_priority priority)
4692 {
4693 	struct efx_ef10_filter_table *table;
4694 	unsigned int filter_idx;
4695 	s32 count = 0;
4696 
4697 	down_read(&efx->filter_sem);
4698 	table = efx->filter_state;
4699 	down_read(&table->lock);
4700 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4701 		if (table->entry[filter_idx].spec &&
4702 		    efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
4703 		    priority)
4704 			++count;
4705 	}
4706 	up_read(&table->lock);
4707 	up_read(&efx->filter_sem);
4708 	return count;
4709 }
4710 
4711 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
4712 {
4713 	struct efx_ef10_filter_table *table = efx->filter_state;
4714 
4715 	return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
4716 }
4717 
4718 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
4719 				      enum efx_filter_priority priority,
4720 				      u32 *buf, u32 size)
4721 {
4722 	struct efx_ef10_filter_table *table;
4723 	struct efx_filter_spec *spec;
4724 	unsigned int filter_idx;
4725 	s32 count = 0;
4726 
4727 	down_read(&efx->filter_sem);
4728 	table = efx->filter_state;
4729 	down_read(&table->lock);
4730 
4731 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4732 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
4733 		if (spec && spec->priority == priority) {
4734 			if (count == size) {
4735 				count = -EMSGSIZE;
4736 				break;
4737 			}
4738 			buf[count++] =
4739 				efx_ef10_make_filter_id(
4740 					efx_ef10_filter_pri(table, spec),
4741 					filter_idx);
4742 		}
4743 	}
4744 	up_read(&table->lock);
4745 	up_read(&efx->filter_sem);
4746 	return count;
4747 }
4748 
4749 #ifdef CONFIG_RFS_ACCEL
4750 
4751 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
4752 					   unsigned int filter_idx)
4753 {
4754 	struct efx_filter_spec *spec, saved_spec;
4755 	struct efx_ef10_filter_table *table;
4756 	struct efx_arfs_rule *rule = NULL;
4757 	bool ret = true, force = false;
4758 	u16 arfs_id;
4759 
4760 	down_read(&efx->filter_sem);
4761 	table = efx->filter_state;
4762 	down_write(&table->lock);
4763 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
4764 
4765 	if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
4766 		goto out_unlock;
4767 
4768 	spin_lock_bh(&efx->rps_hash_lock);
4769 	if (!efx->rps_hash_table) {
4770 		/* In the absence of the table, we always return 0 to ARFS. */
4771 		arfs_id = 0;
4772 	} else {
4773 		rule = efx_rps_hash_find(efx, spec);
4774 		if (!rule)
4775 			/* ARFS table doesn't know of this filter, so remove it */
4776 			goto expire;
4777 		arfs_id = rule->arfs_id;
4778 		ret = efx_rps_check_rule(rule, filter_idx, &force);
4779 		if (force)
4780 			goto expire;
4781 		if (!ret) {
4782 			spin_unlock_bh(&efx->rps_hash_lock);
4783 			goto out_unlock;
4784 		}
4785 	}
4786 	if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
4787 		ret = false;
4788 	else if (rule)
4789 		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
4790 expire:
4791 	saved_spec = *spec; /* remove operation will kfree spec */
4792 	spin_unlock_bh(&efx->rps_hash_lock);
4793 	/* At this point (since we dropped the lock), another thread might queue
4794 	 * up a fresh insertion request (but the actual insertion will be held
4795 	 * up by our possession of the filter table lock).  In that case, it
4796 	 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
4797 	 * the rule is not removed by efx_rps_hash_del() below.
4798 	 */
4799 	if (ret)
4800 		ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
4801 						      filter_idx, true) == 0;
4802 	/* While we can't safely dereference rule (we dropped the lock), we can
4803 	 * still test it for NULL.
4804 	 */
4805 	if (ret && rule) {
4806 		/* Expiring, so remove entry from ARFS table */
4807 		spin_lock_bh(&efx->rps_hash_lock);
4808 		efx_rps_hash_del(efx, &saved_spec);
4809 		spin_unlock_bh(&efx->rps_hash_lock);
4810 	}
4811 out_unlock:
4812 	up_write(&table->lock);
4813 	up_read(&efx->filter_sem);
4814 	return ret;
4815 }
4816 
4817 #endif /* CONFIG_RFS_ACCEL */
4818 
4819 static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
4820 {
4821 	int match_flags = 0;
4822 
4823 #define MAP_FLAG(gen_flag, mcdi_field) do {				\
4824 		u32 old_mcdi_flags = mcdi_flags;			\
4825 		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##	\
4826 				     mcdi_field ## _LBN);		\
4827 		if (mcdi_flags != old_mcdi_flags)			\
4828 			match_flags |= EFX_FILTER_MATCH_ ## gen_flag;	\
4829 	} while (0)
4830 
4831 	if (encap) {
4832 		/* encap filters must specify encap type */
4833 		match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
4834 		/* and imply ethertype and ip proto */
4835 		mcdi_flags &=
4836 			~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
4837 		mcdi_flags &=
4838 			~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
4839 		/* VLAN tags refer to the outer packet */
4840 		MAP_FLAG(INNER_VID, INNER_VLAN);
4841 		MAP_FLAG(OUTER_VID, OUTER_VLAN);
4842 		/* everything else refers to the inner packet */
4843 		MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
4844 		MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
4845 		MAP_FLAG(REM_HOST, IFRM_SRC_IP);
4846 		MAP_FLAG(LOC_HOST, IFRM_DST_IP);
4847 		MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
4848 		MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
4849 		MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
4850 		MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
4851 		MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
4852 		MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
4853 	} else {
4854 		MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
4855 		MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
4856 		MAP_FLAG(REM_HOST, SRC_IP);
4857 		MAP_FLAG(LOC_HOST, DST_IP);
4858 		MAP_FLAG(REM_MAC, SRC_MAC);
4859 		MAP_FLAG(REM_PORT, SRC_PORT);
4860 		MAP_FLAG(LOC_MAC, DST_MAC);
4861 		MAP_FLAG(LOC_PORT, DST_PORT);
4862 		MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
4863 		MAP_FLAG(INNER_VID, INNER_VLAN);
4864 		MAP_FLAG(OUTER_VID, OUTER_VLAN);
4865 		MAP_FLAG(IP_PROTO, IP_PROTO);
4866 	}
4867 #undef MAP_FLAG
4868 
4869 	/* Did we map them all? */
4870 	if (mcdi_flags)
4871 		return -EINVAL;
4872 
4873 	return match_flags;
4874 }
4875 
4876 static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
4877 {
4878 	struct efx_ef10_filter_table *table = efx->filter_state;
4879 	struct efx_ef10_filter_vlan *vlan, *next_vlan;
4880 
4881 	/* See comment in efx_ef10_filter_table_remove() */
4882 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4883 		return;
4884 
4885 	if (!table)
4886 		return;
4887 
4888 	list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
4889 		efx_ef10_filter_del_vlan_internal(efx, vlan);
4890 }
4891 
4892 static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
4893 					    bool encap,
4894 					    enum efx_filter_match_flags match_flags)
4895 {
4896 	unsigned int match_pri;
4897 	int mf;
4898 
4899 	for (match_pri = 0;
4900 	     match_pri < table->rx_match_count;
4901 	     match_pri++) {
4902 		mf = efx_ef10_filter_match_flags_from_mcdi(encap,
4903 				table->rx_match_mcdi_flags[match_pri]);
4904 		if (mf == match_flags)
4905 			return true;
4906 	}
4907 
4908 	return false;
4909 }
4910 
4911 static int
4912 efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
4913 				    struct efx_ef10_filter_table *table,
4914 				    bool encap)
4915 {
4916 	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
4917 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
4918 	unsigned int pd_match_pri, pd_match_count;
4919 	size_t outlen;
4920 	int rc;
4921 
4922 	/* Find out which RX filter types are supported, and their priorities */
4923 	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
4924 		       encap ?
4925 		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
4926 		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
4927 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
4928 			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
4929 			  &outlen);
4930 	if (rc)
4931 		return rc;
4932 
4933 	pd_match_count = MCDI_VAR_ARRAY_LEN(
4934 		outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
4935 
4936 	for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
4937 		u32 mcdi_flags =
4938 			MCDI_ARRAY_DWORD(
4939 				outbuf,
4940 				GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
4941 				pd_match_pri);
4942 		rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
4943 		if (rc < 0) {
4944 			netif_dbg(efx, probe, efx->net_dev,
4945 				  "%s: fw flags %#x pri %u not supported in driver\n",
4946 				  __func__, mcdi_flags, pd_match_pri);
4947 		} else {
4948 			netif_dbg(efx, probe, efx->net_dev,
4949 				  "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
4950 				  __func__, mcdi_flags, pd_match_pri,
4951 				  rc, table->rx_match_count);
4952 			table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
4953 			table->rx_match_count++;
4954 		}
4955 	}
4956 
4957 	return 0;
4958 }
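
/* The probe above follows the usual MCDI request/response pattern in this
 * file.  A condensed sketch, using a hypothetical command FOO (the
 * MC_CMD_FOO names are illustrative only):
 *
 *	MCDI_DECLARE_BUF(inbuf, MC_CMD_FOO_IN_LEN);
 *	MCDI_DECLARE_BUF(outbuf, MC_CMD_FOO_OUT_LENMAX);
 *	size_t outlen;
 *	int rc, i, n;
 *
 *	MCDI_SET_DWORD(inbuf, FOO_IN_OP, op);
 *	rc = efx_mcdi_rpc(efx, MC_CMD_FOO, inbuf, sizeof(inbuf),
 *			  outbuf, sizeof(outbuf), &outlen);
 *	if (rc)
 *		return rc;
 *	n = MCDI_VAR_ARRAY_LEN(outlen, FOO_OUT_RESULTS);
 *	for (i = 0; i < n; i++)
 *		use(MCDI_ARRAY_DWORD(outbuf, FOO_OUT_RESULTS, i));
 *
 * i.e. fixed-size buffers sized from the protocol headers, and an element
 * count derived from the returned length rather than trusted blindly.
 */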
4959 
4960 static int efx_ef10_filter_table_probe(struct efx_nic *efx)
4961 {
4962 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
4963 	struct net_device *net_dev = efx->net_dev;
4964 	struct efx_ef10_filter_table *table;
4965 	struct efx_ef10_vlan *vlan;
4966 	int rc;
4967 
4968 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4969 		return -EINVAL;
4970 
4971 	if (efx->filter_state) /* already probed */
4972 		return 0;
4973 
4974 	table = kzalloc(sizeof(*table), GFP_KERNEL);
4975 	if (!table)
4976 		return -ENOMEM;
4977 
4978 	table->rx_match_count = 0;
4979 	rc = efx_ef10_filter_table_probe_matches(efx, table, false);
4980 	if (rc)
4981 		goto fail;
4982 	if (nic_data->datapath_caps &
4983 		   (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
4984 		rc = efx_ef10_filter_table_probe_matches(efx, table, true);
4985 	if (rc)
4986 		goto fail;
4987 	if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
4988 	    !(efx_ef10_filter_match_supported(table, false,
4989 		(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
4990 	      efx_ef10_filter_match_supported(table, false,
4991 		(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
4992 		netif_info(efx, probe, net_dev,
4993 			   "VLAN filters are not supported in this firmware variant\n");
4994 		net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4995 		efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4996 		net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4997 	}
4998 
4999 	table->entry = vzalloc(array_size(HUNT_FILTER_TBL_ROWS,
5000 					  sizeof(*table->entry)));
5001 	if (!table->entry) {
5002 		rc = -ENOMEM;
5003 		goto fail;
5004 	}
5005 
5006 	table->mc_promisc_last = false;
5007 	table->vlan_filter =
5008 		!!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
5009 	INIT_LIST_HEAD(&table->vlan_list);
5010 	init_rwsem(&table->lock);
5011 
5012 	efx->filter_state = table;
5013 
5014 	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
5015 		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
5016 		if (rc)
5017 			goto fail_add_vlan;
5018 	}
5019 
5020 	return 0;
5021 
5022 fail_add_vlan:
5023 	efx_ef10_filter_cleanup_vlans(efx);
5024 	efx->filter_state = NULL;
5025 fail:
5026 	kfree(table);
5027 	return rc;
5028 }
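
/* Example of the locking contract asserted above: the MAC-address path
 * later in this file rebuilds the table while holding filter_sem for
 * write, as in efx_ef10_set_mac_address():
 *
 *	down_write(&efx->filter_sem);
 *	efx_ef10_filter_table_remove(efx);
 *	... reprogram the MAC via MCDI ...
 *	efx_ef10_filter_table_probe(efx);
 *	up_write(&efx->filter_sem);
 */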
5029 
5030 /* Caller must hold efx->filter_sem for read if race against
5031  * efx_ef10_filter_table_remove() is possible
5032  */
5033 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
5034 {
5035 	struct efx_ef10_filter_table *table = efx->filter_state;
5036 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
5037 	unsigned int invalid_filters = 0, failed = 0;
5038 	struct efx_ef10_filter_vlan *vlan;
5039 	struct efx_filter_spec *spec;
5040 	struct efx_rss_context *ctx;
5041 	unsigned int filter_idx;
5042 	u32 mcdi_flags;
5043 	int match_pri;
5044 	int rc, i;
5045 
5046 	WARN_ON(!rwsem_is_locked(&efx->filter_sem));
5047 
5048 	if (!nic_data->must_restore_filters)
5049 		return;
5050 
5051 	if (!table)
5052 		return;
5053 
5054 	down_write(&table->lock);
5055 	mutex_lock(&efx->rss_lock);
5056 
5057 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
5058 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
5059 		if (!spec)
5060 			continue;
5061 
5062 		mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
5063 		match_pri = 0;
5064 		while (match_pri < table->rx_match_count &&
5065 		       table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
5066 			++match_pri;
5067 		if (match_pri >= table->rx_match_count) {
5068 			invalid_filters++;
5069 			goto not_restored;
5070 		}
5071 		if (spec->rss_context)
5072 			ctx = efx_find_rss_context_entry(efx, spec->rss_context);
5073 		else
5074 			ctx = &efx->rss_context;
5075 		if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
5076 			if (!ctx) {
5077 				netif_warn(efx, drv, efx->net_dev,
5078 					   "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
5079 					   spec->rss_context);
5080 				invalid_filters++;
5081 				goto not_restored;
5082 			}
5083 			if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
5084 				netif_warn(efx, drv, efx->net_dev,
5085 					   "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
5086 					   spec->rss_context);
5087 				invalid_filters++;
5088 				goto not_restored;
5089 			}
5090 		}
5091 
5092 		rc = efx_ef10_filter_push(efx, spec,
5093 					  &table->entry[filter_idx].handle,
5094 					  ctx, false);
5095 		if (rc)
5096 			failed++;
5097 
5098 		if (rc) {
5099 not_restored:
5100 			list_for_each_entry(vlan, &table->vlan_list, list)
5101 				for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
5102 					if (vlan->default_filters[i] == filter_idx)
5103 						vlan->default_filters[i] =
5104 							EFX_EF10_FILTER_ID_INVALID;
5105 
5106 			kfree(spec);
5107 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
5108 		}
5109 	}
5110 
5111 	mutex_unlock(&efx->rss_lock);
5112 	up_write(&table->lock);
5113 
5114 	/* This can happen validly if the MC's capabilities have changed, so
5115 	 * is not an error.
5116 	 */
5117 	if (invalid_filters)
5118 		netif_dbg(efx, drv, efx->net_dev,
5119 			  "Did not restore %u filters that are now unsupported.\n",
5120 			  invalid_filters);
5121 
5122 	if (failed)
5123 		netif_err(efx, hw, efx->net_dev,
5124 			  "unable to restore %u filters\n", failed);
5125 	else
5126 		nic_data->must_restore_filters = false;
5127 }
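
/* To summarise the loop above: a filter survives an MC reboot only if its
 * MCDI match flags are still in the firmware's supported list, any RSS
 * context it references was successfully recreated, and the FILTER_OP push
 * itself succeeds.  Anything else is dropped from the software table (spec
 * freed, entry cleared) so it cannot be matched or freed again later.
 */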
5128 
5129 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
5130 {
5131 	struct efx_ef10_filter_table *table = efx->filter_state;
5132 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
5133 	struct efx_filter_spec *spec;
5134 	unsigned int filter_idx;
5135 	int rc;
5136 
5137 	efx_ef10_filter_cleanup_vlans(efx);
5138 	efx->filter_state = NULL;
5139 	/* If we were called without locking, then it's not safe to free
5140 	 * the table as others might be using it.  So we just WARN, leak
5141 	 * the memory, and potentially get an inconsistent filter table
5142 	 * state.
5143 	 * This should never actually happen.
5144 	 */
5145 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5146 		return;
5147 
5148 	if (!table)
5149 		return;
5150 
5151 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
5152 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
5153 		if (!spec)
5154 			continue;
5155 
5156 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
5157 			       efx_ef10_filter_is_exclusive(spec) ?
5158 			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
5159 			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
5160 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
5161 			       table->entry[filter_idx].handle);
5162 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
5163 					sizeof(inbuf), NULL, 0, NULL);
5164 		if (rc)
5165 			netif_info(efx, drv, efx->net_dev,
5166 				   "%s: filter %04x remove failed\n",
5167 				   __func__, filter_idx);
5168 		kfree(spec);
5169 	}
5170 
5171 	vfree(table->entry);
5172 	kfree(table);
5173 }
5174 
5175 static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, u16 *id)
5176 {
5177 	struct efx_ef10_filter_table *table = efx->filter_state;
5178 	unsigned int filter_idx;
5179 
5180 	efx_rwsem_assert_write_locked(&table->lock);
5181 
5182 	if (*id != EFX_EF10_FILTER_ID_INVALID) {
5183 		filter_idx = efx_ef10_filter_get_unsafe_id(*id);
5184 		if (!table->entry[filter_idx].spec)
5185 			netif_dbg(efx, drv, efx->net_dev,
5186 				  "marked null spec old %04x:%04x\n", *id,
5187 				  filter_idx);
5188 		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
5189 		*id = EFX_EF10_FILTER_ID_INVALID;
5190 	}
5191 }
5192 
5193 /* Mark old per-VLAN filters that may need to be removed */
5194 static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
5195 					   struct efx_ef10_filter_vlan *vlan)
5196 {
5197 	struct efx_ef10_filter_table *table = efx->filter_state;
5198 	unsigned int i;
5199 
5200 	for (i = 0; i < table->dev_uc_count; i++)
5201 		efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
5202 	for (i = 0; i < table->dev_mc_count; i++)
5203 		efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
5204 	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5205 		efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
5206 }
5207 
5208 /* Mark old filters that may need to be removed.
5209  * Caller must hold efx->filter_sem for read if race against
5210  * efx_ef10_filter_table_remove() is possible
5211  */
5212 static void efx_ef10_filter_mark_old(struct efx_nic *efx)
5213 {
5214 	struct efx_ef10_filter_table *table = efx->filter_state;
5215 	struct efx_ef10_filter_vlan *vlan;
5216 
5217 	down_write(&table->lock);
5218 	list_for_each_entry(vlan, &table->vlan_list, list)
5219 		_efx_ef10_filter_vlan_mark_old(efx, vlan);
5220 	up_write(&table->lock);
5221 }
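
/* Mark and sweep in practice: a sync pass marks every auto filter old,
 * re-inserts the ones still wanted (the insert path clears the AUTO_OLD
 * flag when it renews an existing entry), then sweeps the rest:
 *
 *	efx_ef10_filter_mark_old(efx);
 *	... efx_ef10_filter_insert_addr_list() / efx_ef10_filter_insert_def() ...
 *	efx_ef10_filter_remove_old(efx);
 */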
5222 
5223 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
5224 {
5225 	struct efx_ef10_filter_table *table = efx->filter_state;
5226 	struct net_device *net_dev = efx->net_dev;
5227 	struct netdev_hw_addr *uc;
5228 	unsigned int i;
5229 
5230 	table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
5231 	ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
5232 	i = 1;
5233 	netdev_for_each_uc_addr(uc, net_dev) {
5234 		if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
5235 			table->uc_promisc = true;
5236 			break;
5237 		}
5238 		ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
5239 		i++;
5240 	}
5241 
5242 	table->dev_uc_count = i;
5243 }
5244 
5245 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
5246 {
5247 	struct efx_ef10_filter_table *table = efx->filter_state;
5248 	struct net_device *net_dev = efx->net_dev;
5249 	struct netdev_hw_addr *mc;
5250 	unsigned int i;
5251 
5252 	table->mc_overflow = false;
5253 	table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
5254 
5255 	i = 0;
5256 	netdev_for_each_mc_addr(mc, net_dev) {
5257 		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
5258 			table->mc_promisc = true;
5259 			table->mc_overflow = true;
5260 			break;
5261 		}
5262 		ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
5263 		i++;
5264 	}
5265 
5266 	table->dev_mc_count = i;
5267 }
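
/* Both snapshot helpers above degrade gracefully: if the net_device list
 * exceeds the fixed array, the remainder is dropped and the promiscuous or
 * default filters take over.  mc_overflow additionally records that the
 * captured mc list is incomplete, so the sync code knows not to install it
 * alongside the promiscuous filters.
 */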
5268 
5269 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5270 					    struct efx_ef10_filter_vlan *vlan,
5271 					    bool multicast, bool rollback)
5272 {
5273 	struct efx_ef10_filter_table *table = efx->filter_state;
5274 	struct efx_ef10_dev_addr *addr_list;
5275 	enum efx_filter_flags filter_flags;
5276 	struct efx_filter_spec spec;
5277 	u8 baddr[ETH_ALEN];
5278 	unsigned int i, j;
5279 	int addr_count;
5280 	u16 *ids;
5281 	int rc;
5282 
5283 	if (multicast) {
5284 		addr_list = table->dev_mc_list;
5285 		addr_count = table->dev_mc_count;
5286 		ids = vlan->mc;
5287 	} else {
5288 		addr_list = table->dev_uc_list;
5289 		addr_count = table->dev_uc_count;
5290 		ids = vlan->uc;
5291 	}
5292 
5293 	filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
5294 
5295 	/* Insert/renew filters */
5296 	for (i = 0; i < addr_count; i++) {
5297 		EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
5298 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5299 		efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
5300 		rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5301 		if (rc < 0) {
5302 			if (rollback) {
5303 				netif_info(efx, drv, efx->net_dev,
5304 					   "efx_ef10_filter_insert failed rc=%d\n",
5305 					   rc);
5306 				/* Fall back to promiscuous */
5307 				for (j = 0; j < i; j++) {
5308 					efx_ef10_filter_remove_unsafe(
5309 						efx, EFX_FILTER_PRI_AUTO,
5310 						ids[j]);
5311 					ids[j] = EFX_EF10_FILTER_ID_INVALID;
5312 				}
5313 				return rc;
5314 			} else {
5315 				/* keep invalid ID, and carry on */
5316 			}
5317 		} else {
5318 			ids[i] = efx_ef10_filter_get_unsafe_id(rc);
5319 		}
5320 	}
5321 
5322 	if (multicast && rollback) {
5323 		/* Also need an Ethernet broadcast filter */
5324 		EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
5325 				     EFX_EF10_FILTER_ID_INVALID);
5326 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5327 		eth_broadcast_addr(baddr);
5328 		efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5329 		rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5330 		if (rc < 0) {
5331 			netif_warn(efx, drv, efx->net_dev,
5332 				   "Broadcast filter insert failed rc=%d\n", rc);
5333 			/* Fall back to promiscuous */
5334 			for (j = 0; j < i; j++) {
5335 				efx_ef10_filter_remove_unsafe(
5336 					efx, EFX_FILTER_PRI_AUTO,
5337 					ids[j]);
5338 				ids[j] = EFX_EF10_FILTER_ID_INVALID;
5339 			}
5340 			return rc;
5341 		} else {
5342 			vlan->default_filters[EFX_EF10_BCAST] =
5343 				efx_ef10_filter_get_unsafe_id(rc);
5344 		}
5345 	}
5346 
5347 	return 0;
5348 }
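
/* The rollback branch above is the standard all-or-nothing idiom, reduced
 * to its skeleton (insert()/remove() stand in for the filter calls):
 *
 *	for (i = 0; i < n; i++) {
 *		rc = insert(i);
 *		if (rc < 0) {
 *			for (j = 0; j < i; j++)
 *				remove(j);
 *			return rc;	caller falls back to promiscuous
 *		}
 *	}
 */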
5349 
5350 static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5351 				      struct efx_ef10_filter_vlan *vlan,
5352 				      enum efx_encap_type encap_type,
5353 				      bool multicast, bool rollback)
5354 {
5355 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
5356 	enum efx_filter_flags filter_flags;
5357 	struct efx_filter_spec spec;
5358 	u8 baddr[ETH_ALEN];
5359 	int rc;
5360 	u16 *id;
5361 
5362 	filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
5363 
5364 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5365 
5366 	if (multicast)
5367 		efx_filter_set_mc_def(&spec);
5368 	else
5369 		efx_filter_set_uc_def(&spec);
5370 
5371 	if (encap_type) {
5372 		if (nic_data->datapath_caps &
5373 		    (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
5374 			efx_filter_set_encap_type(&spec, encap_type);
5375 		else
5376 			/* don't insert encap filters on non-supporting
5377 			 * platforms. ID will be left as INVALID.
5378 			 */
5379 			return 0;
5380 	}
5381 
5382 	if (vlan->vid != EFX_FILTER_VID_UNSPEC)
5383 		efx_filter_set_eth_local(&spec, vlan->vid, NULL);
5384 
5385 	rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5386 	if (rc < 0) {
5387 		const char *um = multicast ? "Multicast" : "Unicast";
5388 		const char *encap_name = "";
5389 		const char *encap_ipv = "";
5390 
5391 		if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5392 		    EFX_ENCAP_TYPE_VXLAN)
5393 			encap_name = "VXLAN ";
5394 		else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5395 			 EFX_ENCAP_TYPE_NVGRE)
5396 			encap_name = "NVGRE ";
5397 		else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5398 			 EFX_ENCAP_TYPE_GENEVE)
5399 			encap_name = "GENEVE ";
5400 		if (encap_type & EFX_ENCAP_FLAG_IPV6)
5401 			encap_ipv = "IPv6 ";
5402 		else if (encap_type)
5403 			encap_ipv = "IPv4 ";
5404 
5405 		/* unprivileged functions can't insert mismatch filters
5406 		 * for encapsulated or unicast traffic, so downgrade
5407 		 * those warnings to debug.
5408 		 */
5409 		netif_cond_dbg(efx, drv, efx->net_dev,
5410 			       rc == -EPERM && (encap_type || !multicast), warn,
5411 			       "%s%s%s mismatch filter insert failed rc=%d\n",
5412 			       encap_name, encap_ipv, um, rc);
5413 	} else if (multicast) {
5414 		/* mapping from encap types to default filter IDs (multicast) */
5415 		static enum efx_ef10_default_filters map[] = {
5416 			[EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
5417 			[EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
5418 			[EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
5419 			[EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
5420 			[EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
5421 				EFX_EF10_VXLAN6_MCDEF,
5422 			[EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
5423 				EFX_EF10_NVGRE6_MCDEF,
5424 			[EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
5425 				EFX_EF10_GENEVE6_MCDEF,
5426 		};
5427 
5428 		/* quick bounds check (BCAST result impossible) */
5429 		BUILD_BUG_ON(EFX_EF10_BCAST != 0);
5430 		if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
5431 			WARN_ON(1);
5432 			return -EINVAL;
5433 		}
5434 		/* then follow map */
5435 		id = &vlan->default_filters[map[encap_type]];
5436 
5437 		EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
5438 		*id = efx_ef10_filter_get_unsafe_id(rc);
5439 		if (!nic_data->workaround_26807 && !encap_type) {
5440 			/* Also need an Ethernet broadcast filter */
5441 			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
5442 					   filter_flags, 0);
5443 			eth_broadcast_addr(baddr);
5444 			efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5445 			rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5446 			if (rc < 0) {
5447 				netif_warn(efx, drv, efx->net_dev,
5448 					   "Broadcast filter insert failed rc=%d\n",
5449 					   rc);
5450 				if (rollback) {
5451 					/* Roll back the mc_def filter */
5452 					efx_ef10_filter_remove_unsafe(
5453 							efx, EFX_FILTER_PRI_AUTO,
5454 							*id);
5455 					*id = EFX_EF10_FILTER_ID_INVALID;
5456 					return rc;
5457 				}
5458 			} else {
5459 				EFX_WARN_ON_PARANOID(
5460 					vlan->default_filters[EFX_EF10_BCAST] !=
5461 					EFX_EF10_FILTER_ID_INVALID);
5462 				vlan->default_filters[EFX_EF10_BCAST] =
5463 					efx_ef10_filter_get_unsafe_id(rc);
5464 			}
5465 		}
5466 		rc = 0;
5467 	} else {
5468 		/* mapping from encap types to default filter IDs (unicast) */
5469 		static enum efx_ef10_default_filters map[] = {
5470 			[EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
5471 			[EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
5472 			[EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
5473 			[EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
5474 			[EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
5475 				EFX_EF10_VXLAN6_UCDEF,
5476 			[EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
5477 				EFX_EF10_NVGRE6_UCDEF,
5478 			[EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
5479 				EFX_EF10_GENEVE6_UCDEF,
5480 		};
5481 
5482 		/* quick bounds check (BCAST result impossible) */
5483 		BUILD_BUG_ON(EFX_EF10_BCAST != 0);
5484 		if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
5485 			WARN_ON(1);
5486 			return -EINVAL;
5487 		}
5488 		/* then follow map */
5489 		id = &vlan->default_filters[map[encap_type]];
5490 		EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
5491 		*id = efx_ef10_filter_get_unsafe_id(rc);
5492 		rc = 0;
5493 	}
5494 	return rc;
5495 }
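
/* The encap-type maps above combine designated initializers with a zero
 * sentinel: EFX_EF10_BCAST is 0 and is never a legitimate mapping target
 * (that is what the BUILD_BUG_ON pins down), so any encap_type whose slot
 * was left zero-initialized is caught as unmapped.  The idiom in isolation
 * (KEY_*/VAL_* are hypothetical):
 *
 *	static const int map[] = {
 *		[KEY_A] = VAL_A,
 *		[KEY_B] = VAL_B,	all other slots default to 0
 *	};
 *
 *	if (key >= ARRAY_SIZE(map) || map[key] == 0)
 *		return -EINVAL;
 */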
5496 
5497 /* Remove filters that weren't renewed. */
5498 static void efx_ef10_filter_remove_old(struct efx_nic *efx)
5499 {
5500 	struct efx_ef10_filter_table *table = efx->filter_state;
5501 	int remove_failed = 0;
5502 	int remove_noent = 0;
5503 	int rc;
5504 	int i;
5505 
5506 	down_write(&table->lock);
5507 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
5508 		if (READ_ONCE(table->entry[i].spec) &
5509 		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
5510 			rc = efx_ef10_filter_remove_internal(efx,
5511 					1U << EFX_FILTER_PRI_AUTO, i, true);
5512 			if (rc == -ENOENT)
5513 				remove_noent++;
5514 			else if (rc)
5515 				remove_failed++;
5516 		}
5517 	}
5518 	up_write(&table->lock);
5519 
5520 	if (remove_failed)
5521 		netif_info(efx, drv, efx->net_dev,
5522 			   "%s: failed to remove %d filters\n",
5523 			   __func__, remove_failed);
5524 	if (remove_noent)
5525 		netif_info(efx, drv, efx->net_dev,
5526 			   "%s: failed to remove %d non-existent filters\n",
5527 			   __func__, remove_noent);
5528 }
5529 
5530 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
5531 {
5532 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
5533 	u8 mac_old[ETH_ALEN];
5534 	int rc, rc2;
5535 
5536 	/* Only reconfigure a PF-created vport */
5537 	if (is_zero_ether_addr(nic_data->vport_mac))
5538 		return 0;
5539 
5540 	efx_device_detach_sync(efx);
5541 	efx_net_stop(efx->net_dev);
5542 	down_write(&efx->filter_sem);
5543 	efx_ef10_filter_table_remove(efx);
5544 	up_write(&efx->filter_sem);
5545 
5546 	rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
5547 	if (rc)
5548 		goto restore_filters;
5549 
5550 	ether_addr_copy(mac_old, nic_data->vport_mac);
5551 	rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
5552 				    nic_data->vport_mac);
5553 	if (rc)
5554 		goto restore_vadaptor;
5555 
5556 	rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
5557 				    efx->net_dev->dev_addr);
5558 	if (!rc) {
5559 		ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
5560 	} else {
5561 		rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
5562 		if (rc2) {
5563 			/* Failed to add original MAC, so clear vport_mac */
5564 			eth_zero_addr(nic_data->vport_mac);
5565 			goto reset_nic;
5566 		}
5567 	}
5568 
5569 restore_vadaptor:
5570 	rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
5571 	if (rc2)
5572 		goto reset_nic;
5573 restore_filters:
5574 	down_write(&efx->filter_sem);
5575 	rc2 = efx_ef10_filter_table_probe(efx);
5576 	up_write(&efx->filter_sem);
5577 	if (rc2)
5578 		goto reset_nic;
5579 
5580 	rc2 = efx_net_open(efx->net_dev);
5581 	if (rc2)
5582 		goto reset_nic;
5583 
5584 	efx_device_attach_if_not_resetting(efx);
5585 
5586 	return rc;
5587 
5588 reset_nic:
5589 	netif_err(efx, drv, efx->net_dev,
5590 		  "Failed to restore when changing MAC address - scheduling reset\n");
5591 	efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
5592 
5593 	return rc ? rc : rc2;
5594 }
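
/* The happy path above, in order; the vport cannot be reconfigured while
 * filters and a vadaptor still reference it, hence the unwinding:
 *
 *	detach + stop			quiesce traffic
 *	filter_table_remove		drop filter references
 *	vadaptor_free			release the vport
 *	vport_del_mac / vport_add_mac	swap the MAC
 *	vadaptor_alloc			reacquire the vport
 *	filter_table_probe + open	rebuild and resume
 *
 * Each error label resumes recovery from the matching point; a DATAPATH
 * reset is scheduled only if the recovery steps themselves fail.
 */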
5595 
5596 /* Caller must hold efx->filter_sem for read if race against
5597  * efx_ef10_filter_table_remove() is possible
5598  */
5599 static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
5600 					      struct efx_ef10_filter_vlan *vlan)
5601 {
5602 	struct efx_ef10_filter_table *table = efx->filter_state;
5603 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
5604 
5605 	/* Do not install unspecified VID if VLAN filtering is enabled.
5606 	 * Do not install all specified VIDs if VLAN filtering is disabled.
5607 	 */
5608 	if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
5609 		return;
5610 
5611 	/* Insert/renew unicast filters */
5612 	if (table->uc_promisc) {
5613 		efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
5614 					   false, false);
5615 		efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
5616 	} else {
5617 		/* If any of the filters failed to insert, fall back to
5618 		 * promiscuous mode - add in the uc_def filter.  But keep
5619 		 * our individual unicast filters.
5620 		 */
5621 		if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
5622 			efx_ef10_filter_insert_def(efx, vlan,
5623 						   EFX_ENCAP_TYPE_NONE,
5624 						   false, false);
5625 	}
5626 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5627 				   false, false);
5628 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5629 					      EFX_ENCAP_FLAG_IPV6,
5630 				   false, false);
5631 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5632 				   false, false);
5633 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5634 					      EFX_ENCAP_FLAG_IPV6,
5635 				   false, false);
5636 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5637 				   false, false);
5638 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5639 					      EFX_ENCAP_FLAG_IPV6,
5640 				   false, false);
5641 
5642 	/* Insert/renew multicast filters */
5643 	/* If changing promiscuous state with cascaded multicast filters, remove
5644 	 * old filters first, so that packets are dropped rather than duplicated
5645 	 */
5646 	if (nic_data->workaround_26807 &&
5647 	    table->mc_promisc_last != table->mc_promisc)
5648 		efx_ef10_filter_remove_old(efx);
5649 	if (table->mc_promisc) {
5650 		if (nic_data->workaround_26807) {
5651 			/* If we failed to insert promiscuous filters, rollback
5652 			 * and fall back to individual multicast filters
5653 			 */
5654 			if (efx_ef10_filter_insert_def(efx, vlan,
5655 						       EFX_ENCAP_TYPE_NONE,
5656 						       true, true)) {
5657 				/* Changing promisc state, so remove old filters */
5658 				efx_ef10_filter_remove_old(efx);
5659 				efx_ef10_filter_insert_addr_list(efx, vlan,
5660 								 true, false);
5661 			}
5662 		} else {
5663 			/* If we failed to insert promiscuous filters, don't
5664 			 * rollback.  Regardless, also insert the mc_list,
5665 			 * unless it's incomplete due to overflow
5666 			 */
5667 			efx_ef10_filter_insert_def(efx, vlan,
5668 						   EFX_ENCAP_TYPE_NONE,
5669 						   true, false);
5670 			if (!table->mc_overflow)
5671 				efx_ef10_filter_insert_addr_list(efx, vlan,
5672 								 true, false);
5673 		}
5674 	} else {
5675 		/* If any filters failed to insert, rollback and fall back to
5676 		 * promiscuous mode - mc_def filter and maybe broadcast.  If
5677 		 * that fails, roll back again and insert as many of our
5678 		 * individual multicast filters as we can.
5679 		 */
5680 		if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
5681 			/* Changing promisc state, so remove old filters */
5682 			if (nic_data->workaround_26807)
5683 				efx_ef10_filter_remove_old(efx);
5684 			if (efx_ef10_filter_insert_def(efx, vlan,
5685 						       EFX_ENCAP_TYPE_NONE,
5686 						       true, true))
5687 				efx_ef10_filter_insert_addr_list(efx, vlan,
5688 								 true, false);
5689 		}
5690 	}
5691 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5692 				   true, false);
5693 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5694 					      EFX_ENCAP_FLAG_IPV6,
5695 				   true, false);
5696 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5697 				   true, false);
5698 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5699 					      EFX_ENCAP_FLAG_IPV6,
5700 				   true, false);
5701 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5702 				   true, false);
5703 	efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5704 					      EFX_ENCAP_FLAG_IPV6,
5705 				   true, false);
5706 }
5707 
5708 /* Caller must hold efx->filter_sem for read if race against
5709  * efx_ef10_filter_table_remove() is possible
5710  */
5711 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
5712 {
5713 	struct efx_ef10_filter_table *table = efx->filter_state;
5714 	struct net_device *net_dev = efx->net_dev;
5715 	struct efx_ef10_filter_vlan *vlan;
5716 	bool vlan_filter;
5717 
5718 	if (!efx_dev_registered(efx))
5719 		return;
5720 
5721 	if (!table)
5722 		return;
5723 
5724 	efx_ef10_filter_mark_old(efx);
5725 
5726 	/* Copy/convert the address lists; add the primary station
5727 	 * address and broadcast address
5728 	 */
5729 	netif_addr_lock_bh(net_dev);
5730 	efx_ef10_filter_uc_addr_list(efx);
5731 	efx_ef10_filter_mc_addr_list(efx);
5732 	netif_addr_unlock_bh(net_dev);
5733 
5734 	/* If the VLAN filtering state has changed, remove all the old
5735 	 * filters now.  Doing this in advance avoids conflicts between
5736 	 * untagged unicast filters and VLAN-0-tagged filters.
5737 	 */
5738 	vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
5739 	if (table->vlan_filter != vlan_filter) {
5740 		table->vlan_filter = vlan_filter;
5741 		efx_ef10_filter_remove_old(efx);
5742 	}
5743 
5744 	list_for_each_entry(vlan, &table->vlan_list, list)
5745 		efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
5746 
5747 	efx_ef10_filter_remove_old(efx);
5748 	table->mc_promisc_last = table->mc_promisc;
5749 }
5750 
5751 static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
5752 {
5753 	struct efx_ef10_filter_table *table = efx->filter_state;
5754 	struct efx_ef10_filter_vlan *vlan;
5755 
5756 	WARN_ON(!rwsem_is_locked(&efx->filter_sem));
5757 
5758 	list_for_each_entry(vlan, &table->vlan_list, list) {
5759 		if (vlan->vid == vid)
5760 			return vlan;
5761 	}
5762 
5763 	return NULL;
5764 }
5765 
5766 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
5767 {
5768 	struct efx_ef10_filter_table *table = efx->filter_state;
5769 	struct efx_ef10_filter_vlan *vlan;
5770 	unsigned int i;
5771 
5772 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5773 		return -EINVAL;
5774 
5775 	vlan = efx_ef10_filter_find_vlan(efx, vid);
5776 	if (WARN_ON(vlan)) {
5777 		netif_err(efx, drv, efx->net_dev,
5778 			  "VLAN %u already added\n", vid);
5779 		return -EALREADY;
5780 	}
5781 
5782 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
5783 	if (!vlan)
5784 		return -ENOMEM;
5785 
5786 	vlan->vid = vid;
5787 
5788 	for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
5789 		vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
5790 	for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
5791 		vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
5792 	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5793 		vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
5794 
5795 	list_add_tail(&vlan->list, &table->vlan_list);
5796 
5797 	if (efx_dev_registered(efx))
5798 		efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
5799 
5800 	return 0;
5801 }
5802 
5803 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
5804 					      struct efx_ef10_filter_vlan *vlan)
5805 {
5806 	unsigned int i;
5807 
5808 	/* See comment in efx_ef10_filter_table_remove() */
5809 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5810 		return;
5811 
5812 	list_del(&vlan->list);
5813 
5814 	for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
5815 		efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5816 					      vlan->uc[i]);
5817 	for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
5818 		efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5819 					      vlan->mc[i]);
5820 	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5821 		if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
5822 			efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5823 						      vlan->default_filters[i]);
5824 
5825 	kfree(vlan);
5826 }
5827 
5828 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
5829 {
5830 	struct efx_ef10_filter_vlan *vlan;
5831 
5832 	/* See comment in efx_ef10_filter_table_remove() */
5833 	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5834 		return;
5835 
5836 	vlan = efx_ef10_filter_find_vlan(efx, vid);
5837 	if (!vlan) {
5838 		netif_err(efx, drv, efx->net_dev,
5839 			  "VLAN %u not found in filter state\n", vid);
5840 		return;
5841 	}
5842 
5843 	efx_ef10_filter_del_vlan_internal(efx, vlan);
5844 }
5845 
5846 static int efx_ef10_set_mac_address(struct efx_nic *efx)
5847 {
5848 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
5849 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
5850 	bool was_enabled = efx->port_enabled;
5851 	int rc;
5852 
5853 	efx_device_detach_sync(efx);
5854 	efx_net_stop(efx->net_dev);
5855 
5856 	mutex_lock(&efx->mac_lock);
5857 	down_write(&efx->filter_sem);
5858 	efx_ef10_filter_table_remove(efx);
5859 
5860 	ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
5861 			efx->net_dev->dev_addr);
5862 	MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
5863 		       nic_data->vport_id);
5864 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
5865 				sizeof(inbuf), NULL, 0, NULL);
5866 
5867 	efx_ef10_filter_table_probe(efx);
5868 	up_write(&efx->filter_sem);
5869 	mutex_unlock(&efx->mac_lock);
5870 
5871 	if (was_enabled)
5872 		efx_net_open(efx->net_dev);
5873 	efx_device_attach_if_not_resetting(efx);
5874 
5875 #ifdef CONFIG_SFC_SRIOV
5876 	if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
5877 		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
5878 
5879 		if (rc == -EPERM) {
5880 			struct efx_nic *efx_pf;
5881 
5882 			/* Switch to PF and change MAC address on vport */
5883 			efx_pf = pci_get_drvdata(pci_dev_pf);
5884 
5885 			rc = efx_ef10_sriov_set_vf_mac(efx_pf,
5886 						       nic_data->vf_index,
5887 						       efx->net_dev->dev_addr);
5888 		} else if (!rc) {
5889 			struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
5890 			struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
5891 			unsigned int i;
5892 
5893 			/* MAC address successfully changed by VF (with MAC
5894 			 * spoofing) so update the parent PF if possible.
5895 			 */
5896 			for (i = 0; i < efx_pf->vf_count; ++i) {
5897 				struct ef10_vf *vf = nic_data->vf + i;
5898 
5899 				if (vf->efx == efx) {
5900 					ether_addr_copy(vf->mac,
5901 							efx->net_dev->dev_addr);
5902 					return 0;
5903 				}
5904 			}
5905 		}
5906 	} else
5907 #endif
5908 	if (rc == -EPERM) {
5909 		netif_err(efx, drv, efx->net_dev,
5910 			  "Cannot change MAC address; use sfboot to enable"
5911 			  " mac-spoofing on this interface\n");
5912 	} else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
5913 		/* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
5914 		 * fall-back to the method of changing the MAC address on the
5915 		 * vport.  This only applies to PFs because such versions of
5916 		 * MCFW do not support VFs.
5917 		 */
5918 		rc = efx_ef10_vport_set_mac_address(efx);
5919 	} else if (rc) {
5920 		efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
5921 				       sizeof(inbuf), NULL, 0, rc);
5922 	}
5923 
5924 	return rc;
5925 }
5926 
5927 static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
5928 {
5929 	efx_ef10_filter_sync_rx_mode(efx);
5930 
5931 	return efx_mcdi_set_mac(efx);
5932 }
5933 
5934 static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
5935 {
5936 	efx_ef10_filter_sync_rx_mode(efx);
5937 
5938 	return 0;
5939 }
5940 
5941 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
5942 {
5943 	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
5944 
5945 	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
5946 	return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
5947 			    NULL, 0, NULL);
5948 }
5949 
5950 /* MC BISTs follow a different poll mechanism to phy BISTs.
5951  * The BIST is done in the poll handler on the MC, and the MCDI command
5952  * will block until the BIST is done.
5953  */
5954 static int efx_ef10_poll_bist(struct efx_nic *efx)
5955 {
5956 	int rc;
5957 	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
5958 	size_t outlen;
5959 	u32 result;
5960 
5961 	rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
5962 			   outbuf, sizeof(outbuf), &outlen);
5963 	if (rc != 0)
5964 		return rc;
5965 
5966 	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
5967 		return -EIO;
5968 
5969 	result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
5970 	switch (result) {
5971 	case MC_CMD_POLL_BIST_PASSED:
5972 		netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
5973 		return 0;
5974 	case MC_CMD_POLL_BIST_TIMEOUT:
5975 		netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
5976 		return -EIO;
5977 	case MC_CMD_POLL_BIST_FAILED:
5978 		netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
5979 		return -EIO;
5980 	default:
5981 		netif_err(efx, hw, efx->net_dev,
5982 			  "BIST returned unknown result %u\n", result);
5983 		return -EIO;
5984 	}
5985 }
5986 
5987 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
5988 {
5989 	int rc;
5990 
5991 	netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
5992 
5993 	rc = efx_ef10_start_bist(efx, bist_type);
5994 	if (rc != 0)
5995 		return rc;
5996 
5997 	return efx_ef10_poll_bist(efx);
5998 }
5999 
6000 static int
6001 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
6002 {
6003 	int rc, rc2;
6004 
6005 	efx_reset_down(efx, RESET_TYPE_WORLD);
6006 
6007 	rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
6008 			  NULL, 0, NULL, 0, NULL);
6009 	if (rc != 0)
6010 		goto out;
6011 
6012 	tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
6013 	tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
6014 
6015 	rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
6016 
6017 out:
6018 	if (rc == -EPERM)
6019 		rc = 0;
6020 	rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
6021 	return rc ? rc : rc2;
6022 }
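
/* Shape of the offline self-test above: the datapath is taken down around
 * the BISTs, MC_CMD_ENABLE_OFFLINE_BIST gates entry to BIST mode, and a
 * WORLD reset is used to leave it again:
 *
 *	efx_reset_down(efx, RESET_TYPE_WORLD);
 *	MC_CMD_ENABLE_OFFLINE_BIST
 *	run MEM and REG BISTs, recording 1 (pass) or -1 (fail) per test
 *	efx_mcdi_reset(efx, RESET_TYPE_WORLD);
 *	efx_reset_up(efx, RESET_TYPE_WORLD, ...);
 *
 * Note that -EPERM (an unprivileged function) is deliberately not treated
 * as a test failure.
 */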
6023 
6024 #ifdef CONFIG_SFC_MTD
6025 
6026 struct efx_ef10_nvram_type_info {
6027 	u16 type, type_mask;
6028 	u8 port;
6029 	const char *name;
6030 };
6031 
6032 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
6033 	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   0,    0, "sfc_mcfw" },
6034 	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0,    0, "sfc_mcfw_backup" },
6035 	{ NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   0,    0, "sfc_exp_rom" },
6036 	{ NVRAM_PARTITION_TYPE_STATIC_CONFIG,	   0,    0, "sfc_static_cfg" },
6037 	{ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   0,    0, "sfc_dynamic_cfg" },
6038 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0,   0, "sfc_exp_rom_cfg" },
6039 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,   1, "sfc_exp_rom_cfg" },
6040 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,   2, "sfc_exp_rom_cfg" },
6041 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,   3, "sfc_exp_rom_cfg" },
6042 	{ NVRAM_PARTITION_TYPE_LICENSE,		   0,    0, "sfc_license" },
6043 	{ NVRAM_PARTITION_TYPE_PHY_MIN,		   0xff, 0, "sfc_phy_fw" },
6044 	{ NVRAM_PARTITION_TYPE_MUM_FIRMWARE,	   0,    0, "sfc_mumfw" },
6045 	{ NVRAM_PARTITION_TYPE_EXPANSION_UEFI,	   0,    0, "sfc_uefi" },
6046 	{ NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0,    0, "sfc_dynamic_cfg_dflt" },
6047 	{ NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0,    0, "sfc_exp_rom_cfg_dflt" },
6048 	{ NVRAM_PARTITION_TYPE_STATUS,		   0,    0, "sfc_status" },
6049 	{ NVRAM_PARTITION_TYPE_BUNDLE,		   0,    0, "sfc_bundle" },
6050 	{ NVRAM_PARTITION_TYPE_BUNDLE_METADATA,	   0,    0, "sfc_bundle_metadata" },
6051 };
6052 #define EF10_NVRAM_PARTITION_COUNT	ARRAY_SIZE(efx_ef10_nvram_types)
6053 
6054 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6055 					struct efx_mcdi_mtd_partition *part,
6056 					unsigned int type,
6057 					unsigned long *found)
6058 {
6059 	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
6060 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
6061 	const struct efx_ef10_nvram_type_info *info;
6062 	size_t size, erase_size, outlen;
6063 	int type_idx = 0;
6064 	bool protected;
6065 	int rc;
6066 
6067 	for (type_idx = 0; ; type_idx++) {
6068 		if (type_idx == EF10_NVRAM_PARTITION_COUNT)
6069 			return -ENODEV;
6070 		info = efx_ef10_nvram_types + type_idx;
6071 		if ((type & ~info->type_mask) == info->type)
6072 			break;
6073 	}
6074 	if (info->port != efx_port_num(efx))
6075 		return -ENODEV;
6076 
6077 	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
6078 	if (rc)
6079 		return rc;
6080 	if (protected &&
6081 	    (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS &&
6082 	     type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS))
6083 		/* Hide protected partitions that don't provide defaults. */
6084 		return -ENODEV;
6085 
6086 	if (protected)
6087 		/* Protected partitions are read only. */
6088 		erase_size = 0;
6089 
6090 	/* If we've already exposed a partition of this type, hide this
6091 	 * duplicate.  All operations on MTDs are keyed by the type anyway,
6092 	 * so we can't act on the duplicate.
6093 	 */
6094 	if (__test_and_set_bit(type_idx, found))
6095 		return -EEXIST;
6096 
6097 	part->nvram_type = type;
6098 
6099 	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
6100 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
6101 			  outbuf, sizeof(outbuf), &outlen);
6102 	if (rc)
6103 		return rc;
6104 	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
6105 		return -EIO;
6106 	if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
6107 	    (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
6108 		part->fw_subtype = MCDI_DWORD(outbuf,
6109 					      NVRAM_METADATA_OUT_SUBTYPE);
6110 
6111 	part->common.dev_type_name = "EF10 NVRAM manager";
6112 	part->common.type_name = info->name;
6113 
6114 	part->common.mtd.type = MTD_NORFLASH;
6115 	part->common.mtd.flags = MTD_CAP_NORFLASH;
6116 	part->common.mtd.size = size;
6117 	part->common.mtd.erasesize = erase_size;
6118 	/* partitions with no erase size (e.g. sfc_status) are read-only */
6119 	if (!erase_size)
6120 		part->common.mtd.flags |= MTD_NO_ERASE;
6121 
6122 	return 0;
6123 }
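
/* The range matching above works because an entry's type_mask selects the
 * don't-care bits: (type & ~info->type_mask) == info->type.  For example
 * the sfc_phy_fw entry uses type_mask 0xff, so it claims every per-PHY
 * partition type in the 256-entry block starting at
 * NVRAM_PARTITION_TYPE_PHY_MIN (this assumes, as the scheme requires, that
 * PHY_MIN has its low byte clear):
 *
 *	type = NVRAM_PARTITION_TYPE_PHY_MIN + 0x2a;	hypothetical PHY
 *	(type & ~0xffU) == NVRAM_PARTITION_TYPE_PHY_MIN	true => match
 */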
6124 
6125 static int efx_ef10_mtd_probe(struct efx_nic *efx)
6126 {
6127 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
6128 	DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
6129 	struct efx_mcdi_mtd_partition *parts;
6130 	size_t outlen, n_parts_total, i, n_parts;
6131 	unsigned int type;
6132 	int rc;
6133 
6134 	ASSERT_RTNL();
6135 
6136 	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
6137 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
6138 			  outbuf, sizeof(outbuf), &outlen);
6139 	if (rc)
6140 		return rc;
6141 	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
6142 		return -EIO;
6143 
6144 	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
6145 	if (n_parts_total >
6146 	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
6147 		return -EIO;
6148 
6149 	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
6150 	if (!parts)
6151 		return -ENOMEM;
6152 
6153 	n_parts = 0;
6154 	for (i = 0; i < n_parts_total; i++) {
6155 		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
6156 					i);
6157 		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
6158 						  found);
6159 		if (rc == -EEXIST || rc == -ENODEV)
6160 			continue;
6161 		if (rc)
6162 			goto fail;
6163 		n_parts++;
6164 	}
6165 
6166 	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
6167 fail:
6168 	if (rc)
6169 		kfree(parts);
6170 	return rc;
6171 }
6172 
6173 #endif /* CONFIG_SFC_MTD */
6174 
6175 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
6176 {
6177 	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
6178 }
6179 
6180 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
6181 					    u32 host_time) {}
6182 
6183 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
6184 					   bool temp)
6185 {
6186 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
6187 	int rc;
6188 
6189 	if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
6190 	    channel->sync_events_state == SYNC_EVENTS_VALID ||
6191 	    (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
6192 		return 0;
6193 	channel->sync_events_state = SYNC_EVENTS_REQUESTED;
6194 
6195 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
6196 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6197 	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
6198 		       channel->channel);
6199 
6200 	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6201 			  inbuf, sizeof(inbuf), NULL, 0, NULL);
6202 
6203 	if (rc != 0)
6204 		channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6205 						    SYNC_EVENTS_DISABLED;
6206 
6207 	return rc;
6208 }
6209 
6210 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
6211 					    bool temp)
6212 {
6213 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
6214 	int rc;
6215 
6216 	if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
6217 	    (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
6218 		return 0;
6219 	if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
6220 		channel->sync_events_state = SYNC_EVENTS_DISABLED;
6221 		return 0;
6222 	}
6223 	channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6224 					    SYNC_EVENTS_DISABLED;
6225 
6226 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
6227 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6228 	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
6229 		       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
6230 	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
6231 		       channel->channel);
6232 
6233 	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6234 			  inbuf, sizeof(inbuf), NULL, 0, NULL);
6235 
6236 	return rc;
6237 }
6238 
6239 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
6240 					   bool temp)
6241 {
6242 	int (*set)(struct efx_channel *channel, bool temp);
6243 	struct efx_channel *channel;
6244 
6245 	set = en ?
6246 	      efx_ef10_rx_enable_timestamping :
6247 	      efx_ef10_rx_disable_timestamping;
6248 
6249 	channel = efx_ptp_channel(efx);
6250 	if (channel) {
6251 		int rc = set(channel, temp);
6252 		if (en && rc != 0) {
6253 			efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
6254 			return rc;
6255 		}
6256 	}
6257 
6258 	return 0;
6259 }
6260 
6261 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
6262 					 struct hwtstamp_config *init)
6263 {
6264 	return -EOPNOTSUPP;
6265 }
6266 
6267 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
6268 				      struct hwtstamp_config *init)
6269 {
6270 	int rc;
6271 
6272 	switch (init->rx_filter) {
6273 	case HWTSTAMP_FILTER_NONE:
6274 		efx_ef10_ptp_set_ts_sync_events(efx, false, false);
6275 		/* if TX timestamping is still requested then leave PTP on */
6276 		return efx_ptp_change_mode(efx,
6277 					   init->tx_type != HWTSTAMP_TX_OFF, 0);
6278 	case HWTSTAMP_FILTER_ALL:
6279 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6280 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
6281 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
6282 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6283 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6284 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6285 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6286 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6287 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6288 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
6289 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
6290 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
6291 	case HWTSTAMP_FILTER_NTP_ALL:
6292 		init->rx_filter = HWTSTAMP_FILTER_ALL;
6293 		rc = efx_ptp_change_mode(efx, true, 0);
6294 		if (!rc)
6295 			rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
6296 		if (rc)
6297 			efx_ptp_change_mode(efx, false, 0);
6298 		return rc;
6299 	default:
6300 		return -ERANGE;
6301 	}
6302 }
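
/* Note the coarsening above: every specific PTP filter request is upgraded
 * to HWTSTAMP_FILTER_ALL and reported back as such.  The hwtstamp ABI
 * permits this: a driver may timestamp more packet types than requested,
 * provided the updated rx_filter field reports what was actually enabled.
 */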
6303 
6304 static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
6305 				     struct netdev_phys_item_id *ppid)
6306 {
6307 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6308 
6309 	if (!is_valid_ether_addr(nic_data->port_id))
6310 		return -EOPNOTSUPP;
6311 
6312 	ppid->id_len = ETH_ALEN;
6313 	memcpy(ppid->id, nic_data->port_id, ppid->id_len);
6314 
6315 	return 0;
6316 }
6317 
6318 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6319 {
6320 	if (proto != htons(ETH_P_8021Q))
6321 		return -EINVAL;
6322 
6323 	return efx_ef10_add_vlan(efx, vid);
6324 }
6325 
6326 static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6327 {
6328 	if (proto != htons(ETH_P_8021Q))
6329 		return -EINVAL;
6330 
6331 	return efx_ef10_del_vlan(efx, vid);
6332 }
6333 
6334 /* We rely on the MCDI wiping out our TX rings if it made any changes to the
6335  * ports table, ensuring that any TSO descriptors that were made on a now-
6336  * removed tunnel port will be blown away and won't break things when we try
6337  * to transmit them using the new ports table.
6338  */
6339 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
6340 {
6341 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6342 	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
6343 	MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
6344 	bool will_reset = false;
6345 	size_t num_entries = 0;
6346 	size_t inlen, outlen;
6347 	size_t i;
6348 	int rc;
6349 	efx_dword_t flags_and_num_entries;
6350 
6351 	WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));
6352 
6353 	nic_data->udp_tunnels_dirty = false;
6354 
6355 	if (!(nic_data->datapath_caps &
6356 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
6357 		efx_device_attach_if_not_resetting(efx);
6358 		return 0;
6359 	}
6360 
6361 	BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
6362 		     MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
6363 
6364 	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6365 		if (nic_data->udp_tunnels[i].count &&
6366 		    nic_data->udp_tunnels[i].port) {
6367 			efx_dword_t entry;
6368 
6369 			EFX_POPULATE_DWORD_2(entry,
6370 				TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
6371 					ntohs(nic_data->udp_tunnels[i].port),
6372 				TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
6373 					nic_data->udp_tunnels[i].type);
6374 			*_MCDI_ARRAY_DWORD(inbuf,
6375 				SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
6376 				num_entries++) = entry;
6377 		}
6378 	}
6379 
6380 	BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
6381 		      MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
6382 		     EFX_WORD_1_LBN);
6383 	BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
6384 		     EFX_WORD_1_WIDTH);
6385 	EFX_POPULATE_DWORD_2(flags_and_num_entries,
6386 			     MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
6387 				!!unloading,
6388 			     EFX_WORD_1, num_entries);
6389 	*_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
6390 		flags_and_num_entries;
6391 
6392 	inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);
6393 
6394 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
6395 				inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
6396 	if (rc == -EIO) {
6397 		/* Most likely the MC rebooted due to another function also
6398 		 * setting its tunnel port list. Mark the tunnel port list as
6399 		 * dirty, so it will be pushed upon coming up from the reboot.
6400 		 */
6401 		nic_data->udp_tunnels_dirty = true;
6402 		return 0;
6403 	}
6404 
6405 	if (rc) {
6406 		/* this command is expected to be unavailable to unprivileged functions */
6407 		if (rc != -EPERM)
6408 			netif_warn(efx, drv, efx->net_dev,
6409 				   "Unable to set UDP tunnel ports; rc=%d.\n", rc);
6410 	} else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
6411 		   (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
6412 		netif_info(efx, drv, efx->net_dev,
6413 			   "Rebooting MC due to UDP tunnel port list change\n");
6414 		will_reset = true;
6415 		if (unloading)
6416 			/* Delay for the MC reset to complete.  This will make
6417 			 * unloading other functions a bit smoother.  This is a
6418 			 * race, but the other unload will work whichever way
6419 			 * it goes; the delay just avoids an unnecessary error
6420 			 * message.
6421 			 */
6422 			msleep(100);
6423 	}
6424 	if (!will_reset && !unloading) {
6425 		/* The caller will have detached, relying on the MC reset to
6426 		 * trigger a re-attach.  Since there won't be an MC reset, we
6427 		 * have to do the attach ourselves.
6428 		 */
6429 		efx_device_attach_if_not_resetting(efx);
6430 	}
6431 
6432 	return rc;
6433 }
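
/* Wire layout produced by the EFX_POPULATE_DWORD_2() above: FLAGS and
 * NUM_ENTRIES share a single dword, with the entry count in the high
 * 16 bits (EFX_WORD_1); the two BUILD_BUG_ON()s pin that relationship
 * down at compile time.  Schematically, the dword sent is:
 *
 *	bits 31..16	num_entries
 *	UNLOADING bit	set when the driver is unloading
 *	all other bits	0
 */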
6434 
6435 static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
6436 {
6437 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6438 	int rc = 0;
6439 
6440 	mutex_lock(&nic_data->udp_tunnels_lock);
6441 	if (nic_data->udp_tunnels_dirty) {
6442 		/* Make sure all TX are stopped while we modify the table, else
6443 		 * we might race against an efx_features_check().
6444 		 */
6445 		efx_device_detach_sync(efx);
6446 		rc = efx_ef10_set_udp_tnl_ports(efx, false);
6447 	}
6448 	mutex_unlock(&nic_data->udp_tunnels_lock);
6449 	return rc;
6450 }
6451 
6452 static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx,
6453 							     __be16 port)
6454 {
6455 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6456 	size_t i;
6457 
6458 	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6459 		if (!nic_data->udp_tunnels[i].count)
6460 			continue;
6461 		if (nic_data->udp_tunnels[i].port == port)
6462 			return &nic_data->udp_tunnels[i];
6463 	}
6464 	return NULL;
6465 }
6466 
6467 static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx,
6468 				     struct efx_udp_tunnel tnl)
6469 {
6470 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6471 	struct efx_udp_tunnel *match;
6472 	char typebuf[8];
6473 	size_t i;
6474 	int rc;
6475 
6476 	if (!(nic_data->datapath_caps &
6477 	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6478 		return 0;
6479 
6480 	efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
6481 	netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n",
6482 		  typebuf, ntohs(tnl.port));
6483 
6484 	mutex_lock(&nic_data->udp_tunnels_lock);
6485 	/* Make sure all TX are stopped while we add to the table, else we
6486 	 * might race against an efx_features_check().
6487 	 */
6488 	efx_device_detach_sync(efx);
6489 
6490 	match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
6491 	if (match != NULL) {
6492 		if (match->type == tnl.type) {
6493 			netif_dbg(efx, drv, efx->net_dev,
6494 				  "Referencing existing tunnel entry\n");
6495 			match->count++;
6496 			/* No need to cause an MCDI update */
6497 			rc = 0;
6498 			goto unlock_out;
6499 		}
6500 		efx_get_udp_tunnel_type_name(match->type,
6501 					     typebuf, sizeof(typebuf));
6502 		netif_dbg(efx, drv, efx->net_dev,
6503 			  "UDP port %d is already in use by %s\n",
6504 			  ntohs(tnl.port), typebuf);
6505 		rc = -EEXIST;
6506 		goto unlock_out;
6507 	}
6508 
6509 	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
6510 		if (!nic_data->udp_tunnels[i].count) {
6511 			nic_data->udp_tunnels[i] = tnl;
6512 			nic_data->udp_tunnels[i].count = 1;
6513 			rc = efx_ef10_set_udp_tnl_ports(efx, false);
6514 			goto unlock_out;
6515 		}
6516 
6517 	netif_dbg(efx, drv, efx->net_dev,
6518 		  "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n",
6519 		  typebuf, ntohs(tnl.port));
6520 
6521 	rc = -ENOMEM;
6522 
6523 unlock_out:
6524 	mutex_unlock(&nic_data->udp_tunnels_lock);
6525 	return rc;
6526 }
6527 
6528 /* Called under the TX lock with the TX queue running, hence no-one can be
6529  * in the middle of updating the UDP tunnels table.  However, they could
6530  * have tried and failed the MCDI, in which case they'll have set the dirty
6531  * flag before dropping their locks.
6532  */
6533 static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
6534 {
6535 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6536 
6537 	if (!(nic_data->datapath_caps &
6538 	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6539 		return false;
6540 
6541 	if (nic_data->udp_tunnels_dirty)
6542 		/* SW table may not match HW state, so just assume we can't
6543 		 * use any UDP tunnel offloads.
6544 		 */
6545 		return false;
6546 
6547 	return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL;
6548 }
6549 
6550 static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx,
6551 				     struct efx_udp_tunnel tnl)
6552 {
6553 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
6554 	struct efx_udp_tunnel *match;
6555 	char typebuf[8];
6556 	int rc;
6557 
6558 	if (!(nic_data->datapath_caps &
6559 	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6560 		return 0;
6561 
6562 	efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
6563 	netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n",
6564 		  typebuf, ntohs(tnl.port));
6565 
6566 	mutex_lock(&nic_data->udp_tunnels_lock);
6567 	/* Make sure all TX queues are stopped while we remove from the
6568 	 * table, else we might race against an efx_features_check().
6569 	 */
6570 	efx_device_detach_sync(efx);
6571 
6572 	match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
6573 	if (match != NULL) {
6574 		if (match->type == tnl.type) {
6575 			if (--match->count) {
6576 				/* Port is still in use, so nothing to do */
6577 				netif_dbg(efx, drv, efx->net_dev,
6578 					  "UDP tunnel port %d remains active\n",
6579 					  ntohs(tnl.port));
6580 				rc = 0;
6581 				goto out_unlock;
6582 			}
6583 			rc = efx_ef10_set_udp_tnl_ports(efx, false);
6584 			goto out_unlock;
6585 		}
6586 		efx_get_udp_tunnel_type_name(match->type,
6587 					     typebuf, sizeof(typebuf));
6588 		netif_warn(efx, drv, efx->net_dev,
6589 			   "UDP port %d is actually in use by %s, not removing\n",
6590 			   ntohs(tnl.port), typebuf);
6591 	}
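	/* No entry for this port (or it belongs to another tunnel type) */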
6592 	rc = -ENOENT;
6593 
6594 out_unlock:
6595 	mutex_unlock(&nic_data->udp_tunnels_lock);
6596 	return rc;
6597 }
6598 
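/* Offload features advertised by both the PF and VF datapaths */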
6599 #define EF10_OFFLOAD_FEATURES		\
6600 	(NETIF_F_IP_CSUM |		\
6601 	 NETIF_F_HW_VLAN_CTAG_FILTER |	\
6602 	 NETIF_F_IPV6_CSUM |		\
6603 	 NETIF_F_RXHASH |		\
6604 	 NETIF_F_NTUPLE)
6605 
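/* NIC-type operations for a Huntington (EF10) virtual function */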
6606 const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
6607 	.is_vf = true,
6608 	.mem_bar = efx_ef10_vf_mem_bar,
6609 	.mem_map_size = efx_ef10_mem_map_size,
6610 	.probe = efx_ef10_probe_vf,
6611 	.remove = efx_ef10_remove,
6612 	.dimension_resources = efx_ef10_dimension_resources,
6613 	.init = efx_ef10_init_nic,
6614 	.fini = efx_port_dummy_op_void,
6615 	.map_reset_reason = efx_ef10_map_reset_reason,
6616 	.map_reset_flags = efx_ef10_map_reset_flags,
6617 	.reset = efx_ef10_reset,
6618 	.probe_port = efx_mcdi_port_probe,
6619 	.remove_port = efx_mcdi_port_remove,
6620 	.fini_dmaq = efx_ef10_fini_dmaq,
6621 	.prepare_flr = efx_ef10_prepare_flr,
6622 	.finish_flr = efx_port_dummy_op_void,
6623 	.describe_stats = efx_ef10_describe_stats,
6624 	.update_stats = efx_ef10_update_stats_vf,
6625 	.start_stats = efx_port_dummy_op_void,
6626 	.pull_stats = efx_port_dummy_op_void,
6627 	.stop_stats = efx_port_dummy_op_void,
6628 	.set_id_led = efx_mcdi_set_id_led,
6629 	.push_irq_moderation = efx_ef10_push_irq_moderation,
6630 	.reconfigure_mac = efx_ef10_mac_reconfigure_vf,
6631 	.check_mac_fault = efx_mcdi_mac_check_fault,
6632 	.reconfigure_port = efx_mcdi_port_reconfigure,
6633 	.get_wol = efx_ef10_get_wol_vf,
6634 	.set_wol = efx_ef10_set_wol_vf,
6635 	.resume_wol = efx_port_dummy_op_void,
6636 	.mcdi_request = efx_ef10_mcdi_request,
6637 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
6638 	.mcdi_read_response = efx_ef10_mcdi_read_response,
6639 	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
6640 	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
6641 	.irq_enable_master = efx_port_dummy_op_void,
6642 	.irq_test_generate = efx_ef10_irq_test_generate,
6643 	.irq_disable_non_ev = efx_port_dummy_op_void,
6644 	.irq_handle_msi = efx_ef10_msi_interrupt,
6645 	.irq_handle_legacy = efx_ef10_legacy_interrupt,
6646 	.tx_probe = efx_ef10_tx_probe,
6647 	.tx_init = efx_ef10_tx_init,
6648 	.tx_remove = efx_ef10_tx_remove,
6649 	.tx_write = efx_ef10_tx_write,
6650 	.tx_limit_len = efx_ef10_tx_limit_len,
6651 	.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
6652 	.rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
6653 	.rx_probe = efx_ef10_rx_probe,
6654 	.rx_init = efx_ef10_rx_init,
6655 	.rx_remove = efx_ef10_rx_remove,
6656 	.rx_write = efx_ef10_rx_write,
6657 	.rx_defer_refill = efx_ef10_rx_defer_refill,
6658 	.ev_probe = efx_ef10_ev_probe,
6659 	.ev_init = efx_ef10_ev_init,
6660 	.ev_fini = efx_ef10_ev_fini,
6661 	.ev_remove = efx_ef10_ev_remove,
6662 	.ev_process = efx_ef10_ev_process,
6663 	.ev_read_ack = efx_ef10_ev_read_ack,
6664 	.ev_test_generate = efx_ef10_ev_test_generate,
6665 	.filter_table_probe = efx_ef10_filter_table_probe,
6666 	.filter_table_restore = efx_ef10_filter_table_restore,
6667 	.filter_table_remove = efx_ef10_filter_table_remove,
6668 	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
6669 	.filter_insert = efx_ef10_filter_insert,
6670 	.filter_remove_safe = efx_ef10_filter_remove_safe,
6671 	.filter_get_safe = efx_ef10_filter_get_safe,
6672 	.filter_clear_rx = efx_ef10_filter_clear_rx,
6673 	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
6674 	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
6675 	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
6676 #ifdef CONFIG_RFS_ACCEL
6677 	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
6678 #endif
6679 #ifdef CONFIG_SFC_MTD
6680 	.mtd_probe = efx_port_dummy_op_int,
6681 #endif
6682 	.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
6683 	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
6684 	.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
6685 	.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
6686 #ifdef CONFIG_SFC_SRIOV
6687 	.vswitching_probe = efx_ef10_vswitching_probe_vf,
6688 	.vswitching_restore = efx_ef10_vswitching_restore_vf,
6689 	.vswitching_remove = efx_ef10_vswitching_remove_vf,
6690 #endif
6691 	.get_mac_address = efx_ef10_get_mac_address_vf,
6692 	.set_mac_address = efx_ef10_set_mac_address,
6693 
6694 	.get_phys_port_id = efx_ef10_get_phys_port_id,
6695 	.revision = EFX_REV_HUNT_A0,
6696 	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
6697 	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
6698 	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
6699 	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
6700 	.can_rx_scatter = true,
6701 	.always_rx_scatter = true,
6702 	.min_interrupt_mode = EFX_INT_MODE_MSIX,
6703 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
6704 	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
6705 	.offload_features = EF10_OFFLOAD_FEATURES,
6706 	.mcdi_max_ver = 2,
6707 	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
6708 	.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE) |
6709 			    (1 << HWTSTAMP_FILTER_ALL),
6710 	.rx_hash_key_size = 40,
6711 };
6712 
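/* NIC-type operations for a Huntington (EF10) physical function */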
6713 const struct efx_nic_type efx_hunt_a0_nic_type = {
6714 	.is_vf = false,
6715 	.mem_bar = efx_ef10_pf_mem_bar,
6716 	.mem_map_size = efx_ef10_mem_map_size,
6717 	.probe = efx_ef10_probe_pf,
6718 	.remove = efx_ef10_remove,
6719 	.dimension_resources = efx_ef10_dimension_resources,
6720 	.init = efx_ef10_init_nic,
6721 	.fini = efx_port_dummy_op_void,
6722 	.map_reset_reason = efx_ef10_map_reset_reason,
6723 	.map_reset_flags = efx_ef10_map_reset_flags,
6724 	.reset = efx_ef10_reset,
6725 	.probe_port = efx_mcdi_port_probe,
6726 	.remove_port = efx_mcdi_port_remove,
6727 	.fini_dmaq = efx_ef10_fini_dmaq,
6728 	.prepare_flr = efx_ef10_prepare_flr,
6729 	.finish_flr = efx_port_dummy_op_void,
6730 	.describe_stats = efx_ef10_describe_stats,
6731 	.update_stats = efx_ef10_update_stats_pf,
6732 	.start_stats = efx_mcdi_mac_start_stats,
6733 	.pull_stats = efx_mcdi_mac_pull_stats,
6734 	.stop_stats = efx_mcdi_mac_stop_stats,
6735 	.set_id_led = efx_mcdi_set_id_led,
6736 	.push_irq_moderation = efx_ef10_push_irq_moderation,
6737 	.reconfigure_mac = efx_ef10_mac_reconfigure,
6738 	.check_mac_fault = efx_mcdi_mac_check_fault,
6739 	.reconfigure_port = efx_mcdi_port_reconfigure,
6740 	.get_wol = efx_ef10_get_wol,
6741 	.set_wol = efx_ef10_set_wol,
6742 	.resume_wol = efx_port_dummy_op_void,
6743 	.test_chip = efx_ef10_test_chip,
6744 	.test_nvram = efx_mcdi_nvram_test_all,
6745 	.mcdi_request = efx_ef10_mcdi_request,
6746 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
6747 	.mcdi_read_response = efx_ef10_mcdi_read_response,
6748 	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
6749 	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
6750 	.irq_enable_master = efx_port_dummy_op_void,
6751 	.irq_test_generate = efx_ef10_irq_test_generate,
6752 	.irq_disable_non_ev = efx_port_dummy_op_void,
6753 	.irq_handle_msi = efx_ef10_msi_interrupt,
6754 	.irq_handle_legacy = efx_ef10_legacy_interrupt,
6755 	.tx_probe = efx_ef10_tx_probe,
6756 	.tx_init = efx_ef10_tx_init,
6757 	.tx_remove = efx_ef10_tx_remove,
6758 	.tx_write = efx_ef10_tx_write,
6759 	.tx_limit_len = efx_ef10_tx_limit_len,
6760 	.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
6761 	.rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
6762 	.rx_push_rss_context_config = efx_ef10_rx_push_rss_context_config,
6763 	.rx_pull_rss_context_config = efx_ef10_rx_pull_rss_context_config,
6764 	.rx_restore_rss_contexts = efx_ef10_rx_restore_rss_contexts,
6765 	.rx_probe = efx_ef10_rx_probe,
6766 	.rx_init = efx_ef10_rx_init,
6767 	.rx_remove = efx_ef10_rx_remove,
6768 	.rx_write = efx_ef10_rx_write,
6769 	.rx_defer_refill = efx_ef10_rx_defer_refill,
6770 	.ev_probe = efx_ef10_ev_probe,
6771 	.ev_init = efx_ef10_ev_init,
6772 	.ev_fini = efx_ef10_ev_fini,
6773 	.ev_remove = efx_ef10_ev_remove,
6774 	.ev_process = efx_ef10_ev_process,
6775 	.ev_read_ack = efx_ef10_ev_read_ack,
6776 	.ev_test_generate = efx_ef10_ev_test_generate,
6777 	.filter_table_probe = efx_ef10_filter_table_probe,
6778 	.filter_table_restore = efx_ef10_filter_table_restore,
6779 	.filter_table_remove = efx_ef10_filter_table_remove,
6780 	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
6781 	.filter_insert = efx_ef10_filter_insert,
6782 	.filter_remove_safe = efx_ef10_filter_remove_safe,
6783 	.filter_get_safe = efx_ef10_filter_get_safe,
6784 	.filter_clear_rx = efx_ef10_filter_clear_rx,
6785 	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
6786 	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
6787 	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
6788 #ifdef CONFIG_RFS_ACCEL
6789 	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
6790 #endif
6791 #ifdef CONFIG_SFC_MTD
6792 	.mtd_probe = efx_ef10_mtd_probe,
6793 	.mtd_rename = efx_mcdi_mtd_rename,
6794 	.mtd_read = efx_mcdi_mtd_read,
6795 	.mtd_erase = efx_mcdi_mtd_erase,
6796 	.mtd_write = efx_mcdi_mtd_write,
6797 	.mtd_sync = efx_mcdi_mtd_sync,
6798 #endif
6799 	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
6800 	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
6801 	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
6802 	.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
6803 	.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
6804 	.udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
6805 	.udp_tnl_add_port = efx_ef10_udp_tnl_add_port,
6806 	.udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
6807 	.udp_tnl_del_port = efx_ef10_udp_tnl_del_port,
6808 #ifdef CONFIG_SFC_SRIOV
6809 	.sriov_configure = efx_ef10_sriov_configure,
6810 	.sriov_init = efx_ef10_sriov_init,
6811 	.sriov_fini = efx_ef10_sriov_fini,
6812 	.sriov_wanted = efx_ef10_sriov_wanted,
6813 	.sriov_reset = efx_ef10_sriov_reset,
6814 	.sriov_flr = efx_ef10_sriov_flr,
6815 	.sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
6816 	.sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
6817 	.sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
6818 	.sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
6819 	.sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
6820 	.vswitching_probe = efx_ef10_vswitching_probe_pf,
6821 	.vswitching_restore = efx_ef10_vswitching_restore_pf,
6822 	.vswitching_remove = efx_ef10_vswitching_remove_pf,
6823 #endif
6824 	.get_mac_address = efx_ef10_get_mac_address_pf,
6825 	.set_mac_address = efx_ef10_set_mac_address,
6826 	.tso_versions = efx_ef10_tso_versions,
6827 
6828 	.get_phys_port_id = efx_ef10_get_phys_port_id,
6829 	.revision = EFX_REV_HUNT_A0,
6830 	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
6831 	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
6832 	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
6833 	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
6834 	.can_rx_scatter = true,
6835 	.always_rx_scatter = true,
6836 	.option_descriptors = true,
6837 	.min_interrupt_mode = EFX_INT_MODE_LEGACY,
6838 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
6839 	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
6840 	.offload_features = EF10_OFFLOAD_FEATURES,
6841 	.mcdi_max_ver = 2,
6842 	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
6843 	.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE) |
6844 			    (1 << HWTSTAMP_FILTER_ALL),
6845 	.rx_hash_key_size = 40,
6846 };
6847