// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC4000) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
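/* Note: each *_ORDER value encodes its entry count as (8 << order);
 * ef4_farch_init_common() asserts this relationship with BUILD_BUG_ON().
 */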

/* If EF4_MAX_INT_ERRORS internal errors occur within
 * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EF4_INT_ERROR_EXPIRE 3600
#define EF4_MAX_INT_ERRORS 5

/* Depth of the RX flush request FIFO */
#define EF4_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EF4_CHANNEL_MAGIC_TEST		0x000101
#define _EF4_CHANNEL_MAGIC_FILL		0x000102
#define _EF4_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EF4_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EF4_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EF4_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EF4_CHANNEL_MAGIC_TEST(_channel)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EF4_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_FILL,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_RX_DRAIN,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

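/* Worked example: EF4_CHANNEL_MAGIC_TX_DRAIN() for TX queue 2 encodes
 * as (0x000104 << 8) | 2 == 0x00010402, and _EF4_CHANNEL_MAGIC_CODE()
 * recovers the code as magic >> 8.
 */
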
static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
				     unsigned int index)
{
	ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

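/* Return true if *a and *b differ in any bit position covered by *mask */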
static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b,
				     const ef4_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

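/* Walk every maskable bit of each register, proving that it can be
 * both set and cleared in isolation; this catches bits stuck in
 * either direction.
 */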
int ef4_farch_test_registers(struct ef4_nic *efx,
			     const struct ef4_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	ef4_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EF4_INVERT_OWORD(imask);

		ef4_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EF4_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EF4_AND_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 1);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EF4_OR_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 0);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		ef4_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EF4_OWORD_FMT" read "EF4_OWORD_FMT
		  " at address 0x%x mask "EF4_OWORD_FMT"\n", EF4_OWORD_VAL(reg),
		  EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * ef4_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
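/* For example, a 4KB page at DMA address 0x12345000 is entered with
 * FRF_AZ_BUF_ADR_FBUF == 0x12345 (i.e. dma_addr >> 12).
 */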
static void
ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EF4_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EF4_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		ef4_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EF4_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int ef4_alloc_special_buffer(struct ef4_nic *efx,
				    struct ef4_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EF4_BUF_SIZE);

	if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EF4_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

static void
ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	ef4_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
{
	unsigned write_ptr;
	ef4_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,
					  const ef4_qword_t *txd)
{
	unsigned write_ptr;
	ef4_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	ef4_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;
	ef4_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = ef4_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EF4_TX_BUF_CONT != 1);
		EF4_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EF4_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = ef4_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		ef4_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		ef4_farch_notify_tx_desc(tx_queue);
	}
}

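/* Clamp a DMA descriptor so that it never crosses a 4K page boundary:
 * (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1 is the number of bytes left
 * before the next boundary (e.g. 0x10 when dma_addr ends in 0xff0).
 */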
unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1;

	len = min(limit, len);

	if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
		len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));

	return len;
}


/* Allocate hardware resources for a TX queue */
int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(ef4_qword_t));
}

void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t reg;

	/* Pin TX descriptor ring */
	ef4_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
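	/* Note: txd.entries counts 4KB buffer-table pages (512 eight-byte
	 * descriptors each), not ring slots, so __ffs() below yields the
	 * log2 page count that the DESCQ_SIZE field expects.
	 */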
	EF4_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD;
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EF4_MAX_TX_QUEUES > 128);

		ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EF4_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EF4_ZERO_OWORD(tx_desc_ptr);
	ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	ef4_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue)
{
	ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_rx_buffer *rx_buf;
	ef4_qword_t *rxd;

	rxd = ef4_rx_desc(rx_queue, index);
	rx_buf = ef4_rx_buffer(rx_queue, index);
	EF4_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		ef4_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			ef4_rx_queue_index(rx_queue));
}

int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(ef4_qword_t));
}

void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;
	bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	ef4_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EF4_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      ef4_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      ef4_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));
}

static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_oword_t rx_flush_descq;

	EF4_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     ef4_rx_queue_index(rx_queue));
	ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EF4_ZERO_OWORD(rx_desc_ptr);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	ef4_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue)
{
	ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* ef4_farch_flush_queues() must be woken up when all flushes are completed,
 * or when more RX flushes can be kicked off.
 */
static bool ef4_farch_flush_wake(struct ef4_nic *efx)
{
	/* Ensure that all updates are visible to ef4_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
{
	bool complete = true;
	ef4_oword_t txd_ptr_tbl;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				complete = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				ef4_farch_magic_event(channel,
						      EF4_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return complete;
}


/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int ef4_farch_do_flush(struct ef4_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int rc = 0;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_farch_flush_tx_queue(tx_queue);
		}
		ef4_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EF4_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					ef4_farch_flush_rx_queue(rx_queue);
				}
			}
		}

		timeout = wait_event_timeout(efx->flush_wq,
					     ef4_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !ef4_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

int ef4_farch_fini_dmaq(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = ef4_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel)
				ef4_farch_rx_fini(rx_queue);
			ef4_for_each_channel_tx_queue(tx_queue, channel)
				ef4_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (e.g. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events.  This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through
 * ef4_check_tx_flush_complete()).  If we don't fix this up, on the next call
 * to ef4_realloc_channels() we won't flush any RX queues because
 * efx->rxq_flush_outstanding is at the limit of 4 for batched flush requests;
 * and efx->active_queues gets messed up because we keep incrementing it for
 * the newly initialised queues, but it never went to zero previously.  Then
 * we get a timeout every time we try to restart the queues, as it doesn't go
 * back to zero when we should be flushing the queues.
 */
void ef4_farch_finish_flr(struct ef4_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}


/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void ef4_farch_ev_read_ack(struct ef4_channel *channel)
{
	ef4_dword_t reg;
	struct ef4_nic *efx = channel->efx;

	EF4_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	ef4_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
			      ef4_qword_t *event)
{
	ef4_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EF4_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic)
{
	ef4_qword_t event;

	EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	ef4_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct ef4_tx_queue *tx_queue;
	struct ef4_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		ef4_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		ef4_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EF4_QWORD_FMT"\n", channel->channel,
			  EF4_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
				      const ef4_qword_t *event)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	bool __maybe_unused rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_pause_frm;

	rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ?
			  0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);


	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	{
	/* Every error apart from tobe_disc and pause_frm */

	bool rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
				rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
				rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EF4_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EF4_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also, the "is multicast" and "matches multicast filter" flags can
 * be used to discard non-matching multicast packets.
 */
static void
ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct ef4_rx_queue *rx_queue;
	struct ef4_nic *efx = channel->efx;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = ef4_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			ef4_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			ef4_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EF4_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EF4_RX_PKT_TCP;
			fallthrough;
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EF4_RX_PKT_CSUMMED;
			fallthrough;
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EF4_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	ef4_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct ef4_tx_queue, then
 * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{
	struct ef4_tx_queue *tx_queue;
	int qid;

	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
					    qid % EF4_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			ef4_farch_magic_event(tx_queue->channel,
					      EF4_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush
 * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = ef4_get_channel(efx, qid);
	if (!ef4_channel_has_rx_queue(channel))
		return;
	rx_queue = ef4_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
				      EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (ef4_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
ef4_farch_handle_drain_event(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (ef4_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void ef4_farch_handle_generated_event(struct ef4_channel *channel,
					     ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	struct ef4_rx_queue *rx_queue =
		ef4_channel_has_rx_queue(channel) ?
		ef4_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EF4_CHANNEL_MAGIC_CODE(magic);

	if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so ef4_process_channel() won't refill the
		 * queue. Refill it here */
		ef4_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		ef4_farch_handle_drain_event(channel);
	} else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) {
		ef4_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EF4_QWORD_FMT"\n",
			  channel->channel, EF4_QWORD_VAL(*event));
	}
}

static void
ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		ef4_farch_handle_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		ef4_farch_handle_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d saw DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx,
				   EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data,
			  ev_sub_data);
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data,
			  ev_sub_data);
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int ef4_farch_ev_process(struct ef4_channel *channel, int budget)
{
	struct ef4_nic *efx = channel->efx;
	unsigned int read_ptr;
	ef4_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	if (budget <= 0)
		return spent;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = ef4_event(channel, read_ptr);
		event = *p_event;

		if (!ef4_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EF4_QWORD_FMT"\n",
			   channel->channel, EF4_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EF4_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			ef4_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += ef4_farch_handle_tx_event(channel,
								&event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			ef4_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			ef4_farch_handle_driver_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			fallthrough;
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EF4_QWORD_FMT ")\n", channel->channel,
				  ev_code, EF4_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Allocate buffer table entries for event queue */
int ef4_farch_ev_probe(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return ef4_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(ef4_qword_t));
}

int ef4_farch_ev_init(struct ef4_channel *channel)
{
	ef4_oword_t reg;
	struct ef4_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	ef4_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
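	/* An all-ones entry means "no event written here yet":
	 * ef4_event_present() tests for it, and ef4_farch_ev_process()
	 * re-writes each entry to all ones once it has been consumed.
	 */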
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}

void ef4_farch_ev_fini(struct ef4_channel *channel)
{
	ef4_oword_t reg;
	struct ef4_nic *efx = channel->efx;

	/* Remove event queue from card */
	EF4_ZERO_OWORD(reg);
	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	ef4_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void ef4_farch_ev_remove(struct ef4_channel *channel)
{
	ef4_free_special_buffer(channel->efx, &channel->eventq);
}


void ef4_farch_ev_test_generate(struct ef4_channel *channel)
{
	ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel));
}

void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue)
{
	ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
			      EF4_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void ef4_farch_interrupts(struct ef4_nic *efx,
				      bool enabled, bool force)
{
	ef4_oword_t int_en_reg_ker;

	EF4_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void ef4_farch_irq_enable_master(struct ef4_nic *efx)
{
	EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	ef4_farch_interrupts(efx, true, false);
}

void ef4_farch_irq_disable_master(struct ef4_nic *efx)
{
	/* Disable interrupts */
	ef4_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int ef4_farch_irq_test_generate(struct ef4_nic *efx)
{
	ef4_farch_interrupts(efx, true, true);
	return 0;
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	ef4_oword_t fatal_intr;
	int error, mem_perr;

	ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
		  EF4_OWORD_FMT ": %s\n", EF4_OWORD_VAL(*int_ker),
		  EF4_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error, dump which blocks are offending */
	mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		ef4_oword_t reg;
		ef4_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n",
			  EF4_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (ef4_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	ef4_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EF4_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct ef4_nic *efx = dev_id;
	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
	ef4_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct ef4_channel *channel;
	ef4_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	ef4_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EF4_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return ef4_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			ef4_for_each_channel(channel, efx) {
				if (queues & 1)
					ef4_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;

	} else {
		ef4_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			ef4_for_each_channel(channel, efx) {
				event = ef4_event(channel,
						  channel->eventq_read_ptr);
				if (ef4_event_present(event))
					ef4_schedule_channel_irq(channel);
				else
					ef4_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
{
	struct ef4_msi_context *context = dev_id;
	struct ef4_nic *efx = context->efx;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return ef4_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	ef4_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}

/* Set up the RSS indirection table.
 * This maps from the packet's hash value to an RX queue.
 */
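/* (The hardware presumably indexes this table with the low-order bits
 * of the RSS hash; there is one row per index value, as the
 * BUILD_BUG_ON() below checks against efx->rx_indir_table.)
 */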
1615 void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
1616 {
1617 	size_t i = 0;
1618 	ef4_dword_t dword;
1619 
1620 	BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0);
1621 
1622 	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1623 		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
1624 
1625 	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1626 		EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1627 				     efx->rx_indir_table[i]);
1628 		ef4_writed(efx, &dword,
1629 			   FR_BZ_RX_INDIRECTION_TBL +
1630 			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1631 	}
1632 }
1633 
1634 u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
1635 {
1636 	ef4_oword_t altera_build;
1637 	ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1638 	return EF4_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1639 }
1640 
1641 void ef4_farch_init_common(struct ef4_nic *efx)
1642 {
1643 	ef4_oword_t temp;
1644 
1645 	/* Set positions of descriptor caches in SRAM. */
1646 	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1647 	ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1648 	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1649 	ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1650 
1651 	/* Set TX descriptor cache size. */
1652 	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1653 	EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1654 	ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1655 
1656 	/* Set RX descriptor cache size.  Set low watermark to size-8, as
1657 	 * this allows most efficient prefetching.
1658 	 */
1659 	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1660 	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1661 	ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1662 	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1663 	ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1664 
1665 	/* Program INT_KER address */
1666 	EF4_POPULATE_OWORD_2(temp,
1667 			     FRF_AZ_NORM_INT_VEC_DIS_KER,
1668 			     EF4_INT_MODE_USE_MSI(efx),
1669 			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1670 	ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1671 
1672 	/* Use a valid MSI-X vector */
1673 	efx->irq_level = 0;
1674 
1675 	/* Enable all the genuinely fatal interrupts.  (They are still
1676 	 * masked by the overall interrupt mask, controlled by
1677 	 * falcon_interrupts()).
1678 	 *
1679 	 * Note: the value is written inverted, so all other fatal interrupts are enabled as well
1680 	 */
1681 	EF4_POPULATE_OWORD_3(temp,
1682 			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1683 			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1684 			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1685 	EF4_INVERT_OWORD(temp);
1686 	ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1687 
1688 	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1689 	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1690 	 */
1691 	ef4_reado(efx, &temp, FR_AZ_TX_RESERVED);
1692 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1693 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1694 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1695 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1696 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1697 	/* Enable SW_EV so the char driver can inherit it - assumed harmless here */
1698 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1699 	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
1700 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1701 	/* Disable hardware watchdog which can misfire */
1702 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1703 	/* Squash TX of packets of 16 bytes or less */
1704 	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
1705 		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1706 	ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1707 
1708 	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
1709 		EF4_POPULATE_OWORD_4(temp,
1710 				     /* Default values */
1711 				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1712 				     FRF_BZ_TX_PACE_SB_AF, 0xb,
1713 				     FRF_BZ_TX_PACE_FB_BASE, 0,
1714 				     /* Allow large pace values in the
1715 				      * fast bin. */
1716 				     FRF_BZ_TX_PACE_BIN_TH,
1717 				     FFE_BZ_TX_PACE_RESERVED);
1718 		ef4_writeo(efx, &temp, FR_BZ_TX_PACE);
1719 	}
1720 }
1721 
1722 /**************************************************************************
1723  *
1724  * Filter tables
1725  *
1726  **************************************************************************
1727  */
1728 
1729 /* "Fudge factors" - difference between programmed value and actual depth.
1730  * Due to pipelined implementation we need to program H/W with a value that
1731  * is larger than the hop limit we want.
1732  */
1733 #define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1734 #define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
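
/* For example, a stored wildcard search limit of 8 is programmed into the
 * hardware as 8 + EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD = 11; see
 * ef4_farch_filter_push_rx_config() below.
 */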
1735 
1736 /* Hard maximum search limit.  Hardware will time out beyond 200-something.
1737  * We also need to avoid infinite loops in ef4_farch_filter_search() when the
1738  * table is full.
1739  */
1740 #define EF4_FARCH_FILTER_CTL_SRCH_MAX 200
1741 
1742 /* Don't try very hard to find space for performance hints, as this is
1743  * counter-productive. */
1744 #define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1745 
1746 enum ef4_farch_filter_type {
1747 	EF4_FARCH_FILTER_TCP_FULL = 0,
1748 	EF4_FARCH_FILTER_TCP_WILD,
1749 	EF4_FARCH_FILTER_UDP_FULL,
1750 	EF4_FARCH_FILTER_UDP_WILD,
1751 	EF4_FARCH_FILTER_MAC_FULL = 4,
1752 	EF4_FARCH_FILTER_MAC_WILD,
1753 	EF4_FARCH_FILTER_UC_DEF = 8,
1754 	EF4_FARCH_FILTER_MC_DEF,
1755 	EF4_FARCH_FILTER_TYPE_COUNT,		/* number of specific types */
1756 };
1757 
1758 enum ef4_farch_filter_table_id {
1759 	EF4_FARCH_FILTER_TABLE_RX_IP = 0,
1760 	EF4_FARCH_FILTER_TABLE_RX_MAC,
1761 	EF4_FARCH_FILTER_TABLE_RX_DEF,
1762 	EF4_FARCH_FILTER_TABLE_TX_MAC,
1763 	EF4_FARCH_FILTER_TABLE_COUNT,
1764 };
1765 
1766 enum ef4_farch_filter_index {
1767 	EF4_FARCH_FILTER_INDEX_UC_DEF,
1768 	EF4_FARCH_FILTER_INDEX_MC_DEF,
1769 	EF4_FARCH_FILTER_SIZE_RX_DEF,
1770 };
1771 
1772 struct ef4_farch_filter_spec {
1773 	u8	type:4;
1774 	u8	priority:4;
1775 	u8	flags;
1776 	u16	dmaq_id;
1777 	u32	data[3];
1778 };
1779 
1780 struct ef4_farch_filter_table {
1781 	enum ef4_farch_filter_table_id id;
1782 	u32		offset;		/* address of table relative to BAR */
1783 	unsigned	size;		/* number of entries */
1784 	unsigned	step;		/* step between entries */
1785 	unsigned	used;		/* number currently used */
1786 	unsigned long	*used_bitmap;
1787 	struct ef4_farch_filter_spec *spec;
1788 	unsigned	search_limit[EF4_FARCH_FILTER_TYPE_COUNT];
1789 };
1790 
1791 struct ef4_farch_filter_state {
1792 	struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT];
1793 };
1794 
1795 static void
1796 ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
1797 				   struct ef4_farch_filter_table *table,
1798 				   unsigned int filter_idx);
1799 
1800 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1801  * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
1802 static u16 ef4_farch_filter_hash(u32 key)
1803 {
1804 	u16 tmp;
1805 
1806 	/* First 16 rounds */
1807 	tmp = 0x1fff ^ key >> 16;
1808 	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1809 	tmp = tmp ^ tmp >> 9;
1810 	/* Last 16 rounds */
1811 	tmp = tmp ^ tmp << 13 ^ key;
1812 	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1813 	return tmp ^ tmp >> 9;
1814 }
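
/* Each shift/XOR triple above folds 16 key bits into the state, advancing
 * the LFSR 16 steps at a time instead of bit by bit.
 */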
1815 
1816 /* To allow for hash collisions, filter search continues at these
1817  * increments from the first possible entry selected by the hash.
 * The increment is always odd, so it is co-prime with the power-of-two
 * table size and the probe sequence eventually visits every entry. */
1818 static u16 ef4_farch_filter_increment(u32 key)
1819 {
1820 	return key * 2 - 1;
1821 }
1822 
1823 static enum ef4_farch_filter_table_id
1824 ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec)
1825 {
1826 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1827 		     (EF4_FARCH_FILTER_TCP_FULL >> 2));
1828 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1829 		     (EF4_FARCH_FILTER_TCP_WILD >> 2));
1830 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1831 		     (EF4_FARCH_FILTER_UDP_FULL >> 2));
1832 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1833 		     (EF4_FARCH_FILTER_UDP_WILD >> 2));
1834 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
1835 		     (EF4_FARCH_FILTER_MAC_FULL >> 2));
1836 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
1837 		     (EF4_FARCH_FILTER_MAC_WILD >> 2));
1838 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_TX_MAC !=
1839 		     EF4_FARCH_FILTER_TABLE_RX_MAC + 2);
1840 	return (spec->type >> 2) + ((spec->flags & EF4_FILTER_FLAG_TX) ? 2 : 0);
1841 }
1842 
1843 static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx)
1844 {
1845 	struct ef4_farch_filter_state *state = efx->filter_state;
1846 	struct ef4_farch_filter_table *table;
1847 	ef4_oword_t filter_ctl;
1848 
1849 	ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1850 
1851 	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
1852 	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1853 			    table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
1854 			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1855 	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1856 			    table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +
1857 			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1858 	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1859 			    table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +
1860 			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1861 	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1862 			    table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +
1863 			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1864 
1865 	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];
1866 	if (table->size) {
1867 		EF4_SET_OWORD_FIELD(
1868 			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1869 			table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
1870 			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1871 		EF4_SET_OWORD_FIELD(
1872 			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1873 			table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
1874 			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1875 	}
1876 
1877 	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
1878 	if (table->size) {
1879 		EF4_SET_OWORD_FIELD(
1880 			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1881 			table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1882 		EF4_SET_OWORD_FIELD(
1883 			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1884 			!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
1885 			   EF4_FILTER_FLAG_RX_RSS));
1886 		EF4_SET_OWORD_FIELD(
1887 			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1888 			table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1889 		EF4_SET_OWORD_FIELD(
1890 			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1891 			!!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
1892 			   EF4_FILTER_FLAG_RX_RSS));
1893 
1894 		/* There is a single bit to enable RX scatter for all
1895 		 * unmatched packets.  Only set it if scatter is
1896 		 * enabled in both filter specs.
1897 		 */
1898 		EF4_SET_OWORD_FIELD(
1899 			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1900 			!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
1901 			   table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
1902 			   EF4_FILTER_FLAG_RX_SCATTER));
1903 	} else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
1904 		/* We don't expose 'default' filters because unmatched
1905 		 * packets always go to the queue number found in the
1906 		 * RSS table.  But we still need to set the RX scatter
1907 		 * bit here.
1908 		 */
1909 		EF4_SET_OWORD_FIELD(
1910 			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1911 			efx->rx_scatter);
1912 	}
1913 
1914 	ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1915 }
1916 
1917 static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx)
1918 {
1919 	struct ef4_farch_filter_state *state = efx->filter_state;
1920 	struct ef4_farch_filter_table *table;
1921 	ef4_oword_t tx_cfg;
1922 
1923 	ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
1924 
1925 	table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC];
1926 	if (table->size) {
1927 		EF4_SET_OWORD_FIELD(
1928 			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
1929 			table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
1930 			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1931 		EF4_SET_OWORD_FIELD(
1932 			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
1933 			table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
1934 			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1935 	}
1936 
1937 	ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
1938 }
1939 
1940 static int
1941 ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
1942 			       const struct ef4_filter_spec *gen_spec)
1943 {
1944 	bool is_full = false;
1945 
1946 	if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) &&
1947 	    gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT)
1948 		return -EINVAL;
1949 
1950 	spec->priority = gen_spec->priority;
1951 	spec->flags = gen_spec->flags;
1952 	spec->dmaq_id = gen_spec->dmaq_id;
1953 
1954 	switch (gen_spec->match_flags) {
1955 	case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
1956 	      EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
1957 	      EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
1958 		is_full = true;
1959 		fallthrough;
1960 	case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
1961 	      EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
1962 		__be32 rhost, host1, host2;
1963 		__be16 rport, port1, port2;
1964 
1965 		EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX));
1966 
1967 		if (gen_spec->ether_type != htons(ETH_P_IP))
1968 			return -EPROTONOSUPPORT;
1969 		if (gen_spec->loc_port == 0 ||
1970 		    (is_full && gen_spec->rem_port == 0))
1971 			return -EADDRNOTAVAIL;
1972 		switch (gen_spec->ip_proto) {
1973 		case IPPROTO_TCP:
1974 			spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL :
1975 				      EF4_FARCH_FILTER_TCP_WILD);
1976 			break;
1977 		case IPPROTO_UDP:
1978 			spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL :
1979 				      EF4_FARCH_FILTER_UDP_WILD);
1980 			break;
1981 		default:
1982 			return -EPROTONOSUPPORT;
1983 		}
1984 
1985 		/* Filter is constructed in terms of source and destination,
1986 		 * with the odd wrinkle that the ports are swapped in a UDP
1987 		 * wildcard filter.  We need to convert from local and remote
1988 		 * (= zero for wildcard) addresses.
1989 		 */
1990 		rhost = is_full ? gen_spec->rem_host[0] : 0;
1991 		rport = is_full ? gen_spec->rem_port : 0;
1992 		host1 = rhost;
1993 		host2 = gen_spec->loc_host[0];
1994 		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
1995 			port1 = gen_spec->loc_port;
1996 			port2 = rport;
1997 		} else {
1998 			port1 = rport;
1999 			port2 = gen_spec->loc_port;
2000 		}
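		/* Resulting layout, in host byte order:
		 *	data[0] = host1[15:0]:port1
		 *	data[1] = port2:host1[31:16]
		 *	data[2] = host2
		 */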
2001 		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2002 		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2003 		spec->data[2] = ntohl(host2);
2004 
2005 		break;
2006 	}
2007 
2008 	case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
2009 		is_full = true;
2010 		fallthrough;
2011 	case EF4_FILTER_MATCH_LOC_MAC:
2012 		spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
2013 			      EF4_FARCH_FILTER_MAC_WILD);
2014 		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2015 		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2016 				 gen_spec->loc_mac[3] << 16 |
2017 				 gen_spec->loc_mac[4] << 8 |
2018 				 gen_spec->loc_mac[5]);
2019 		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2020 				 gen_spec->loc_mac[1]);
2021 		break;
2022 
2023 	case EF4_FILTER_MATCH_LOC_MAC_IG:
2024 		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2025 			      EF4_FARCH_FILTER_MC_DEF :
2026 			      EF4_FARCH_FILTER_UC_DEF);
2027 		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2028 		break;
2029 
2030 	default:
2031 		return -EPROTONOSUPPORT;
2032 	}
2033 
2034 	return 0;
2035 }
2036 
2037 static void
2038 ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
2039 			     const struct ef4_farch_filter_spec *spec)
2040 {
2041 	bool is_full = false;
2042 
2043 	/* *gen_spec should be completely initialised, to be consistent
2044 	 * with ef4_filter_init_{rx,tx}() and in case we want to copy
2045 	 * it back to userland.
2046 	 */
2047 	memset(gen_spec, 0, sizeof(*gen_spec));
2048 
2049 	gen_spec->priority = spec->priority;
2050 	gen_spec->flags = spec->flags;
2051 	gen_spec->dmaq_id = spec->dmaq_id;
2052 
2053 	switch (spec->type) {
2054 	case EF4_FARCH_FILTER_TCP_FULL:
2055 	case EF4_FARCH_FILTER_UDP_FULL:
2056 		is_full = true;
2057 		fallthrough;
2058 	case EF4_FARCH_FILTER_TCP_WILD:
2059 	case EF4_FARCH_FILTER_UDP_WILD: {
2060 		__be32 host1, host2;
2061 		__be16 port1, port2;
2062 
2063 		gen_spec->match_flags =
2064 			EF4_FILTER_MATCH_ETHER_TYPE |
2065 			EF4_FILTER_MATCH_IP_PROTO |
2066 			EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
2067 		if (is_full)
2068 			gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST |
2069 						  EF4_FILTER_MATCH_REM_PORT);
2070 		gen_spec->ether_type = htons(ETH_P_IP);
2071 		gen_spec->ip_proto =
2072 			(spec->type == EF4_FARCH_FILTER_TCP_FULL ||
2073 			 spec->type == EF4_FARCH_FILTER_TCP_WILD) ?
2074 			IPPROTO_TCP : IPPROTO_UDP;
2075 
2076 		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2077 		port1 = htons(spec->data[0]);
2078 		host2 = htonl(spec->data[2]);
2079 		port2 = htons(spec->data[1] >> 16);
2080 		if (spec->flags & EF4_FILTER_FLAG_TX) {
2081 			gen_spec->loc_host[0] = host1;
2082 			gen_spec->rem_host[0] = host2;
2083 		} else {
2084 			gen_spec->loc_host[0] = host2;
2085 			gen_spec->rem_host[0] = host1;
2086 		}
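		/* Swap the ports back for UDP wildcard filters (and for the
		 * TX direction), matching the wrinkle described in
		 * ef4_farch_filter_from_gen_spec().
		 */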
2087 		if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^
2088 		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2089 			gen_spec->loc_port = port1;
2090 			gen_spec->rem_port = port2;
2091 		} else {
2092 			gen_spec->loc_port = port2;
2093 			gen_spec->rem_port = port1;
2094 		}
2095 
2096 		break;
2097 	}
2098 
2099 	case EF4_FARCH_FILTER_MAC_FULL:
2100 		is_full = true;
2101 		fallthrough;
2102 	case EF4_FARCH_FILTER_MAC_WILD:
2103 		gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
2104 		if (is_full)
2105 			gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
2106 		gen_spec->loc_mac[0] = spec->data[2] >> 8;
2107 		gen_spec->loc_mac[1] = spec->data[2];
2108 		gen_spec->loc_mac[2] = spec->data[1] >> 24;
2109 		gen_spec->loc_mac[3] = spec->data[1] >> 16;
2110 		gen_spec->loc_mac[4] = spec->data[1] >> 8;
2111 		gen_spec->loc_mac[5] = spec->data[1];
2112 		gen_spec->outer_vid = htons(spec->data[0]);
2113 		break;
2114 
2115 	case EF4_FARCH_FILTER_UC_DEF:
2116 	case EF4_FARCH_FILTER_MC_DEF:
2117 		gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG;
2118 		gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF;
2119 		break;
2120 
2121 	default:
2122 		WARN_ON(1);
2123 		break;
2124 	}
2125 }
2126 
2127 static void
2128 ef4_farch_filter_init_rx_auto(struct ef4_nic *efx,
2129 			      struct ef4_farch_filter_spec *spec)
2130 {
2131 	/* If there's only one channel then disable RSS for non-VF
2132 	 * traffic, thereby allowing VFs to use RSS when the PF can't.
2133 	 */
2134 	spec->priority = EF4_FILTER_PRI_AUTO;
2135 	spec->flags = (EF4_FILTER_FLAG_RX |
2136 		       (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) |
2137 		       (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0));
2138 	spec->dmaq_id = 0;
2139 }
2140 
2141 /* Build a filter entry and return its n-tuple key. */
2142 static u32 ef4_farch_filter_build(ef4_oword_t *filter,
2143 				  struct ef4_farch_filter_spec *spec)
2144 {
2145 	u32 data3;
2146 
2147 	switch (ef4_farch_filter_spec_table_id(spec)) {
2148 	case EF4_FARCH_FILTER_TABLE_RX_IP: {
2149 		bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL ||
2150 			       spec->type == EF4_FARCH_FILTER_UDP_WILD);
2151 		EF4_POPULATE_OWORD_7(
2152 			*filter,
2153 			FRF_BZ_RSS_EN,
2154 			!!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
2155 			FRF_BZ_SCATTER_EN,
2156 			!!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
2157 			FRF_BZ_TCP_UDP, is_udp,
2158 			FRF_BZ_RXQ_ID, spec->dmaq_id,
2159 			EF4_DWORD_2, spec->data[2],
2160 			EF4_DWORD_1, spec->data[1],
2161 			EF4_DWORD_0, spec->data[0]);
2162 		data3 = is_udp;
2163 		break;
2164 	}
2165 
2166 	case EF4_FARCH_FILTER_TABLE_RX_MAC: {
2167 		bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
2168 		EF4_POPULATE_OWORD_7(
2169 			*filter,
2170 			FRF_CZ_RMFT_RSS_EN,
2171 			!!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
2172 			FRF_CZ_RMFT_SCATTER_EN,
2173 			!!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
2174 			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2175 			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2176 			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2177 			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2178 			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2179 		data3 = is_wild;
2180 		break;
2181 	}
2182 
2183 	case EF4_FARCH_FILTER_TABLE_TX_MAC: {
2184 		bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
2185 		EF4_POPULATE_OWORD_5(*filter,
2186 				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2187 				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2188 				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2189 				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2190 				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2191 		data3 = is_wild | spec->dmaq_id << 1;
2192 		break;
2193 	}
2194 
2195 	default:
2196 		BUG();
2197 	}
2198 
2199 	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2200 }
2201 
2202 static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left,
2203 				   const struct ef4_farch_filter_spec *right)
2204 {
2205 	if (left->type != right->type ||
2206 	    memcmp(left->data, right->data, sizeof(left->data)))
2207 		return false;
2208 
2209 	if (left->flags & EF4_FILTER_FLAG_TX &&
2210 	    left->dmaq_id != right->dmaq_id)
2211 		return false;
2212 
2213 	return true;
2214 }
2215 
2216 /*
2217  * Construct/deconstruct external filter IDs.  At least the RX filter
2218  * IDs must be ordered by matching priority, for RX NFC semantics.
2219  *
2220  * Deconstruction needs to be robust against invalid IDs so that
2221  * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can
2222  * accept user-provided IDs.
2223  */
2224 
2225 #define EF4_FARCH_FILTER_MATCH_PRI_COUNT	5
2226 
2227 static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] = {
2228 	[EF4_FARCH_FILTER_TCP_FULL]	= 0,
2229 	[EF4_FARCH_FILTER_UDP_FULL]	= 0,
2230 	[EF4_FARCH_FILTER_TCP_WILD]	= 1,
2231 	[EF4_FARCH_FILTER_UDP_WILD]	= 1,
2232 	[EF4_FARCH_FILTER_MAC_FULL]	= 2,
2233 	[EF4_FARCH_FILTER_MAC_WILD]	= 3,
2234 	[EF4_FARCH_FILTER_UC_DEF]	= 4,
2235 	[EF4_FARCH_FILTER_MC_DEF]	= 4,
2236 };
2237 
2238 static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] = {
2239 	EF4_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
2240 	EF4_FARCH_FILTER_TABLE_RX_IP,
2241 	EF4_FARCH_FILTER_TABLE_RX_MAC,
2242 	EF4_FARCH_FILTER_TABLE_RX_MAC,
2243 	EF4_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
2244 	EF4_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
2245 	EF4_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
2246 };
2247 
2248 #define EF4_FARCH_FILTER_INDEX_WIDTH 13
2249 #define EF4_FARCH_FILTER_INDEX_MASK ((1 << EF4_FARCH_FILTER_INDEX_WIDTH) - 1)
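
/* Worked example: an RX TCP_WILD filter (match priority 1) at table index 5
 * gets ID 1 << EF4_FARCH_FILTER_INDEX_WIDTH | 5 = 0x2005, and range 1 maps
 * back to EF4_FARCH_FILTER_TABLE_RX_IP above.
 */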
2250 
2251 static inline u32
2252 ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec,
2253 			 unsigned int index)
2254 {
2255 	unsigned int range;
2256 
2257 	range = ef4_farch_filter_type_match_pri[spec->type];
2258 	if (!(spec->flags & EF4_FILTER_FLAG_RX))
2259 		range += EF4_FARCH_FILTER_MATCH_PRI_COUNT;
2260 
2261 	return range << EF4_FARCH_FILTER_INDEX_WIDTH | index;
2262 }
2263 
2264 static inline enum ef4_farch_filter_table_id
2265 ef4_farch_filter_id_table_id(u32 id)
2266 {
2267 	unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH;
2268 
2269 	if (range < ARRAY_SIZE(ef4_farch_filter_range_table))
2270 		return ef4_farch_filter_range_table[range];
2271 	else
2272 		return EF4_FARCH_FILTER_TABLE_COUNT; /* invalid */
2273 }
2274 
2275 static inline unsigned int ef4_farch_filter_id_index(u32 id)
2276 {
2277 	return id & EF4_FARCH_FILTER_INDEX_MASK;
2278 }
2279 
2280 u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx)
2281 {
2282 	struct ef4_farch_filter_state *state = efx->filter_state;
2283 	unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2284 	enum ef4_farch_filter_table_id table_id;
2285 
2286 	do {
2287 		table_id = ef4_farch_filter_range_table[range];
2288 		if (state->table[table_id].size != 0)
2289 			return range << EF4_FARCH_FILTER_INDEX_WIDTH |
2290 				state->table[table_id].size;
2291 	} while (range--);
2292 
2293 	return 0;
2294 }
2295 
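/* Usage sketch - assumes the ef4_filter_* helpers declared in filter.h:
 *
 *	struct ef4_filter_spec spec;
 *	s32 id;
 *
 *	ef4_filter_init_rx(&spec, EF4_FILTER_PRI_MANUAL, 0, rxq_index);
 *	ef4_filter_set_ipv4_local(&spec, IPPROTO_TCP, loc_host, loc_port);
 *	id = ef4_farch_filter_insert(efx, &spec, false);
 *
 * id is a negative errno on failure, otherwise the external filter ID.
 */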
2296 s32 ef4_farch_filter_insert(struct ef4_nic *efx,
2297 			    struct ef4_filter_spec *gen_spec,
2298 			    bool replace_equal)
2299 {
2300 	struct ef4_farch_filter_state *state = efx->filter_state;
2301 	struct ef4_farch_filter_table *table;
2302 	struct ef4_farch_filter_spec spec;
2303 	ef4_oword_t filter;
2304 	int rep_index, ins_index;
2305 	unsigned int depth = 0;
2306 	int rc;
2307 
2308 	rc = ef4_farch_filter_from_gen_spec(&spec, gen_spec);
2309 	if (rc)
2310 		return rc;
2311 
2312 	table = &state->table[ef4_farch_filter_spec_table_id(&spec)];
2313 	if (table->size == 0)
2314 		return -EINVAL;
2315 
2316 	netif_vdbg(efx, hw, efx->net_dev,
2317 		   "%s: type %d search_limit=%d\n", __func__, spec.type,
2318 		   table->search_limit[spec.type]);
2319 
2320 	if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
2321 		/* One filter spec per type */
2322 		BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_UC_DEF != 0);
2323 		BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_MC_DEF !=
2324 			     EF4_FARCH_FILTER_MC_DEF - EF4_FARCH_FILTER_UC_DEF);
2325 		rep_index = spec.type - EF4_FARCH_FILTER_UC_DEF;
2326 		ins_index = rep_index;
2327 
2328 		spin_lock_bh(&efx->filter_lock);
2329 	} else {
2330 		/* Search concurrently for
2331 		 * (1) a filter to be replaced (rep_index): any filter
2332 		 *     with the same match values, up to the current
2333 		 *     search depth for this type, and
2334 		 * (2) the insertion point (ins_index): (1) or any
2335 		 *     free slot before it or up to the maximum search
2336 		 *     depth for this priority
2337 		 * We fail if we cannot find (2).
2338 		 *
2339 		 * We can stop once either
2340 		 * (a) we find (1), in which case we have definitely
2341 		 *     found (2) as well; or
2342 		 * (b) we have searched exhaustively for (1), and have
2343 		 *     either found (2) or searched exhaustively for it
2344 		 */
2345 		u32 key = ef4_farch_filter_build(&filter, &spec);
2346 		unsigned int hash = ef4_farch_filter_hash(key);
2347 		unsigned int incr = ef4_farch_filter_increment(key);
2348 		unsigned int max_rep_depth = table->search_limit[spec.type];
2349 		unsigned int max_ins_depth =
2350 			spec.priority <= EF4_FILTER_PRI_HINT ?
2351 			EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2352 			EF4_FARCH_FILTER_CTL_SRCH_MAX;
2353 		unsigned int i = hash & (table->size - 1);
2354 
2355 		ins_index = -1;
2356 		depth = 1;
2357 
2358 		spin_lock_bh(&efx->filter_lock);
2359 
2360 		for (;;) {
2361 			if (!test_bit(i, table->used_bitmap)) {
2362 				if (ins_index < 0)
2363 					ins_index = i;
2364 			} else if (ef4_farch_filter_equal(&spec,
2365 							  &table->spec[i])) {
2366 				/* Case (a) */
2367 				if (ins_index < 0)
2368 					ins_index = i;
2369 				rep_index = i;
2370 				break;
2371 			}
2372 
2373 			if (depth >= max_rep_depth &&
2374 			    (ins_index >= 0 || depth >= max_ins_depth)) {
2375 				/* Case (b) */
2376 				if (ins_index < 0) {
2377 					rc = -EBUSY;
2378 					goto out;
2379 				}
2380 				rep_index = -1;
2381 				break;
2382 			}
2383 
2384 			i = (i + incr) & (table->size - 1);
2385 			++depth;
2386 		}
2387 	}
2388 
2389 	/* If we found a filter to be replaced, check whether we
2390 	 * should do so
2391 	 */
2392 	if (rep_index >= 0) {
2393 		struct ef4_farch_filter_spec *saved_spec =
2394 			&table->spec[rep_index];
2395 
2396 		if (spec.priority == saved_spec->priority && !replace_equal) {
2397 			rc = -EEXIST;
2398 			goto out;
2399 		}
2400 		if (spec.priority < saved_spec->priority) {
2401 			rc = -EPERM;
2402 			goto out;
2403 		}
2404 		if (saved_spec->priority == EF4_FILTER_PRI_AUTO ||
2405 		    saved_spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO)
2406 			spec.flags |= EF4_FILTER_FLAG_RX_OVER_AUTO;
2407 	}
2408 
2409 	/* Insert the filter */
2410 	if (ins_index != rep_index) {
2411 		__set_bit(ins_index, table->used_bitmap);
2412 		++table->used;
2413 	}
2414 	table->spec[ins_index] = spec;
2415 
2416 	if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
2417 		ef4_farch_filter_push_rx_config(efx);
2418 	} else {
2419 		if (table->search_limit[spec.type] < depth) {
2420 			table->search_limit[spec.type] = depth;
2421 			if (spec.flags & EF4_FILTER_FLAG_TX)
2422 				ef4_farch_filter_push_tx_limits(efx);
2423 			else
2424 				ef4_farch_filter_push_rx_config(efx);
2425 		}
2426 
2427 		ef4_writeo(efx, &filter,
2428 			   table->offset + table->step * ins_index);
2429 
2430 		/* If we were able to replace a filter by inserting
2431 		 * at a lower depth, clear the replaced filter
2432 		 */
2433 		if (ins_index != rep_index && rep_index >= 0)
2434 			ef4_farch_filter_table_clear_entry(efx, table,
2435 							   rep_index);
2436 	}
2437 
2438 	netif_vdbg(efx, hw, efx->net_dev,
2439 		   "%s: filter type %d index %d rxq %u set\n",
2440 		   __func__, spec.type, ins_index, spec.dmaq_id);
2441 	rc = ef4_farch_filter_make_id(&spec, ins_index);
2442 
2443 out:
2444 	spin_unlock_bh(&efx->filter_lock);
2445 	return rc;
2446 }
2447 
2448 static void
2449 ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
2450 				   struct ef4_farch_filter_table *table,
2451 				   unsigned int filter_idx)
2452 {
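	/* Static, hence implicitly zero-filled: writing it zeroes out the
	 * hardware filter entry.
	 */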
2453 	static ef4_oword_t filter;
2454 
2455 	EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2456 	BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2457 
2458 	__clear_bit(filter_idx, table->used_bitmap);
2459 	--table->used;
2460 	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2461 
2462 	ef4_writeo(efx, &filter, table->offset + table->step * filter_idx);
2463 
2464 	/* If this filter required a greater search depth than
2465 	 * any other, the search limit for its type can now be
2466 	 * decreased.  However, it is hard to determine that
2467 	 * unless the table has become completely empty - in
2468 	 * which case, all its search limits can be set to 0.
2469 	 */
2470 	if (unlikely(table->used == 0)) {
2471 		memset(table->search_limit, 0, sizeof(table->search_limit));
2472 		if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC)
2473 			ef4_farch_filter_push_tx_limits(efx);
2474 		else
2475 			ef4_farch_filter_push_rx_config(efx);
2476 	}
2477 }
2478 
2479 static int ef4_farch_filter_remove(struct ef4_nic *efx,
2480 				   struct ef4_farch_filter_table *table,
2481 				   unsigned int filter_idx,
2482 				   enum ef4_filter_priority priority)
2483 {
2484 	struct ef4_farch_filter_spec *spec = &table->spec[filter_idx];
2485 
2486 	if (!test_bit(filter_idx, table->used_bitmap) ||
2487 	    spec->priority != priority)
2488 		return -ENOENT;
2489 
2490 	if (spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) {
2491 		ef4_farch_filter_init_rx_auto(efx, spec);
2492 		ef4_farch_filter_push_rx_config(efx);
2493 	} else {
2494 		ef4_farch_filter_table_clear_entry(efx, table, filter_idx);
2495 	}
2496 
2497 	return 0;
2498 }
2499 
2500 int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
2501 				 enum ef4_filter_priority priority,
2502 				 u32 filter_id)
2503 {
2504 	struct ef4_farch_filter_state *state = efx->filter_state;
2505 	enum ef4_farch_filter_table_id table_id;
2506 	struct ef4_farch_filter_table *table;
2507 	unsigned int filter_idx;
2508 	int rc;
2509 
2510 	table_id = ef4_farch_filter_id_table_id(filter_id);
2511 	if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
2512 		return -ENOENT;
2513 	table = &state->table[table_id];
2514 
2515 	filter_idx = ef4_farch_filter_id_index(filter_id);
2516 	if (filter_idx >= table->size)
2517 		return -ENOENT;
2518 
2519 	spin_lock_bh(&efx->filter_lock);
2520 	rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
2521 	spin_unlock_bh(&efx->filter_lock);
2522 
2523 	return rc;
2524 }
2525 
2526 int ef4_farch_filter_get_safe(struct ef4_nic *efx,
2527 			      enum ef4_filter_priority priority,
2528 			      u32 filter_id, struct ef4_filter_spec *spec_buf)
2529 {
2530 	struct ef4_farch_filter_state *state = efx->filter_state;
2531 	enum ef4_farch_filter_table_id table_id;
2532 	struct ef4_farch_filter_table *table;
2533 	struct ef4_farch_filter_spec *spec;
2534 	unsigned int filter_idx;
2535 	int rc;
2536 
2537 	table_id = ef4_farch_filter_id_table_id(filter_id);
2538 	if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
2539 		return -ENOENT;
2540 	table = &state->table[table_id];
2541 
2542 	filter_idx = ef4_farch_filter_id_index(filter_id);
2543 	if (filter_idx >= table->size)
2544 		return -ENOENT;
2545 	spec = &table->spec[filter_idx];
2546 
2547 	spin_lock_bh(&efx->filter_lock);
2548 
2549 	if (test_bit(filter_idx, table->used_bitmap) &&
2550 	    spec->priority == priority) {
2551 		ef4_farch_filter_to_gen_spec(spec_buf, spec);
2552 		rc = 0;
2553 	} else {
2554 		rc = -ENOENT;
2555 	}
2556 
2557 	spin_unlock_bh(&efx->filter_lock);
2558 
2559 	return rc;
2560 }
2561 
2562 static void
2563 ef4_farch_filter_table_clear(struct ef4_nic *efx,
2564 			     enum ef4_farch_filter_table_id table_id,
2565 			     enum ef4_filter_priority priority)
2566 {
2567 	struct ef4_farch_filter_state *state = efx->filter_state;
2568 	struct ef4_farch_filter_table *table = &state->table[table_id];
2569 	unsigned int filter_idx;
2570 
2571 	spin_lock_bh(&efx->filter_lock);
2572 	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
2573 		if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO)
2574 			ef4_farch_filter_remove(efx, table,
2575 						filter_idx, priority);
2576 	}
2577 	spin_unlock_bh(&efx->filter_lock);
2578 }
2579 
2580 int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
2581 			       enum ef4_filter_priority priority)
2582 {
2583 	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP,
2584 				     priority);
2585 	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC,
2586 				     priority);
2587 	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF,
2588 				     priority);
2589 	return 0;
2590 }
2591 
2592 u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
2593 				   enum ef4_filter_priority priority)
2594 {
2595 	struct ef4_farch_filter_state *state = efx->filter_state;
2596 	enum ef4_farch_filter_table_id table_id;
2597 	struct ef4_farch_filter_table *table;
2598 	unsigned int filter_idx;
2599 	u32 count = 0;
2600 
2601 	spin_lock_bh(&efx->filter_lock);
2602 
2603 	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2604 	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2605 	     table_id++) {
2606 		table = &state->table[table_id];
2607 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2608 			if (test_bit(filter_idx, table->used_bitmap) &&
2609 			    table->spec[filter_idx].priority == priority)
2610 				++count;
2611 		}
2612 	}
2613 
2614 	spin_unlock_bh(&efx->filter_lock);
2615 
2616 	return count;
2617 }
2618 
2619 s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
2620 				enum ef4_filter_priority priority,
2621 				u32 *buf, u32 size)
2622 {
2623 	struct ef4_farch_filter_state *state = efx->filter_state;
2624 	enum ef4_farch_filter_table_id table_id;
2625 	struct ef4_farch_filter_table *table;
2626 	unsigned int filter_idx;
2627 	s32 count = 0;
2628 
2629 	spin_lock_bh(&efx->filter_lock);
2630 
2631 	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2632 	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2633 	     table_id++) {
2634 		table = &state->table[table_id];
2635 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2636 			if (test_bit(filter_idx, table->used_bitmap) &&
2637 			    table->spec[filter_idx].priority == priority) {
2638 				if (count == size) {
2639 					count = -EMSGSIZE;
2640 					goto out;
2641 				}
2642 				buf[count++] = ef4_farch_filter_make_id(
2643 					&table->spec[filter_idx], filter_idx);
2644 			}
2645 		}
2646 	}
2647 out:
2648 	spin_unlock_bh(&efx->filter_lock);
2649 
2650 	return count;
2651 }
2652 
2653 /* Restore filter state after reset */
2654 void ef4_farch_filter_table_restore(struct ef4_nic *efx)
2655 {
2656 	struct ef4_farch_filter_state *state = efx->filter_state;
2657 	enum ef4_farch_filter_table_id table_id;
2658 	struct ef4_farch_filter_table *table;
2659 	ef4_oword_t filter;
2660 	unsigned int filter_idx;
2661 
2662 	spin_lock_bh(&efx->filter_lock);
2663 
2664 	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2665 		table = &state->table[table_id];
2666 
2667 		/* Check whether this is a regular register table */
2668 		if (table->step == 0)
2669 			continue;
2670 
2671 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2672 			if (!test_bit(filter_idx, table->used_bitmap))
2673 				continue;
2674 			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
2675 			ef4_writeo(efx, &filter,
2676 				   table->offset + table->step * filter_idx);
2677 		}
2678 	}
2679 
2680 	ef4_farch_filter_push_rx_config(efx);
2681 	ef4_farch_filter_push_tx_limits(efx);
2682 
2683 	spin_unlock_bh(&efx->filter_lock);
2684 }
2685 
2686 void ef4_farch_filter_table_remove(struct ef4_nic *efx)
2687 {
2688 	struct ef4_farch_filter_state *state = efx->filter_state;
2689 	enum ef4_farch_filter_table_id table_id;
2690 
2691 	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2692 		bitmap_free(state->table[table_id].used_bitmap);
2693 		vfree(state->table[table_id].spec);
2694 	}
2695 	kfree(state);
2696 }
2697 
2698 int ef4_farch_filter_table_probe(struct ef4_nic *efx)
2699 {
2700 	struct ef4_farch_filter_state *state;
2701 	struct ef4_farch_filter_table *table;
2702 	unsigned table_id;
2703 
2704 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2705 	if (!state)
2706 		return -ENOMEM;
2707 	efx->filter_state = state;
2708 
2709 	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
2710 		table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
2711 		table->id = EF4_FARCH_FILTER_TABLE_RX_IP;
2712 		table->offset = FR_BZ_RX_FILTER_TBL0;
2713 		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2714 		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2715 	}
2716 
2717 	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2718 		table = &state->table[table_id];
2719 		if (table->size == 0)
2720 			continue;
2721 		table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
2722 		if (!table->used_bitmap)
2723 			goto fail;
2724 		table->spec = vzalloc(array_size(sizeof(*table->spec),
2725 						 table->size));
2726 		if (!table->spec)
2727 			goto fail;
2728 	}
2729 
2730 	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
2731 	if (table->size) {
2732 		/* RX default filters must always exist */
2733 		struct ef4_farch_filter_spec *spec;
2734 		unsigned i;
2735 
2736 		for (i = 0; i < EF4_FARCH_FILTER_SIZE_RX_DEF; i++) {
2737 			spec = &table->spec[i];
2738 			spec->type = EF4_FARCH_FILTER_UC_DEF + i;
2739 			ef4_farch_filter_init_rx_auto(efx, spec);
2740 			__set_bit(i, table->used_bitmap);
2741 		}
2742 	}
2743 
2744 	ef4_farch_filter_push_rx_config(efx);
2745 
2746 	return 0;
2747 
2748 fail:
2749 	ef4_farch_filter_table_remove(efx);
2750 	return -ENOMEM;
2751 }
2752 
2753 /* Update scatter enable flags for filters pointing to our own RX queues */
2754 void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx)
2755 {
2756 	struct ef4_farch_filter_state *state = efx->filter_state;
2757 	enum ef4_farch_filter_table_id table_id;
2758 	struct ef4_farch_filter_table *table;
2759 	ef4_oword_t filter;
2760 	unsigned int filter_idx;
2761 
2762 	spin_lock_bh(&efx->filter_lock);
2763 
2764 	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2765 	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2766 	     table_id++) {
2767 		table = &state->table[table_id];
2768 
2769 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2770 			if (!test_bit(filter_idx, table->used_bitmap) ||
2771 			    table->spec[filter_idx].dmaq_id >=
2772 			    efx->n_rx_channels)
2773 				continue;
2774 
2775 			if (efx->rx_scatter)
2776 				table->spec[filter_idx].flags |=
2777 					EF4_FILTER_FLAG_RX_SCATTER;
2778 			else
2779 				table->spec[filter_idx].flags &=
2780 					~EF4_FILTER_FLAG_RX_SCATTER;
2781 
2782 			if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF)
2783 				/* Pushed by ef4_farch_filter_push_rx_config() */
2784 				continue;
2785 
2786 			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
2787 			ef4_writeo(efx, &filter,
2788 				   table->offset + table->step * filter_idx);
2789 		}
2790 	}
2791 
2792 	ef4_farch_filter_push_rx_config(efx);
2793 
2794 	spin_unlock_bh(&efx->filter_lock);
2795 }
2796 
2797 #ifdef CONFIG_RFS_ACCEL
2798 
2799 s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
2800 				struct ef4_filter_spec *gen_spec)
2801 {
2802 	return ef4_farch_filter_insert(efx, gen_spec, true);
2803 }
2804 
2805 bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
2806 				     unsigned int index)
2807 {
2808 	struct ef4_farch_filter_state *state = efx->filter_state;
2809 	struct ef4_farch_filter_table *table =
2810 		&state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
2811 
2812 	if (test_bit(index, table->used_bitmap) &&
2813 	    table->spec[index].priority == EF4_FILTER_PRI_HINT &&
2814 	    rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2815 				flow_id, index)) {
2816 		ef4_farch_filter_table_clear_entry(efx, table, index);
2817 		return true;
2818 	}
2819 
2820 	return false;
2821 }
2822 
2823 #endif /* CONFIG_RFS_ACCEL */
2824 
2825 void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx)
2826 {
2827 	struct net_device *net_dev = efx->net_dev;
2828 	struct netdev_hw_addr *ha;
2829 	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
2830 	u32 crc;
2831 	int bit;
2832 
2833 	if (!ef4_dev_registered(efx))
2834 		return;
2835 
2836 	netif_addr_lock_bh(net_dev);
2837 
2838 	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2839 
2840 	/* Build multicast hash table */
2841 	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2842 		memset(mc_hash, 0xff, sizeof(*mc_hash));
2843 	} else {
2844 		memset(mc_hash, 0x00, sizeof(*mc_hash));
2845 		netdev_for_each_mc_addr(ha, net_dev) {
2846 			crc = ether_crc_le(ETH_ALEN, ha->addr);
2847 			bit = crc & (EF4_MCAST_HASH_ENTRIES - 1);
2848 			__set_bit_le(bit, mc_hash);
2849 		}
2850 
2851 		/* Broadcast packets go through the multicast hash filter.
2852 		 * ether_crc_le() of the broadcast address is 0xbe2612ff
2853 		 * so we always add bit 0xff to the mask.
2854 		 */
2855 		__set_bit_le(0xff, mc_hash);
2856 	}
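
	/* Each multicast address sets one of EF4_MCAST_HASH_ENTRIES (256)
	 * bits, selected by the low byte of its little-endian CRC-32.
	 */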
2857 
2858 	netif_addr_unlock_bh(net_dev);
2859 }
2860