xref: /linux/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3 
4 #include <linux/bitfield.h>
5 #include <linux/iopoll.h>
6 #include <linux/pci.h>
7 #include <net/netdev_queues.h>
8 #include <net/page_pool/helpers.h>
9 
10 #include "fbnic.h"
11 #include "fbnic_csr.h"
12 #include "fbnic_netdev.h"
13 #include "fbnic_txrx.h"
14 
/* Per-skb Tx bookkeeping kept in skb->cb while the frame is on a ring */
struct fbnic_xmit_cb {
	u32 bytecount;	/* skb->len at enqueue, reported to BQL at completion */
	u8 desc_count;	/* descriptors consumed by the frame (meta + buffers) */
	int hw_head;	/* NOTE(review): not written in this file chunk -- confirm use */
};
20 
/* Access the Tx bookkeeping area embedded in skb->cb */
#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
22 
23 static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
24 {
25 	unsigned long csr_base = (unsigned long)ring->doorbell;
26 
27 	csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1);
28 
29 	return (u32 __iomem *)csr_base;
30 }
31 
32 static u32 fbnic_ring_rd32(struct fbnic_ring *ring, unsigned int csr)
33 {
34 	u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
35 
36 	return readl(csr_base + csr);
37 }
38 
39 static void fbnic_ring_wr32(struct fbnic_ring *ring, unsigned int csr, u32 val)
40 {
41 	u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
42 
43 	writel(val, csr_base + csr);
44 }
45 
46 static unsigned int fbnic_desc_unused(struct fbnic_ring *ring)
47 {
48 	return (ring->head - ring->tail - 1) & ring->size_mask;
49 }
50 
51 static unsigned int fbnic_desc_used(struct fbnic_ring *ring)
52 {
53 	return (ring->tail - ring->head) & ring->size_mask;
54 }
55 
/* Look up the netdev Tx queue paired with this ring via its q_idx */
static struct netdev_queue *txring_txq(const struct net_device *dev,
				       const struct fbnic_ring *ring)
{
	return netdev_get_tx_queue(dev, ring->q_idx);
}
61 
/* Stop the Tx queue if fewer than @size descriptors remain free.
 *
 * netif_txq_maybe_stop() returns 0 when it stopped the queue (and it
 * stayed stopped), so the result is inverted here to make "stopped,
 * caller should back off" the truthy case.
 *
 * Return: non-zero if the queue is stopped with insufficient room,
 * 0 if transmission can proceed.
 */
static int fbnic_maybe_stop_tx(const struct net_device *dev,
			       struct fbnic_ring *ring,
			       const unsigned int size)
{
	struct netdev_queue *txq = txring_txq(dev, ring);
	int res;

	/* Stop when unused dips below @size; only wake again once
	 * FBNIC_TX_DESC_WAKEUP descriptors are available.
	 */
	res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
				   FBNIC_TX_DESC_WAKEUP);

	return !res;
}
74 
75 static bool fbnic_tx_sent_queue(struct sk_buff *skb, struct fbnic_ring *ring)
76 {
77 	struct netdev_queue *dev_queue = txring_txq(skb->dev, ring);
78 	unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount;
79 	bool xmit_more = netdev_xmit_more();
80 
81 	/* TBD: Request completion more often if xmit_more becomes large */
82 
83 	return __netdev_tx_sent_queue(dev_queue, bytecount, xmit_more);
84 }
85 
86 static void fbnic_unmap_single_twd(struct device *dev, __le64 *twd)
87 {
88 	u64 raw_twd = le64_to_cpu(*twd);
89 	unsigned int len;
90 	dma_addr_t dma;
91 
92 	dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
93 	len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
94 
95 	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
96 }
97 
98 static void fbnic_unmap_page_twd(struct device *dev, __le64 *twd)
99 {
100 	u64 raw_twd = le64_to_cpu(*twd);
101 	unsigned int len;
102 	dma_addr_t dma;
103 
104 	dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
105 	len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
106 
107 	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
108 }
109 
/* Build a little-endian TWD value with only the type field populated */
#define FBNIC_TWD_TYPE(_type) \
	cpu_to_le64(FIELD_PREP(FBNIC_TWD_TYPE_MASK, FBNIC_TWD_TYPE_##_type))
112 
/* Populate the checksum offload fields of the Tx metadata descriptor.
 *
 * Encodes csum offset and header lengths (in 2-byte units, already
 * validated to fit by fbnic_features_check()) into @meta and sets the
 * REQ_CSO flag when the skb requires checksum offload.
 *
 * Return: always false (success); the bool mirrors the other Tx-path
 * helpers where true signals failure.
 */
static bool
fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
{
	unsigned int l2len, i3len;

	/* Nothing to encode unless the stack asked for csum offload */
	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
		return false;

	l2len = skb_mac_header_len(skb);
	i3len = skb_checksum_start(skb) - skb_network_header(skb);

	/* All length/offset fields are encoded in 2-byte units */
	*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
					skb->csum_offset / 2));

	*meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);

	*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
			     FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
	return false;
}
133 
134 static void
135 fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
136 {
137 	skb_checksum_none_assert(skb);
138 
139 	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
140 		return;
141 
142 	if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
143 		skb->ip_summed = CHECKSUM_UNNECESSARY;
144 	} else {
145 		u16 csum = FIELD_GET(FBNIC_RCD_META_L2_CSUM_MASK, rcd);
146 
147 		skb->ip_summed = CHECKSUM_COMPLETE;
148 		skb->csum = (__force __wsum)csum;
149 	}
150 }
151 
/* Map the skb head and frags for DMA and write address/length work
 * descriptors (TWDs) into the ring behind the metadata descriptor.
 *
 * On success the descriptor count is recorded in the skb cb, the ring
 * tail is advanced, the queue is stopped if nearly full, and the
 * doorbell is rung unless BQL/xmit_more allows deferral.
 *
 * Return: true on DMA mapping failure (all prior mappings undone),
 * false on success.
 */
static bool
fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
{
	struct device *dev = skb->dev->dev.parent;
	unsigned int tail = ring->tail, first;
	unsigned int size, data_len;
	skb_frag_t *frag;
	dma_addr_t dma;
	__le64 *twd;

	/* skb is stashed at the metadata descriptor's slot; the buffer
	 * descriptors begin at the following slot.
	 */
	ring->tx_buf[tail] = skb;

	tail++;
	tail &= ring->size_mask;
	first = tail;

	size = skb_headlen(skb);
	data_len = skb->data_len;

	if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
		goto dma_error;

	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		twd = &ring->desc[tail];

		/* Checked at loop top so one test covers both the head
		 * mapping and each frag mapping from the prior iteration.
		 */
		if (dma_mapping_error(dev, dma))
			goto dma_error;

		*twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
				   FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
				   FIELD_PREP(FBNIC_TWD_TYPE_MASK,
					      FBNIC_TWD_TYPE_AL));

		tail++;
		tail &= ring->size_mask;

		if (!data_len)
			break;

		size = skb_frag_size(frag);
		data_len -= size;

		if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
			goto dma_error;

		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
	}

	/* Retag the final descriptor to mark the end of the frame */
	*twd |= FBNIC_TWD_TYPE(LAST_AL);

	FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask;

	ring->tail = tail;

	/* Verify there is room for another packet */
	fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC);

	if (fbnic_tx_sent_queue(skb, ring)) {
		*meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_COMPLETION);

		/* Force DMA writes to flush before writing to tail */
		dma_wmb();

		writel(tail, ring->doorbell);
	}

	return false;
dma_error:
	if (net_ratelimit())
		netdev_err(skb->dev, "TX DMA map failed\n");

	/* Walk back over the descriptors written so far, unmapping each
	 * buffer. The slot at @first holds the head mapping (single);
	 * everything after it is a page mapping.
	 */
	while (tail != first) {
		tail--;
		tail &= ring->size_mask;
		twd = &ring->desc[tail];
		if (tail == first)
			fbnic_unmap_single_twd(dev, twd);
		else
			fbnic_unmap_page_twd(dev, twd);
	}

	return true;
}
237 
/* Minimum Ethernet frame length (sans FCS); shorter skbs are padded */
#define FBNIC_MIN_FRAME_LEN	60
239 
/* Enqueue one skb on a Tx ring.
 *
 * Pads runt frames, verifies descriptor space, writes the metadata
 * descriptor, then hands off to the offload and mapping helpers.
 *
 * Return: NETDEV_TX_BUSY if the ring lacks space (skb untouched so the
 * stack will requeue it); otherwise NETDEV_TX_OK. On internal failure
 * the skb is freed and NETDEV_TX_OK is still returned.
 */
static netdev_tx_t
fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
{
	__le64 *meta = &ring->desc[ring->tail];
	u16 desc_needed;

	/* skb_put_padto() frees the skb itself on failure */
	if (skb_put_padto(skb, FBNIC_MIN_FRAME_LEN))
		goto err_count;

	/* Need: 1 descriptor per page,
	 *       + 1 desc for skb_head,
	 *       + 2 desc for metadata and timestamp metadata
	 *       + 7 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
	desc_needed = skb_shinfo(skb)->nr_frags + 10;
	if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed))
		return NETDEV_TX_BUSY;

	*meta = cpu_to_le64(FBNIC_TWD_FLAG_DEST_MAC);

	/* Write all members within DWORD to condense this into 2 4B writes */
	FBNIC_XMIT_CB(skb)->bytecount = skb->len;
	FBNIC_XMIT_CB(skb)->desc_count = 0;

	if (fbnic_tx_offloads(ring, skb, meta))
		goto err_free;

	if (fbnic_tx_map(ring, skb, meta))
		goto err_free;

	return NETDEV_TX_OK;

err_free:
	dev_kfree_skb_any(skb);
err_count:
	return NETDEV_TX_OK;
}
278 
279 netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
280 {
281 	struct fbnic_net *fbn = netdev_priv(dev);
282 	unsigned int q_map = skb->queue_mapping;
283 
284 	return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
285 }
286 
/* ndo_features_check: verify a checksum-offload request fits the TWD
 * field encoding (2-byte units); otherwise strip the csum features for
 * this skb so the stack falls back to software checksumming.
 */
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
		     netdev_features_t features)
{
	unsigned int l2len, l3len;

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
		return features;

	l2len = skb_mac_header_len(skb);
	l3len = skb_checksum_start(skb) - skb_network_header(skb);

	/* Check header lengths are multiple of 2.
	 * In case of 6in6 we support longer headers (IHLEN + OHLEN)
	 * but keep things simple for now, 512B is plenty.
	 */
	if ((l2len | l3len | skb->csum_offset) % 2 ||
	    !FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
	    !FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
	    !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
		return features & ~NETIF_F_CSUM_MASK;

	return features;
}
311 
/* Reclaim Tx descriptors up to the head reported by hardware.
 *
 * For each fully completed frame: unmap the head buffer and page
 * frags, free the skb, and accumulate totals so BQL can account the
 * bytes and wake the queue if enough space was freed. @discard only
 * settles the BQL accounting without waking the queue.
 */
static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
			     struct fbnic_ring *ring, bool discard,
			     unsigned int hw_head)
{
	u64 total_bytes = 0, total_packets = 0;
	unsigned int head = ring->head;
	struct netdev_queue *txq;
	unsigned int clean_desc;

	clean_desc = (hw_head - head) & ring->size_mask;

	while (clean_desc) {
		struct sk_buff *skb = ring->tx_buf[head];
		unsigned int desc_cnt;

		desc_cnt = FBNIC_XMIT_CB(skb)->desc_count;
		/* Stop if this frame's descriptors aren't all complete */
		if (desc_cnt > clean_desc)
			break;

		ring->tx_buf[head] = NULL;

		clean_desc -= desc_cnt;

		/* Advance past descriptors without the AL type bit set
		 * (no buffer attached, e.g. metadata).
		 */
		while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) {
			head++;
			head &= ring->size_mask;
			desc_cnt--;
		}

		/* First buffer descriptor maps the skb head... */
		fbnic_unmap_single_twd(nv->dev, &ring->desc[head]);
		head++;
		head &= ring->size_mask;
		desc_cnt--;

		/* ...any remaining descriptors map page frags */
		while (desc_cnt--) {
			fbnic_unmap_page_twd(nv->dev, &ring->desc[head]);
			head++;
			head &= ring->size_mask;
		}

		total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
		total_packets += 1;

		napi_consume_skb(skb, napi_budget);
	}

	/* Nothing completed, leave head and BQL untouched */
	if (!total_bytes)
		return;

	ring->head = head;

	txq = txring_txq(nv->napi.dev, ring);

	if (unlikely(discard)) {
		netdev_tx_completed_queue(txq, total_packets, total_bytes);
		return;
	}

	netif_txq_completed_wake(txq, total_packets, total_bytes,
				 fbnic_desc_unused(ring),
				 FBNIC_TX_DESC_WAKEUP);
}
374 
375 static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
376 				 struct page *page)
377 {
378 	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
379 
380 	page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
381 	rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
382 	rx_buf->page = page;
383 }
384 
385 static struct page *fbnic_page_pool_get(struct fbnic_ring *ring,
386 					unsigned int idx)
387 {
388 	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
389 
390 	rx_buf->pagecnt_bias--;
391 
392 	return rx_buf->page;
393 }
394 
/* Release an Rx buffer's page back to the page pool.
 *
 * The unconsumed portion of the bias is removed from the page's
 * fragment count; if no other users remain the page is returned to
 * the pool (dma_sync_size of -1 defers the full sync). @budget
 * indicates whether we are in napi context and may recycle directly.
 */
static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
				  struct fbnic_napi_vector *nv, int budget)
{
	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
	struct page *page = rx_buf->page;

	if (!page_pool_unref_page(page, rx_buf->pagecnt_bias))
		page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget);

	rx_buf->page = NULL;
}
406 
407 static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
408 			    struct fbnic_q_triad *qt, s32 head0)
409 {
410 	if (head0 >= 0)
411 		fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0);
412 }
413 
/* Process Tx completion descriptors (TCDs) for one queue triad.
 *
 * Walks the completion ring using a DONE bit whose expected value
 * flips each time the ring wraps, collects the latest TWQ0 head
 * reported by the NIC, acknowledges consumed completions via the
 * doorbell, then cleans the work queue up to that head.
 */
static void
fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
		int napi_budget)
{
	struct fbnic_ring *cmpl = &qt->cmpl;
	__le64 *raw_tcd, done;
	u32 head = cmpl->head;
	s32 head0 = -1;

	/* The generation is tracked in head's bit just above size_mask */
	done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE);
	raw_tcd = &cmpl->desc[head & cmpl->size_mask];

	/* Walk the completion queue collecting the heads reported by NIC */
	while ((*raw_tcd & cpu_to_le64(FBNIC_TCD_DONE)) == done) {
		u64 tcd;

		/* Read descriptor contents only after seeing the DONE bit */
		dma_rmb();

		tcd = le64_to_cpu(*raw_tcd);

		switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
		case FBNIC_TCD_TYPE_0:
			if (!(tcd & FBNIC_TCD_TWQ1))
				head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK,
						  tcd);
			/* Currently all err status bits are related to
			 * timestamps and as those have yet to be added
			 * they are skipped for now.
			 */
			break;
		default:
			break;
		}

		raw_tcd++;
		head++;
		if (!(head & cmpl->size_mask)) {
			done ^= cpu_to_le64(FBNIC_TCD_DONE);
			raw_tcd = &cmpl->desc[0];
		}
	}

	/* Record the current head/tail of the queue */
	if (cmpl->head != head) {
		cmpl->head = head;
		writel(head & cmpl->size_mask, cmpl->doorbell);
	}

	/* Unmap and free processed buffers */
	fbnic_clean_twq(nv, napi_budget, qt, head0);
}
465 
466 static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
467 			    struct fbnic_ring *ring, unsigned int hw_head)
468 {
469 	unsigned int head = ring->head;
470 
471 	if (head == hw_head)
472 		return;
473 
474 	do {
475 		fbnic_page_pool_drain(ring, head, nv, napi_budget);
476 
477 		head++;
478 		head &= ring->size_mask;
479 	} while (head != hw_head);
480 
481 	ring->head = head;
482 }
483 
/* Write the buffer descriptor(s) that publish @page at index @id to
 * the header or payload buffer descriptor queue.
 */
static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
{
	__le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT];
	dma_addr_t dma = page_pool_get_dma_addr(page);
	u64 bd, i = FBNIC_BD_FRAG_COUNT;

	bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) |
	     FIELD_PREP(FBNIC_BD_PAGE_ID_MASK, id);

	/* In the case that a page size is larger than 4K we will map a
	 * single page to multiple fragments. The fragments will be
	 * FBNIC_BD_FRAG_COUNT in size and the lower n bits will be use
	 * to indicate the individual fragment IDs.
	 */
	do {
		*bdq_desc = cpu_to_le64(bd);
		/* Advance both the address and the fragment ID fields */
		bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) |
		      FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1);
	} while (--i);
}
504 
/* Refill a buffer descriptor queue with pages from the napi vector's
 * page pool. Allocation failure simply ends the refill early; the
 * tail doorbell is only rung if at least one buffer was posted.
 */
static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
{
	unsigned int count = fbnic_desc_unused(bdq);
	unsigned int i = bdq->tail;

	if (!count)
		return;

	do {
		struct page *page;

		page = page_pool_dev_alloc_pages(nv->page_pool);
		if (!page)
			break;

		fbnic_page_pool_init(bdq, i, page);
		fbnic_bd_prep(bdq, i, page);

		i++;
		i &= bdq->size_mask;

		count--;
	} while (count);

	if (bdq->tail != i) {
		bdq->tail = i;

		/* Force DMA writes to flush before writing to tail */
		dma_wmb();

		writel(i, bdq->doorbell);
	}
}
538 
539 static unsigned int fbnic_hdr_pg_start(unsigned int pg_off)
540 {
541 	/* The headroom of the first header may be larger than FBNIC_RX_HROOM
542 	 * due to alignment. So account for that by just making the page
543 	 * offset 0 if we are starting at the first header.
544 	 */
545 	if (ALIGN(FBNIC_RX_HROOM, 128) > FBNIC_RX_HROOM &&
546 	    pg_off == ALIGN(FBNIC_RX_HROOM, 128))
547 		return 0;
548 
549 	return pg_off - FBNIC_RX_HROOM;
550 }
551 
552 static unsigned int fbnic_hdr_pg_end(unsigned int pg_off, unsigned int len)
553 {
554 	/* Determine the end of the buffer by finding the start of the next
555 	 * and then subtracting the headroom from that frame.
556 	 */
557 	pg_off += len + FBNIC_RX_TROOM + FBNIC_RX_HROOM;
558 
559 	return ALIGN(pg_off, 128) - FBNIC_RX_HROOM;
560 }
561 
/* Initialize the packet buffer from a header buffer RCD.
 *
 * Determines the header region within its page fragment (accounting
 * for 128B headroom alignment), syncs it for CPU access, and wraps it
 * in the xdp_buff. Frag accumulators are reset for any payload
 * descriptors that follow.
 */
static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
			      struct fbnic_pkt_buff *pkt,
			      struct fbnic_q_triad *qt)
{
	unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
	unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
	struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx);
	unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
	unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom;
	unsigned char *hdr_start;

	/* data_hard_start should always be NULL when this is called */
	WARN_ON_ONCE(pkt->buff.data_hard_start);

	/* Short-cut the end calculation if we know page is fully consumed */
	hdr_pg_end = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
		     FBNIC_BD_FRAG_SIZE : fbnic_hdr_pg_end(hdr_pg_off, len);
	hdr_pg_start = fbnic_hdr_pg_start(hdr_pg_off);

	headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD;
	frame_sz = hdr_pg_end - hdr_pg_start;
	xdp_init_buff(&pkt->buff, frame_sz, NULL);
	/* Shift the start by the fragment the RCD selects within the page */
	hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
			FBNIC_BD_FRAG_SIZE;

	/* Sync DMA buffer */
	dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
				      hdr_pg_start, frame_sz,
				      DMA_BIDIRECTIONAL);

	/* Build frame around buffer */
	hdr_start = page_address(page) + hdr_pg_start;

	xdp_prepare_buff(&pkt->buff, hdr_start, headroom,
			 len - FBNIC_RX_PAD, true);

	/* Reset accumulators for the payload frags that may follow */
	pkt->data_truesize = 0;
	pkt->data_len = 0;
	pkt->nr_frags = 0;
}
602 
/* Attach one payload buffer described by an RCD as a frag of the
 * packet under assembly.
 *
 * Truesize covers the rest of the fragment when the NIC marks the
 * page finished, otherwise the 128B-aligned length. The region is
 * synced for CPU access and appended to the xdp_buff's shared info.
 */
static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
			      struct fbnic_pkt_buff *pkt,
			      struct fbnic_q_triad *qt)
{
	unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
	unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
	unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
	struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx);
	struct skb_shared_info *shinfo;
	unsigned int truesize;

	truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
		   FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128);

	/* Shift the offset by the fragment the RCD selects in the page */
	pg_off += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
		  FBNIC_BD_FRAG_SIZE;

	/* Sync DMA buffer */
	dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
				      pg_off, truesize, DMA_BIDIRECTIONAL);

	/* Add page to xdp shared info */
	shinfo = xdp_get_shared_info_from_buff(&pkt->buff);

	/* Running total of buffer space attributed to this packet */
	pkt->data_truesize += truesize;

	__skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len);

	/* Running total of payload bytes added as frags */
	pkt->data_len += len;
}
635 
636 static void fbnic_put_pkt_buff(struct fbnic_napi_vector *nv,
637 			       struct fbnic_pkt_buff *pkt, int budget)
638 {
639 	struct skb_shared_info *shinfo;
640 	struct page *page;
641 	int nr_frags;
642 
643 	if (!pkt->buff.data_hard_start)
644 		return;
645 
646 	shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
647 	nr_frags = pkt->nr_frags;
648 
649 	while (nr_frags--) {
650 		page = skb_frag_page(&shinfo->frags[nr_frags]);
651 		page_pool_put_full_page(nv->page_pool, page, !!budget);
652 	}
653 
654 	page = virt_to_page(pkt->buff.data_hard_start);
655 	page_pool_put_full_page(nv->page_pool, page, !!budget);
656 }
657 
/* Construct an sk_buff around the assembled packet buffer.
 *
 * Wraps the header region via napi_build_skb(), then folds in the
 * payload frags and accounting that fbnic_add_rx_frag() stashed in
 * the shared info. Pages are marked for page pool recycling.
 *
 * Return: the skb, or NULL if napi_build_skb() fails (caller is
 * responsible for releasing the buffers).
 */
static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
				       struct fbnic_pkt_buff *pkt)
{
	unsigned int nr_frags = pkt->nr_frags;
	struct skb_shared_info *shinfo;
	unsigned int truesize;
	struct sk_buff *skb;

	truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM -
		   pkt->buff.data_hard_start;

	/* Build frame around buffer */
	skb = napi_build_skb(pkt->buff.data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* Push data pointer to start of data, put tail to end of data */
	skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start);
	__skb_put(skb, pkt->buff.data_end - pkt->buff.data);

	/* Add tracking for metadata at the start of the frame */
	skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta);

	/* Add Rx frags */
	if (nr_frags) {
		/* Verify that shared info didn't move */
		shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
		WARN_ON(skb_shinfo(skb) != shinfo);

		skb->truesize += pkt->data_truesize;
		skb->data_len += pkt->data_len;
		shinfo->nr_frags = nr_frags;
		skb->len += pkt->data_len;
	}

	skb_mark_for_recycle(skb);

	/* Set MAC header specific fields */
	skb->protocol = eth_type_trans(skb, nv->napi.dev);

	return skb;
}
700 
701 static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd)
702 {
703 	return (FBNIC_RCD_META_L4_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L4 :
704 	       (FBNIC_RCD_META_L3_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L3 :
705 						     PKT_HASH_TYPE_L2;
706 }
707 
708 static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
709 				      u64 rcd, struct sk_buff *skb,
710 				      struct fbnic_q_triad *qt)
711 {
712 	struct net_device *netdev = nv->napi.dev;
713 	struct fbnic_ring *rcq = &qt->cmpl;
714 
715 	fbnic_rx_csum(rcd, skb, rcq);
716 
717 	if (netdev->features & NETIF_F_RXHASH)
718 		skb_set_hash(skb,
719 			     FIELD_GET(FBNIC_RCD_META_RSS_HASH_MASK, rcd),
720 			     fbnic_skb_hash_type(rcd));
721 
722 	skb_record_rx_queue(skb, rcq->q_idx);
723 }
724 
725 static bool fbnic_rcd_metadata_err(u64 rcd)
726 {
727 	return !!(FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK & rcd);
728 }
729 
/* Process Rx completion descriptors (RCDs) for one queue triad.
 *
 * A header descriptor starts a packet, payload descriptors append
 * frags, and a metadata descriptor completes it — at which point the
 * skb is built and handed to GRO, or the buffers dropped on error.
 * Consumed header/payload buffers are recycled and both buffer
 * descriptor queues refilled before the completion head is advanced.
 *
 * Return: number of packets delivered to the stack (<= @budget).
 */
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
			   struct fbnic_q_triad *qt, int budget)
{
	struct fbnic_ring *rcq = &qt->cmpl;
	struct fbnic_pkt_buff *pkt;
	s32 head0 = -1, head1 = -1;
	__le64 *raw_rcd, done;
	u32 head = rcq->head;
	u64 packets = 0;

	/* Expected DONE bit flips each wrap; generation is tracked in
	 * head's bit just above size_mask.
	 */
	done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0;
	raw_rcd = &rcq->desc[head & rcq->size_mask];
	pkt = rcq->pkt;

	/* Walk the completion queue collecting the heads reported by NIC */
	while (likely(packets < budget)) {
		struct sk_buff *skb = ERR_PTR(-EINVAL);
		u64 rcd;

		if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done)
			break;

		/* Read descriptor contents only after seeing the DONE bit */
		dma_rmb();

		rcd = le64_to_cpu(*raw_rcd);

		switch (FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd)) {
		case FBNIC_RCD_TYPE_HDR_AL:
			head0 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
			fbnic_pkt_prepare(nv, rcd, pkt, qt);

			break;
		case FBNIC_RCD_TYPE_PAY_AL:
			head1 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
			fbnic_add_rx_frag(nv, rcd, pkt, qt);

			break;
		case FBNIC_RCD_TYPE_OPT_META:
			/* Only type 0 is currently supported */
			if (FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd))
				break;

			/* We currently ignore the action table index */
			break;
		case FBNIC_RCD_TYPE_META:
			if (likely(!fbnic_rcd_metadata_err(rcd)))
				skb = fbnic_build_skb(nv, pkt);

			/* Populate skb and invalidate XDP */
			if (!IS_ERR_OR_NULL(skb)) {
				fbnic_populate_skb_fields(nv, rcd, skb, qt);

				packets++;

				napi_gro_receive(&nv->napi, skb);
			} else {
				fbnic_put_pkt_buff(nv, pkt, 1);
			}

			pkt->buff.data_hard_start = NULL;

			break;
		}

		raw_rcd++;
		head++;
		if (!(head & rcq->size_mask)) {
			done ^= cpu_to_le64(FBNIC_RCD_DONE);
			raw_rcd = &rcq->desc[0];
		}
	}

	/* Unmap and free processed buffers */
	if (head0 >= 0)
		fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
	fbnic_fill_bdq(nv, &qt->sub0);

	if (head1 >= 0)
		fbnic_clean_bdq(nv, budget, &qt->sub1, head1);
	fbnic_fill_bdq(nv, &qt->sub1);

	/* Record the current head/tail of the queue */
	if (rcq->head != head) {
		rcq->head = head;
		writel(head & rcq->size_mask, rcq->doorbell);
	}

	return packets;
}
819 
820 static void fbnic_nv_irq_disable(struct fbnic_napi_vector *nv)
821 {
822 	struct fbnic_dev *fbd = nv->fbd;
823 	u32 v_idx = nv->v_idx;
824 
825 	fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(v_idx / 32), 1 << (v_idx % 32));
826 }
827 
828 static void fbnic_nv_irq_rearm(struct fbnic_napi_vector *nv)
829 {
830 	struct fbnic_dev *fbd = nv->fbd;
831 	u32 v_idx = nv->v_idx;
832 
833 	fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(v_idx),
834 		   FBNIC_INTR_CQ_REARM_INTR_UNMASK);
835 }
836 
837 static int fbnic_poll(struct napi_struct *napi, int budget)
838 {
839 	struct fbnic_napi_vector *nv = container_of(napi,
840 						    struct fbnic_napi_vector,
841 						    napi);
842 	int i, j, work_done = 0;
843 
844 	for (i = 0; i < nv->txt_count; i++)
845 		fbnic_clean_tcq(nv, &nv->qt[i], budget);
846 
847 	for (j = 0; j < nv->rxt_count; j++, i++)
848 		work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget);
849 
850 	if (work_done >= budget)
851 		return budget;
852 
853 	if (likely(napi_complete_done(napi, work_done)))
854 		fbnic_nv_irq_rearm(nv);
855 
856 	return 0;
857 }
858 
/* MSI-X handler for a napi vector: just schedule NAPI; all ring work
 * happens in fbnic_poll().
 */
static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
{
	struct fbnic_napi_vector *nv = data;

	napi_schedule_irqoff(&nv->napi);

	return IRQ_HANDLED;
}
867 
868 static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
869 				 struct fbnic_ring *txr)
870 {
871 	if (!(txr->flags & FBNIC_RING_F_STATS))
872 		return;
873 
874 	/* Remove pointer to the Tx ring */
875 	WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
876 	fbn->tx[txr->q_idx] = NULL;
877 }
878 
879 static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
880 				 struct fbnic_ring *rxr)
881 {
882 	if (!(rxr->flags & FBNIC_RING_F_STATS))
883 		return;
884 
885 	/* Remove pointer to the Rx ring */
886 	WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
887 	fbn->rx[rxr->q_idx] = NULL;
888 }
889 
/* Tear down one napi vector: detach its rings from the netdev lookup
 * tables, release the IRQ, destroy the page pool, unregister NAPI,
 * and free the vector itself.
 */
static void fbnic_free_napi_vector(struct fbnic_net *fbn,
				   struct fbnic_napi_vector *nv)
{
	struct fbnic_dev *fbd = nv->fbd;
	u32 v_idx = nv->v_idx;
	int i, j;

	/* Tx triads occupy the front of the qt array */
	for (i = 0; i < nv->txt_count; i++) {
		fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0);
		fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl);
	}

	/* Rx triads follow, continuing with the same index i */
	for (j = 0; j < nv->rxt_count; j++, i++) {
		fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
		fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
		fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
	}

	fbnic_free_irq(fbd, v_idx, nv);
	page_pool_destroy(nv->page_pool);
	netif_napi_del(&nv->napi);
	list_del(&nv->napis);
	kfree(nv);
}
914 
915 void fbnic_free_napi_vectors(struct fbnic_net *fbn)
916 {
917 	struct fbnic_napi_vector *nv, *temp;
918 
919 	list_for_each_entry_safe(nv, temp, &fbn->napis, napis)
920 		fbnic_free_napi_vector(fbn, nv);
921 }
922 
923 static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv)
924 {
925 	unsigned char *dev_name = nv->napi.dev->name;
926 
927 	if (!nv->rxt_count)
928 		snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name,
929 			 nv->v_idx - FBNIC_NON_NAPI_VECTORS);
930 	else
931 		snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name,
932 			 nv->v_idx - FBNIC_NON_NAPI_VECTORS);
933 }
934 
/* Pool maps pages for DMA and syncs them for the device on recycle */
#define FBNIC_PAGE_POOL_FLAGS \
	(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
937 
/* Create the page pool shared by all Rx triads on this napi vector.
 * The pool is sized from the combined header/payload queue depth,
 * capped at the page pool limit of 32768 cached entries.
 *
 * Return: 0 on success or the PTR_ERR from page_pool_create().
 */
static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
				    struct fbnic_napi_vector *nv)
{
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = FBNIC_PAGE_POOL_FLAGS,
		.pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count,
		.nid = NUMA_NO_NODE,
		.dev = nv->dev,
		.dma_dir = DMA_BIDIRECTIONAL,
		.offset = 0,
		.max_len = PAGE_SIZE
	};
	struct page_pool *pp;

	/* Page pool cannot exceed a size of 32768. This doesn't limit the
	 * pages on the ring but the number we can have cached waiting on
	 * the next use.
	 *
	 * TBD: Can this be reduced further? Would a multiple of
	 * NAPI_POLL_WEIGHT possibly make more sense? The question is how
	 * many pages do we need to hold in reserve to get the best return
	 * without hogging too much system memory.
	 */
	if (pp_params.pool_size > 32768)
		pp_params.pool_size = 32768;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp))
		return PTR_ERR(pp);

	nv->page_pool = pp;

	return 0;
}
973 
974 static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
975 			    int q_idx, u8 flags)
976 {
977 	ring->doorbell = doorbell;
978 	ring->q_idx = q_idx;
979 	ring->flags = flags;
980 }
981 
/* Allocate and initialize one napi vector and its queue triads.
 *
 * @v_count:   stride between queue indices owned by successive vectors
 * @v_idx:     MSI-X vector index to bind this napi instance to
 * @txq_count: Tx queues to publish via fbn->tx; remaining Tx triads
 *             are created disabled
 * @rxq_count: Rx triads (header/payload/completion), published via
 *             fbn->rx
 *
 * Return: 0 on success or a negative errno; all partial allocations
 * are unwound on failure.
 */
static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
				   unsigned int v_count, unsigned int v_idx,
				   unsigned int txq_count, unsigned int txq_idx,
				   unsigned int rxq_count, unsigned int rxq_idx)
{
	int txt_count = txq_count, rxt_count = rxq_count;
	u32 __iomem *uc_addr = fbd->uc_addr0;
	struct fbnic_napi_vector *nv;
	struct fbnic_q_triad *qt;
	int qt_count, err;
	u32 __iomem *db;

	qt_count = txt_count + rxq_count;
	if (!qt_count)
		return -EINVAL;

	/* If MMIO has already failed there are no rings to initialize */
	if (!uc_addr)
		return -EIO;

	/* Allocate NAPI vector and queue triads */
	nv = kzalloc(struct_size(nv, qt, qt_count), GFP_KERNEL);
	if (!nv)
		return -ENOMEM;

	/* Record queue triad counts */
	nv->txt_count = txt_count;
	nv->rxt_count = rxt_count;

	/* Provide pointer back to fbnic and MSI-X vectors */
	nv->fbd = fbd;
	nv->v_idx = v_idx;

	/* Record IRQ to NAPI struct */
	netif_napi_set_irq(&nv->napi,
			   pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));

	/* Tie napi to netdev */
	list_add(&nv->napis, &fbn->napis);
	netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);

	/* Tie nv back to PCIe dev */
	nv->dev = fbd->dev;

	/* Allocate page pool */
	if (rxq_count) {
		err = fbnic_alloc_nv_page_pool(fbn, nv);
		if (err)
			goto napi_del;
	}

	/* Initialize vector name */
	fbnic_name_napi_vector(nv);

	/* Request the IRQ for napi vector */
	err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings,
				IRQF_SHARED, nv->name, nv);
	if (err)
		goto pp_destroy;

	/* Initialize queue triads */
	qt = nv->qt;

	while (txt_count) {
		/* Configure Tx queue */
		db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL];

		/* Assign Tx queue to netdev if applicable */
		if (txq_count > 0) {
			u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;

			fbnic_ring_init(&qt->sub0, db, txq_idx, flags);
			fbn->tx[txq_idx] = &qt->sub0;
			txq_count--;
		} else {
			fbnic_ring_init(&qt->sub0, db, 0,
					FBNIC_RING_F_DISABLED);
		}

		/* Configure Tx completion queue */
		db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD];
		fbnic_ring_init(&qt->cmpl, db, 0, 0);

		/* Update Tx queue index */
		txt_count--;
		txq_idx += v_count;

		/* Move to next queue triad */
		qt++;
	}

	while (rxt_count) {
		/* Configure header queue */
		db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
		fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX);

		/* Configure payload queue */
		db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
		fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX);

		/* Configure Rx completion queue */
		db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
		fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS);
		fbn->rx[rxq_idx] = &qt->cmpl;

		/* Update Rx queue index */
		rxt_count--;
		rxq_idx += v_count;

		/* Move to next queue triad */
		qt++;
	}

	return 0;

pp_destroy:
	page_pool_destroy(nv->page_pool);
napi_del:
	netif_napi_del(&nv->napi);
	list_del(&nv->napis);
	kfree(nv);
	return err;
}
1105 
1106 int fbnic_alloc_napi_vectors(struct fbnic_net *fbn)
1107 {
1108 	unsigned int txq_idx = 0, rxq_idx = 0, v_idx = FBNIC_NON_NAPI_VECTORS;
1109 	unsigned int num_tx = fbn->num_tx_queues;
1110 	unsigned int num_rx = fbn->num_rx_queues;
1111 	unsigned int num_napi = fbn->num_napi;
1112 	struct fbnic_dev *fbd = fbn->fbd;
1113 	int err;
1114 
1115 	/* Allocate 1 Tx queue per napi vector */
1116 	if (num_napi < FBNIC_MAX_TXQS && num_napi == num_tx + num_rx) {
1117 		while (num_tx) {
1118 			err = fbnic_alloc_napi_vector(fbd, fbn,
1119 						      num_napi, v_idx,
1120 						      1, txq_idx, 0, 0);
1121 			if (err)
1122 				goto free_vectors;
1123 
1124 			/* Update counts and index */
1125 			num_tx--;
1126 			txq_idx++;
1127 
1128 			v_idx++;
1129 		}
1130 	}
1131 
1132 	/* Allocate Tx/Rx queue pairs per vector, or allocate remaining Rx */
1133 	while (num_rx | num_tx) {
1134 		int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx);
1135 		int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx);
1136 
1137 		err = fbnic_alloc_napi_vector(fbd, fbn, num_napi, v_idx,
1138 					      tqpv, txq_idx, rqpv, rxq_idx);
1139 		if (err)
1140 			goto free_vectors;
1141 
1142 		/* Update counts and index */
1143 		num_tx -= tqpv;
1144 		txq_idx++;
1145 
1146 		num_rx -= rqpv;
1147 		rxq_idx++;
1148 
1149 		v_idx++;
1150 	}
1151 
1152 	return 0;
1153 
1154 free_vectors:
1155 	fbnic_free_napi_vectors(fbn);
1156 
1157 	return -ENOMEM;
1158 }
1159 
1160 static void fbnic_free_ring_resources(struct device *dev,
1161 				      struct fbnic_ring *ring)
1162 {
1163 	kvfree(ring->buffer);
1164 	ring->buffer = NULL;
1165 
1166 	/* If size is not set there are no descriptors present */
1167 	if (!ring->size)
1168 		return;
1169 
1170 	dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
1171 	ring->size_mask = 0;
1172 	ring->size = 0;
1173 }
1174 
1175 static int fbnic_alloc_tx_ring_desc(struct fbnic_net *fbn,
1176 				    struct fbnic_ring *txr)
1177 {
1178 	struct device *dev = fbn->netdev->dev.parent;
1179 	size_t size;
1180 
1181 	/* Round size up to nearest 4K */
1182 	size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096);
1183 
1184 	txr->desc = dma_alloc_coherent(dev, size, &txr->dma,
1185 				       GFP_KERNEL | __GFP_NOWARN);
1186 	if (!txr->desc)
1187 		return -ENOMEM;
1188 
1189 	/* txq_size should be a power of 2, so mask is just that -1 */
1190 	txr->size_mask = fbn->txq_size - 1;
1191 	txr->size = size;
1192 
1193 	return 0;
1194 }
1195 
1196 static int fbnic_alloc_tx_ring_buffer(struct fbnic_ring *txr)
1197 {
1198 	size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1);
1199 
1200 	txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1201 
1202 	return txr->tx_buf ? 0 : -ENOMEM;
1203 }
1204 
1205 static int fbnic_alloc_tx_ring_resources(struct fbnic_net *fbn,
1206 					 struct fbnic_ring *txr)
1207 {
1208 	struct device *dev = fbn->netdev->dev.parent;
1209 	int err;
1210 
1211 	if (txr->flags & FBNIC_RING_F_DISABLED)
1212 		return 0;
1213 
1214 	err = fbnic_alloc_tx_ring_desc(fbn, txr);
1215 	if (err)
1216 		return err;
1217 
1218 	if (!(txr->flags & FBNIC_RING_F_CTX))
1219 		return 0;
1220 
1221 	err = fbnic_alloc_tx_ring_buffer(txr);
1222 	if (err)
1223 		goto free_desc;
1224 
1225 	return 0;
1226 
1227 free_desc:
1228 	fbnic_free_ring_resources(dev, txr);
1229 	return err;
1230 }
1231 
/* Allocate the DMA-coherent descriptor ring for an Rx ring.
 *
 * The ring's role (header BDQ, payload BDQ, or Rx completion queue) is
 * identified by the offset of its doorbell within the per-queue CSR
 * block, and the ring is sized from the matching fbnic_net setting.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL if the
 * doorbell offset does not match any known Rx ring type.
 */
static int fbnic_alloc_rx_ring_desc(struct fbnic_net *fbn,
				    struct fbnic_ring *rxr)
{
	struct device *dev = fbn->netdev->dev.parent;
	size_t desc_size = sizeof(*rxr->desc);
	u32 rxq_size;
	size_t size;

	/* BDQ entries span FBNIC_BD_FRAG_COUNT descriptors, so scale the
	 * per-entry size up and the entry count down to keep the overall
	 * allocation consistent.
	 */
	switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) {
	case FBNIC_QUEUE_BDQ_HPQ_TAIL:
		rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT;
		desc_size *= FBNIC_BD_FRAG_COUNT;
		break;
	case FBNIC_QUEUE_BDQ_PPQ_TAIL:
		rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT;
		desc_size *= FBNIC_BD_FRAG_COUNT;
		break;
	case FBNIC_QUEUE_RCQ_HEAD:
		rxq_size = fbn->rcq_size;
		break;
	default:
		return -EINVAL;
	}

	/* Round size up to nearest 4K */
	size = ALIGN(array_size(desc_size, rxq_size), 4096);

	rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma,
				       GFP_KERNEL | __GFP_NOWARN);
	if (!rxr->desc)
		return -ENOMEM;

	/* rxq_size should be a power of 2, so mask is just that -1 */
	rxr->size_mask = rxq_size - 1;
	rxr->size = size;

	return 0;
}
1270 
1271 static int fbnic_alloc_rx_ring_buffer(struct fbnic_ring *rxr)
1272 {
1273 	size_t size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1);
1274 
1275 	if (rxr->flags & FBNIC_RING_F_CTX)
1276 		size = sizeof(*rxr->rx_buf) * (rxr->size_mask + 1);
1277 	else
1278 		size = sizeof(*rxr->pkt);
1279 
1280 	rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1281 
1282 	return rxr->rx_buf ? 0 : -ENOMEM;
1283 }
1284 
1285 static int fbnic_alloc_rx_ring_resources(struct fbnic_net *fbn,
1286 					 struct fbnic_ring *rxr)
1287 {
1288 	struct device *dev = fbn->netdev->dev.parent;
1289 	int err;
1290 
1291 	err = fbnic_alloc_rx_ring_desc(fbn, rxr);
1292 	if (err)
1293 		return err;
1294 
1295 	err = fbnic_alloc_rx_ring_buffer(rxr);
1296 	if (err)
1297 		goto free_desc;
1298 
1299 	return 0;
1300 
1301 free_desc:
1302 	fbnic_free_ring_resources(dev, rxr);
1303 	return err;
1304 }
1305 
1306 static void fbnic_free_qt_resources(struct fbnic_net *fbn,
1307 				    struct fbnic_q_triad *qt)
1308 {
1309 	struct device *dev = fbn->netdev->dev.parent;
1310 
1311 	fbnic_free_ring_resources(dev, &qt->cmpl);
1312 	fbnic_free_ring_resources(dev, &qt->sub1);
1313 	fbnic_free_ring_resources(dev, &qt->sub0);
1314 }
1315 
1316 static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
1317 				       struct fbnic_q_triad *qt)
1318 {
1319 	struct device *dev = fbn->netdev->dev.parent;
1320 	int err;
1321 
1322 	err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0);
1323 	if (err)
1324 		return err;
1325 
1326 	err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl);
1327 	if (err)
1328 		goto free_sub1;
1329 
1330 	return 0;
1331 
1332 free_sub1:
1333 	fbnic_free_ring_resources(dev, &qt->sub0);
1334 	return err;
1335 }
1336 
1337 static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
1338 				       struct fbnic_q_triad *qt)
1339 {
1340 	struct device *dev = fbn->netdev->dev.parent;
1341 	int err;
1342 
1343 	err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
1344 	if (err)
1345 		return err;
1346 
1347 	err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
1348 	if (err)
1349 		goto free_sub0;
1350 
1351 	err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl);
1352 	if (err)
1353 		goto free_sub1;
1354 
1355 	return 0;
1356 
1357 free_sub1:
1358 	fbnic_free_ring_resources(dev, &qt->sub1);
1359 free_sub0:
1360 	fbnic_free_ring_resources(dev, &qt->sub0);
1361 	return err;
1362 }
1363 
1364 static void fbnic_free_nv_resources(struct fbnic_net *fbn,
1365 				    struct fbnic_napi_vector *nv)
1366 {
1367 	int i, j;
1368 
1369 	/* Free Tx Resources  */
1370 	for (i = 0; i < nv->txt_count; i++)
1371 		fbnic_free_qt_resources(fbn, &nv->qt[i]);
1372 
1373 	for (j = 0; j < nv->rxt_count; j++, i++)
1374 		fbnic_free_qt_resources(fbn, &nv->qt[i]);
1375 }
1376 
1377 static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
1378 				    struct fbnic_napi_vector *nv)
1379 {
1380 	int i, j, err;
1381 
1382 	/* Allocate Tx Resources */
1383 	for (i = 0; i < nv->txt_count; i++) {
1384 		err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]);
1385 		if (err)
1386 			goto free_resources;
1387 	}
1388 
1389 	/* Allocate Rx Resources */
1390 	for (j = 0; j < nv->rxt_count; j++, i++) {
1391 		err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]);
1392 		if (err)
1393 			goto free_resources;
1394 	}
1395 
1396 	return 0;
1397 
1398 free_resources:
1399 	while (i--)
1400 		fbnic_free_qt_resources(fbn, &nv->qt[i]);
1401 	return err;
1402 }
1403 
1404 void fbnic_free_resources(struct fbnic_net *fbn)
1405 {
1406 	struct fbnic_napi_vector *nv;
1407 
1408 	list_for_each_entry(nv, &fbn->napis, napis)
1409 		fbnic_free_nv_resources(fbn, nv);
1410 }
1411 
1412 int fbnic_alloc_resources(struct fbnic_net *fbn)
1413 {
1414 	struct fbnic_napi_vector *nv;
1415 	int err = -ENODEV;
1416 
1417 	list_for_each_entry(nv, &fbn->napis, napis) {
1418 		err = fbnic_alloc_nv_resources(fbn, nv);
1419 		if (err)
1420 			goto free_resources;
1421 	}
1422 
1423 	return 0;
1424 
1425 free_resources:
1426 	list_for_each_entry_continue_reverse(nv, &fbn->napis, napis)
1427 		fbnic_free_nv_resources(fbn, nv);
1428 
1429 	return err;
1430 }
1431 
1432 static void fbnic_disable_twq0(struct fbnic_ring *txr)
1433 {
1434 	u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
1435 
1436 	twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE;
1437 
1438 	fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl);
1439 }
1440 
/* Stop the Tx completion queue and mask its interrupt so no further Tx
 * completion events are raised for this ring.
 */
static void fbnic_disable_tcq(struct fbnic_ring *txr)
{
	fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0);
	fbnic_ring_wr32(txr, FBNIC_QUEUE_TIM_MASK, FBNIC_QUEUE_TIM_MASK_MASK);
}
1446 
/* Clear the enable bit in the buffer descriptor queue control register.
 *
 * A single BDQ control register covers both the header (hpq) and payload
 * (ppq) queues (see fbnic_enable_bdq()), so only hpq is used to address
 * the CSR; ppq is accepted for symmetry with the enable path and is
 * intentionally unused here.
 */
static void fbnic_disable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
{
	u32 bdq_ctl = fbnic_ring_rd32(hpq, FBNIC_QUEUE_BDQ_CTL);

	bdq_ctl &= ~FBNIC_QUEUE_BDQ_CTL_ENABLE;

	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
}
1455 
/* Stop the Rx completion queue and mask its interrupt so no further Rx
 * completion events are raised for this ring.
 */
static void fbnic_disable_rcq(struct fbnic_ring *rxr)
{
	fbnic_ring_wr32(rxr, FBNIC_QUEUE_RCQ_CTL, 0);
	fbnic_ring_wr32(rxr, FBNIC_QUEUE_RIM_MASK, FBNIC_QUEUE_RIM_MASK_MASK);
}
1461 
1462 void fbnic_napi_disable(struct fbnic_net *fbn)
1463 {
1464 	struct fbnic_napi_vector *nv;
1465 
1466 	list_for_each_entry(nv, &fbn->napis, napis) {
1467 		napi_disable(&nv->napi);
1468 
1469 		fbnic_nv_irq_disable(nv);
1470 	}
1471 }
1472 
1473 void fbnic_disable(struct fbnic_net *fbn)
1474 {
1475 	struct fbnic_dev *fbd = fbn->fbd;
1476 	struct fbnic_napi_vector *nv;
1477 	int i, j;
1478 
1479 	list_for_each_entry(nv, &fbn->napis, napis) {
1480 		/* Disable Tx queue triads */
1481 		for (i = 0; i < nv->txt_count; i++) {
1482 			struct fbnic_q_triad *qt = &nv->qt[i];
1483 
1484 			fbnic_disable_twq0(&qt->sub0);
1485 			fbnic_disable_tcq(&qt->cmpl);
1486 		}
1487 
1488 		/* Disable Rx queue triads */
1489 		for (j = 0; j < nv->rxt_count; j++, i++) {
1490 			struct fbnic_q_triad *qt = &nv->qt[i];
1491 
1492 			fbnic_disable_bdq(&qt->sub0, &qt->sub1);
1493 			fbnic_disable_rcq(&qt->cmpl);
1494 		}
1495 	}
1496 
1497 	fbnic_wrfl(fbd);
1498 }
1499 
/* Enable the TMI drop mode so pending Tx frames are discarded; used as a
 * last resort when waiting for the Tx queues to go idle times out (see
 * fbnic_wait_all_queues_idle()).
 */
static void fbnic_tx_flush(struct fbnic_dev *fbd)
{
	netdev_warn(fbd->netdev, "triggering Tx flush\n");

	fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN,
		    FBNIC_TMI_DROP_CTRL_EN);
}
1507 
/* Disable the TMI drop mode again once the flush is complete */
static void fbnic_tx_flush_off(struct fbnic_dev *fbd)
{
	fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN, 0);
}
1512 
/* Describes a contiguous range of queue-idle status registers */
struct fbnic_idle_regs {
	u32 reg_base;	/* CSR address of the first register in the range */
	u8 reg_cnt;	/* Number of consecutive registers in the range */
};
1517 
1518 static bool fbnic_all_idle(struct fbnic_dev *fbd,
1519 			   const struct fbnic_idle_regs *regs,
1520 			   unsigned int nregs)
1521 {
1522 	unsigned int i, j;
1523 
1524 	for (i = 0; i < nregs; i++) {
1525 		for (j = 0; j < regs[i].reg_cnt; j++) {
1526 			if (fbnic_rd32(fbd, regs[i].reg_base + j) != ~0U)
1527 				return false;
1528 		}
1529 	}
1530 	return true;
1531 }
1532 
1533 static void fbnic_idle_dump(struct fbnic_dev *fbd,
1534 			    const struct fbnic_idle_regs *regs,
1535 			    unsigned int nregs, const char *dir, int err)
1536 {
1537 	unsigned int i, j;
1538 
1539 	netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err);
1540 	for (i = 0; i < nregs; i++)
1541 		for (j = 0; j < regs[i].reg_cnt; j++)
1542 			netdev_err(fbd->netdev, "0x%04x: %08x\n",
1543 				   regs[i].reg_base + j,
1544 				   fbnic_rd32(fbd, regs[i].reg_base + j));
1545 }
1546 
/* Wait for all Tx then all Rx queues to report idle.
 *
 * Each idle register reports a queue as idle with a set bit, so a fully
 * idle register reads as ~0U (checked in fbnic_all_idle()). If Tx fails
 * to idle, a TMI flush is triggered and the poll retried once. When
 * @may_fail is set, a Tx timeout aborts before the Rx check; otherwise
 * the Rx side is still polled and its result returned.
 *
 * Return: 0 when all queues went idle, -ETIMEDOUT otherwise.
 */
int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
{
	static const struct fbnic_idle_regs tx[] = {
		{ FBNIC_QM_TWQ_IDLE(0),	FBNIC_QM_TWQ_IDLE_CNT, },
		{ FBNIC_QM_TQS_IDLE(0),	FBNIC_QM_TQS_IDLE_CNT, },
		{ FBNIC_QM_TDE_IDLE(0),	FBNIC_QM_TDE_IDLE_CNT, },
		{ FBNIC_QM_TCQ_IDLE(0),	FBNIC_QM_TCQ_IDLE_CNT, },
	}, rx[] = {
		{ FBNIC_QM_HPQ_IDLE(0),	FBNIC_QM_HPQ_IDLE_CNT, },
		{ FBNIC_QM_PPQ_IDLE(0),	FBNIC_QM_PPQ_IDLE_CNT, },
		{ FBNIC_QM_RCQ_IDLE(0),	FBNIC_QM_RCQ_IDLE_CNT, },
	};
	bool idle;
	int err;

	/* Poll up to 500ms at a 2us interval for Tx to go idle */
	err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
				       false, fbd, tx, ARRAY_SIZE(tx));
	if (err == -ETIMEDOUT) {
		/* Tx is stuck: drop pending frames via TMI, re-poll, then
		 * restore normal operation.
		 */
		fbnic_tx_flush(fbd);
		err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle,
					       2, 500000, false,
					       fbd, tx, ARRAY_SIZE(tx));
		fbnic_tx_flush_off(fbd);
	}
	if (err) {
		fbnic_idle_dump(fbd, tx, ARRAY_SIZE(tx), "Tx", err);
		if (may_fail)
			return err;
	}

	/* Rx has no flush fallback; just poll once and report */
	err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
				       false, fbd, rx, ARRAY_SIZE(rx));
	if (err)
		fbnic_idle_dump(fbd, rx, ARRAY_SIZE(rx), "Rx", err);
	return err;
}
1583 
/* Drop all outstanding work from every ring of the netdev.
 *
 * For each Tx triad the unprocessed work queue entries are cleaned, the
 * completion ring is zeroed, BQL state is reset and the queue's NAPI
 * association is removed. For each Rx triad the posted BDQ pages are
 * returned, the completion ring is zeroed, any in-progress packet buffer
 * is released and the NAPI association is removed.
 *
 * NOTE(review): assumes the queues were already disabled and the NAPI
 * vectors quiesced (see fbnic_disable()/fbnic_napi_disable()) -- confirm
 * against callers.
 */
void fbnic_flush(struct fbnic_net *fbn)
{
	struct fbnic_napi_vector *nv;

	list_for_each_entry(nv, &fbn->napis, napis) {
		int i, j;

		/* Flush any processed Tx Queue Triads and drop the rest */
		for (i = 0; i < nv->txt_count; i++) {
			struct fbnic_q_triad *qt = &nv->qt[i];
			struct netdev_queue *tx_queue;

			/* Clean the work queues of unprocessed work */
			fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);

			/* Reset completion queue descriptor ring */
			memset(qt->cmpl.desc, 0, qt->cmpl.size);

			/* Nothing else to do if Tx queue is disabled */
			if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
				continue;

			/* Reset BQL associated with Tx queue */
			tx_queue = netdev_get_tx_queue(nv->napi.dev,
						       qt->sub0.q_idx);
			netdev_tx_reset_queue(tx_queue);

			/* Disassociate Tx queue from NAPI */
			netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
					     NETDEV_QUEUE_TYPE_TX, NULL);
		}

		/* Flush any processed Rx Queue Triads and drop the rest */
		for (j = 0; j < nv->rxt_count; j++, i++) {
			struct fbnic_q_triad *qt = &nv->qt[i];

			/* Clean the work queues of unprocessed work */
			fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
			fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail);

			/* Reset completion queue descriptor ring */
			memset(qt->cmpl.desc, 0, qt->cmpl.size);

			/* Release any partially assembled packet buffer */
			fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
			qt->cmpl.pkt->buff.data_hard_start = NULL;

			/* Disassociate Rx queue from NAPI */
			netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
					     NETDEV_QUEUE_TYPE_RX, NULL);
		}
	}
}
1636 
1637 void fbnic_fill(struct fbnic_net *fbn)
1638 {
1639 	struct fbnic_napi_vector *nv;
1640 
1641 	list_for_each_entry(nv, &fbn->napis, napis) {
1642 		int i, j;
1643 
1644 		/* Configure NAPI mapping for Tx */
1645 		for (i = 0; i < nv->txt_count; i++) {
1646 			struct fbnic_q_triad *qt = &nv->qt[i];
1647 
1648 			/* Nothing to do if Tx queue is disabled */
1649 			if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
1650 				continue;
1651 
1652 			/* Associate Tx queue with NAPI */
1653 			netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
1654 					     NETDEV_QUEUE_TYPE_TX, &nv->napi);
1655 		}
1656 
1657 		/* Configure NAPI mapping and populate pages
1658 		 * in the BDQ rings to use for Rx
1659 		 */
1660 		for (j = 0; j < nv->rxt_count; j++, i++) {
1661 			struct fbnic_q_triad *qt = &nv->qt[i];
1662 
1663 			/* Associate Rx queue with NAPI */
1664 			netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
1665 					     NETDEV_QUEUE_TYPE_RX, &nv->napi);
1666 
1667 			/* Populate the header and payload BDQs */
1668 			fbnic_fill_bdq(nv, &qt->sub0);
1669 			fbnic_fill_bdq(nv, &qt->sub1);
1670 		}
1671 	}
1672 }
1673 
1674 static void fbnic_enable_twq0(struct fbnic_ring *twq)
1675 {
1676 	u32 log_size = fls(twq->size_mask);
1677 
1678 	if (!twq->size_mask)
1679 		return;
1680 
1681 	/* Reset head/tail */
1682 	fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_RESET);
1683 	twq->tail = 0;
1684 	twq->head = 0;
1685 
1686 	/* Store descriptor ring address and size */
1687 	fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma));
1688 	fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma));
1689 
1690 	/* Write lower 4 bits of log size as 64K ring size is 0 */
1691 	fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_SIZE, log_size & 0xf);
1692 
1693 	fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
1694 }
1695 
1696 static void fbnic_enable_tcq(struct fbnic_napi_vector *nv,
1697 			     struct fbnic_ring *tcq)
1698 {
1699 	u32 log_size = fls(tcq->size_mask);
1700 
1701 	if (!tcq->size_mask)
1702 		return;
1703 
1704 	/* Reset head/tail */
1705 	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_RESET);
1706 	tcq->tail = 0;
1707 	tcq->head = 0;
1708 
1709 	/* Store descriptor ring address and size */
1710 	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma));
1711 	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma));
1712 
1713 	/* Write lower 4 bits of log size as 64K ring size is 0 */
1714 	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_SIZE, log_size & 0xf);
1715 
1716 	/* Store interrupt information for the completion queue */
1717 	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx);
1718 	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2);
1719 	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_MASK, 0);
1720 
1721 	/* Enable queue */
1722 	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_ENABLE);
1723 }
1724 
/* Program and enable the header/payload buffer descriptor queue pair.
 *
 * Both queues share a single BDQ control register, addressed via hpq.
 * The header queue is always programmed; the payload queue is only
 * programmed -- and its enable bit added to the control word -- when it
 * actually has descriptors. The final control write enables whatever
 * was configured.
 */
static void fbnic_enable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
{
	u32 bdq_ctl = FBNIC_QUEUE_BDQ_CTL_ENABLE;
	u32 log_size;

	/* Reset head/tail */
	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, FBNIC_QUEUE_BDQ_CTL_RESET);
	ppq->tail = 0;
	ppq->head = 0;
	hpq->tail = 0;
	hpq->head = 0;

	log_size = fls(hpq->size_mask);

	/* Store descriptor ring address and size */
	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma));
	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma));

	/* Write lower 4 bits of log size as 64K ring size is 0 */
	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_SIZE, log_size & 0xf);

	/* A zero size mask means the PPQ has no descriptor ring */
	if (!ppq->size_mask)
		goto write_ctl;

	log_size = fls(ppq->size_mask);

	/* Add enabling of PPQ to BDQ control */
	bdq_ctl |= FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE;

	/* Store descriptor ring address and size */
	fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma));
	fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma));
	fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_SIZE, log_size & 0xf);

write_ctl:
	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
}
1762 
1763 static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
1764 				       struct fbnic_ring *rcq)
1765 {
1766 	u32 drop_mode, rcq_ctl;
1767 
1768 	drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
1769 
1770 	/* Specify packet layout */
1771 	rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) |
1772 	    FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK, FBNIC_RX_HROOM) |
1773 	    FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK, FBNIC_RX_TROOM);
1774 
1775 	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
1776 }
1777 
/* Program and enable an Rx completion queue, wiring its interrupt to the
 * given napi vector. Also configures the drop mode and the packet layout
 * parameters (padding, max header size, payload offset/page-cacheline).
 */
static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
			     struct fbnic_ring *rcq)
{
	u32 log_size = fls(rcq->size_mask);
	u32 rcq_ctl;

	fbnic_config_drop_mode_rcq(nv, rcq);

	/* Describe the packet layout to the Rx descriptor engine */
	rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
		   FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK,
			      FBNIC_RX_MAX_HDR) |
		   FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK,
			      FBNIC_RX_PAYLD_OFFSET) |
		   FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK,
			      FBNIC_RX_PAYLD_PG_CL);
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL1, rcq_ctl);

	/* Reset head/tail */
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_RESET);
	rcq->head = 0;
	rcq->tail = 0;

	/* Store descriptor ring address and size */
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma));
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma));

	/* Write lower 4 bits of log size as 64K ring size is 0 */
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);

	/* Store interrupt information for the completion queue:
	 * target vector, half-full threshold, interrupt unmasked.
	 */
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx);
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2);
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);

	/* Enable queue */
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE);
}
1815 
1816 void fbnic_enable(struct fbnic_net *fbn)
1817 {
1818 	struct fbnic_dev *fbd = fbn->fbd;
1819 	struct fbnic_napi_vector *nv;
1820 	int i, j;
1821 
1822 	list_for_each_entry(nv, &fbn->napis, napis) {
1823 		/* Setup Tx Queue Triads */
1824 		for (i = 0; i < nv->txt_count; i++) {
1825 			struct fbnic_q_triad *qt = &nv->qt[i];
1826 
1827 			fbnic_enable_twq0(&qt->sub0);
1828 			fbnic_enable_tcq(nv, &qt->cmpl);
1829 		}
1830 
1831 		/* Setup Rx Queue Triads */
1832 		for (j = 0; j < nv->rxt_count; j++, i++) {
1833 			struct fbnic_q_triad *qt = &nv->qt[i];
1834 
1835 			fbnic_enable_bdq(&qt->sub0, &qt->sub1);
1836 			fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
1837 			fbnic_enable_rcq(nv, &qt->cmpl);
1838 		}
1839 	}
1840 
1841 	fbnic_wrfl(fbd);
1842 }
1843 
1844 static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
1845 {
1846 	struct fbnic_dev *fbd = nv->fbd;
1847 	u32 val;
1848 
1849 	val = FBNIC_INTR_CQ_REARM_INTR_UNMASK;
1850 
1851 	fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
1852 }
1853 
1854 void fbnic_napi_enable(struct fbnic_net *fbn)
1855 {
1856 	u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
1857 	struct fbnic_dev *fbd = fbn->fbd;
1858 	struct fbnic_napi_vector *nv;
1859 	int i;
1860 
1861 	list_for_each_entry(nv, &fbn->napis, napis) {
1862 		napi_enable(&nv->napi);
1863 
1864 		fbnic_nv_irq_enable(nv);
1865 
1866 		/* Record bit used for NAPI IRQs so we can
1867 		 * set the mask appropriately
1868 		 */
1869 		irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
1870 	}
1871 
1872 	/* Force the first interrupt on the device to guarantee
1873 	 * that any packets that may have been enqueued during the
1874 	 * bringup are processed.
1875 	 */
1876 	for (i = 0; i < ARRAY_SIZE(irqs); i++) {
1877 		if (!irqs[i])
1878 			continue;
1879 		fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
1880 	}
1881 
1882 	fbnic_wrfl(fbd);
1883 }
1884 
/* Detect Rx queues that have (nearly) run out of buffer pages and kick
 * their napi vector's interrupt so the BDQs get refilled. Without this,
 * a fully depleted queue would never complete a packet and therefore
 * never raise the interrupt that triggers refilling.
 */
void fbnic_napi_depletion_check(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_napi_vector *nv;
	int i, j;

	list_for_each_entry(nv, &fbn->napis, napis) {
		/* Find RQs which are completely out of pages */
		for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) {
			/* Assume 4 pages is always enough to fit a packet
			 * and therefore generate a completion and an IRQ.
			 */
			if (fbnic_desc_used(&nv->qt[i].sub0) < 4 ||
			    fbnic_desc_used(&nv->qt[i].sub1) < 4)
				irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
		}
	}

	/* Unmask and fire the interrupt for each depleted vector */
	for (i = 0; i < ARRAY_SIZE(irqs); i++) {
		if (!irqs[i])
			continue;
		fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(i), irqs[i]);
		fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
	}

	fbnic_wrfl(fbd);
}
1914