xref: /linux/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3 
4 #include <linux/bitfield.h>
5 #include <linux/iopoll.h>
6 #include <linux/pci.h>
7 #include <net/netdev_queues.h>
8 #include <net/page_pool/helpers.h>
9 
10 #include "fbnic.h"
11 #include "fbnic_csr.h"
12 #include "fbnic_netdev.h"
13 #include "fbnic_txrx.h"
14 
15 enum {
16 	FBNIC_XMIT_CB_TS	= 0x01,
17 };
18 
19 struct fbnic_xmit_cb {
20 	u32 bytecount;
21 	u8 desc_count;
22 	u8 flags;
23 	int hw_head;
24 };
25 
26 #define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
27 
28 static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
29 {
30 	unsigned long csr_base = (unsigned long)ring->doorbell;
31 
32 	csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1);
33 
34 	return (u32 __iomem *)csr_base;
35 }
36 
37 static u32 fbnic_ring_rd32(struct fbnic_ring *ring, unsigned int csr)
38 {
39 	u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
40 
41 	return readl(csr_base + csr);
42 }
43 
44 static void fbnic_ring_wr32(struct fbnic_ring *ring, unsigned int csr, u32 val)
45 {
46 	u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
47 
48 	writel(val, csr_base + csr);
49 }
50 
51 /**
52  * fbnic_ts40_to_ns() - convert descriptor timestamp to PHC time
53  * @fbn: netdev priv of the FB NIC
54  * @ts40: timestamp read from a descriptor
55  *
56  * Return: u64 value of PHC time in nanoseconds
57  *
58  * Convert truncated 40 bit device timestamp as read from a descriptor
59  * to the full PHC time in nanoseconds.
60  */
61 static __maybe_unused u64 fbnic_ts40_to_ns(struct fbnic_net *fbn, u64 ts40)
62 {
63 	unsigned int s;
64 	u64 time_ns;
65 	s64 offset;
66 	u8 ts_top;
67 	u32 high;
68 
69 	do {
70 		s = u64_stats_fetch_begin(&fbn->time_seq);
71 		offset = READ_ONCE(fbn->time_offset);
72 	} while (u64_stats_fetch_retry(&fbn->time_seq, s));
73 
74 	high = READ_ONCE(fbn->time_high);
75 
76 	/* Bits 63..40 from periodic clock reads, 39..0 from ts40 */
77 	time_ns = (u64)(high >> 8) << 40 | ts40;
78 
79 	/* Compare bits 32-39 between periodic reads and ts40,
80 	 * see if HW clock may have wrapped since last read. We are sure
81 	 * that periodic reads are always at least ~1 minute behind, so
82 	 * this logic works perfectly fine.
83 	 */
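	/* Illustrative example with assumed values (not read from hardware):
	 * if the periodic read left (u8)high == 0xfe and the descriptor
	 * reports ts_top == 0x01, the gap of 0xfd exceeds U8_MAX / 2, so the
	 * device clock must have crossed a 2^40 ns boundary after the
	 * periodic read and 1ULL << 40 is added. A small gap (e.g. 0xfe vs
	 * 0xf0) is treated as normal forward progress instead.
	 */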
84 	ts_top = ts40 >> 32;
85 	if (ts_top < (u8)high && (u8)high - ts_top > U8_MAX / 2)
86 		time_ns += 1ULL << 40;
87 
88 	return time_ns + offset;
89 }
90 
91 static unsigned int fbnic_desc_unused(struct fbnic_ring *ring)
92 {
93 	return (ring->head - ring->tail - 1) & ring->size_mask;
94 }
95 
96 static unsigned int fbnic_desc_used(struct fbnic_ring *ring)
97 {
98 	return (ring->tail - ring->head) & ring->size_mask;
99 }
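/* Worked example, assuming a ring size of 1024 (size_mask 0x3ff): with
 * head == 1020 and tail == 2 the ring holds (2 - 1020) & 0x3ff = 6 used
 * descriptors and (1020 - 2 - 1) & 0x3ff = 1017 unused ones. One slot is
 * always left in reserve so head == tail means the ring is empty, not full.
 */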
100 
101 static struct netdev_queue *txring_txq(const struct net_device *dev,
102 				       const struct fbnic_ring *ring)
103 {
104 	return netdev_get_tx_queue(dev, ring->q_idx);
105 }
106 
107 static int fbnic_maybe_stop_tx(const struct net_device *dev,
108 			       struct fbnic_ring *ring,
109 			       const unsigned int size)
110 {
111 	struct netdev_queue *txq = txring_txq(dev, ring);
112 	int res;
113 
114 	res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
115 				   FBNIC_TX_DESC_WAKEUP);
116 
117 	return !res;
118 }
119 
120 static bool fbnic_tx_sent_queue(struct sk_buff *skb, struct fbnic_ring *ring)
121 {
122 	struct netdev_queue *dev_queue = txring_txq(skb->dev, ring);
123 	unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount;
124 	bool xmit_more = netdev_xmit_more();
125 
126 	/* TBD: Request completion more often if xmit_more becomes large */
127 
128 	return __netdev_tx_sent_queue(dev_queue, bytecount, xmit_more);
129 }
130 
131 static void fbnic_unmap_single_twd(struct device *dev, __le64 *twd)
132 {
133 	u64 raw_twd = le64_to_cpu(*twd);
134 	unsigned int len;
135 	dma_addr_t dma;
136 
137 	dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
138 	len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
139 
140 	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
141 }
142 
143 static void fbnic_unmap_page_twd(struct device *dev, __le64 *twd)
144 {
145 	u64 raw_twd = le64_to_cpu(*twd);
146 	unsigned int len;
147 	dma_addr_t dma;
148 
149 	dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
150 	len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
151 
152 	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
153 }
154 
155 #define FBNIC_TWD_TYPE(_type) \
156 	cpu_to_le64(FIELD_PREP(FBNIC_TWD_TYPE_MASK, FBNIC_TWD_TYPE_##_type))
157 
158 static bool fbnic_tx_tstamp(struct sk_buff *skb)
159 {
160 	struct fbnic_net *fbn;
161 
162 	if (!unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
163 		return false;
164 
165 	fbn = netdev_priv(skb->dev);
166 	if (fbn->hwtstamp_config.tx_type == HWTSTAMP_TX_OFF)
167 		return false;
168 
169 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
170 	FBNIC_XMIT_CB(skb)->flags |= FBNIC_XMIT_CB_TS;
171 	FBNIC_XMIT_CB(skb)->hw_head = -1;
172 
173 	return true;
174 }
175 
176 static bool
177 fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
178 {
179 	unsigned int l2len, i3len;
180 
181 	if (fbnic_tx_tstamp(skb))
182 		*meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_TS);
183 
184 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
185 		return false;
186 
187 	l2len = skb_mac_header_len(skb);
188 	i3len = skb_checksum_start(skb) - skb_network_header(skb);
189 
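	/* For illustration (values assumed, e.g. a plain TCP/IPv4 frame):
	 * l2len = 14, i3len = 20 and csum_offset = 16 would be encoded as
	 * L2_HLEN = 7, L3_IHLEN = 10 and CSUM_OFFSET = 8, since all three
	 * fields are carried in 2-byte units.
	 */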
190 	*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
191 					skb->csum_offset / 2));
192 
193 	*meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
194 
195 	*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
196 			     FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
197 	return false;
198 }
199 
200 static void
201 fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
202 {
203 	skb_checksum_none_assert(skb);
204 
205 	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
206 		return;
207 
208 	if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
209 		skb->ip_summed = CHECKSUM_UNNECESSARY;
210 	} else {
211 		u16 csum = FIELD_GET(FBNIC_RCD_META_L2_CSUM_MASK, rcd);
212 
213 		skb->ip_summed = CHECKSUM_COMPLETE;
214 		skb->csum = (__force __wsum)csum;
215 	}
216 }
217 
218 static bool
219 fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
220 {
221 	struct device *dev = skb->dev->dev.parent;
222 	unsigned int tail = ring->tail, first;
223 	unsigned int size, data_len;
224 	skb_frag_t *frag;
225 	dma_addr_t dma;
226 	__le64 *twd;
227 
228 	ring->tx_buf[tail] = skb;
229 
230 	tail++;
231 	tail &= ring->size_mask;
232 	first = tail;
233 
234 	size = skb_headlen(skb);
235 	data_len = skb->data_len;
236 
237 	if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
238 		goto dma_error;
239 
240 	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
241 
242 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
243 		twd = &ring->desc[tail];
244 
245 		if (dma_mapping_error(dev, dma))
246 			goto dma_error;
247 
248 		*twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
249 				   FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
250 				   FIELD_PREP(FBNIC_TWD_TYPE_MASK,
251 					      FBNIC_TWD_TYPE_AL));
252 
253 		tail++;
254 		tail &= ring->size_mask;
255 
256 		if (!data_len)
257 			break;
258 
259 		size = skb_frag_size(frag);
260 		data_len -= size;
261 
262 		if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
263 			goto dma_error;
264 
265 		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
266 	}
267 
268 	*twd |= FBNIC_TWD_TYPE(LAST_AL);
269 
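	/* desc_count covers the metadata descriptor plus every address
	 * descriptor; e.g. a hypothetical frame with a head buffer and two
	 * page fragments gives twd - meta == 3 and a desc_count of 4, with
	 * the mask handling a wrap of the ring between meta and the last TWD.
	 */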
270 	FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask;
271 
272 	ring->tail = tail;
273 
274 	/* Record SW timestamp */
275 	skb_tx_timestamp(skb);
276 
277 	/* Verify there is room for another packet */
278 	fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC);
279 
280 	if (fbnic_tx_sent_queue(skb, ring)) {
281 		*meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_COMPLETION);
282 
283 		/* Force DMA writes to flush before writing to tail */
284 		dma_wmb();
285 
286 		writel(tail, ring->doorbell);
287 	}
288 
289 	return false;
290 dma_error:
291 	if (net_ratelimit())
292 		netdev_err(skb->dev, "TX DMA map failed\n");
293 
294 	while (tail != first) {
295 		tail--;
296 		tail &= ring->size_mask;
297 		twd = &ring->desc[tail];
298 		if (tail == first)
299 			fbnic_unmap_single_twd(dev, twd);
300 		else
301 			fbnic_unmap_page_twd(dev, twd);
302 	}
303 
304 	return true;
305 }
306 
307 #define FBNIC_MIN_FRAME_LEN	60
308 
309 static netdev_tx_t
310 fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
311 {
312 	__le64 *meta = &ring->desc[ring->tail];
313 	u16 desc_needed;
314 
315 	if (skb_put_padto(skb, FBNIC_MIN_FRAME_LEN))
316 		goto err_count;
317 
318 	/* Need: 1 descriptor per page,
319 	 *       + 1 desc for skb_head,
320 	 *       + 2 desc for metadata and timestamp metadata
321 	 *       + 7 desc gap to keep tail from touching head
322 	 * otherwise try next time
323 	 */
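	/* For example, a hypothetical skb with 3 page fragments must find
	 * 3 + 10 = 13 unused descriptors before it is placed on the ring.
	 */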
324 	desc_needed = skb_shinfo(skb)->nr_frags + 10;
325 	if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed))
326 		return NETDEV_TX_BUSY;
327 
328 	*meta = cpu_to_le64(FBNIC_TWD_FLAG_DEST_MAC);
329 
330 	/* Write all members within DWORD to condense this into 2 4B writes */
331 	FBNIC_XMIT_CB(skb)->bytecount = skb->len;
332 	FBNIC_XMIT_CB(skb)->desc_count = 0;
333 
334 	if (fbnic_tx_offloads(ring, skb, meta))
335 		goto err_free;
336 
337 	if (fbnic_tx_map(ring, skb, meta))
338 		goto err_free;
339 
340 	return NETDEV_TX_OK;
341 
342 err_free:
343 	dev_kfree_skb_any(skb);
344 err_count:
345 	u64_stats_update_begin(&ring->stats.syncp);
346 	ring->stats.dropped++;
347 	u64_stats_update_end(&ring->stats.syncp);
348 	return NETDEV_TX_OK;
349 }
350 
351 netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
352 {
353 	struct fbnic_net *fbn = netdev_priv(dev);
354 	unsigned int q_map = skb->queue_mapping;
355 
356 	return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
357 }
358 
359 netdev_features_t
360 fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
361 		     netdev_features_t features)
362 {
363 	unsigned int l2len, l3len;
364 
365 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
366 		return features;
367 
368 	l2len = skb_mac_header_len(skb);
369 	l3len = skb_checksum_start(skb) - skb_network_header(skb);
370 
371 	/* Check that header lengths are multiples of 2.
372 	 * In case of 6in6 we support longer headers (IHLEN + OHLEN)
373 	 * but keep things simple for now, 512B is plenty.
374 	 */
375 	if ((l2len | l3len | skb->csum_offset) % 2 ||
376 	    !FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
377 	    !FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
378 	    !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
379 		return features & ~NETIF_F_CSUM_MASK;
380 
381 	return features;
382 }
383 
384 static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
385 			     struct fbnic_ring *ring, bool discard,
386 			     unsigned int hw_head)
387 {
388 	u64 total_bytes = 0, total_packets = 0, ts_lost = 0;
389 	unsigned int head = ring->head;
390 	struct netdev_queue *txq;
391 	unsigned int clean_desc;
392 
393 	clean_desc = (hw_head - head) & ring->size_mask;
394 
395 	while (clean_desc) {
396 		struct sk_buff *skb = ring->tx_buf[head];
397 		unsigned int desc_cnt;
398 
399 		desc_cnt = FBNIC_XMIT_CB(skb)->desc_count;
400 		if (desc_cnt > clean_desc)
401 			break;
402 
403 		if (unlikely(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS)) {
404 			FBNIC_XMIT_CB(skb)->hw_head = hw_head;
405 			if (likely(!discard))
406 				break;
407 			ts_lost++;
408 		}
409 
410 		ring->tx_buf[head] = NULL;
411 
412 		clean_desc -= desc_cnt;
413 
414 		while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) {
415 			head++;
416 			head &= ring->size_mask;
417 			desc_cnt--;
418 		}
419 
420 		fbnic_unmap_single_twd(nv->dev, &ring->desc[head]);
421 		head++;
422 		head &= ring->size_mask;
423 		desc_cnt--;
424 
425 		while (desc_cnt--) {
426 			fbnic_unmap_page_twd(nv->dev, &ring->desc[head]);
427 			head++;
428 			head &= ring->size_mask;
429 		}
430 
431 		total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
432 		total_packets += 1;
433 
434 		napi_consume_skb(skb, napi_budget);
435 	}
436 
437 	if (!total_bytes)
438 		return;
439 
440 	ring->head = head;
441 
442 	txq = txring_txq(nv->napi.dev, ring);
443 
444 	if (unlikely(discard)) {
445 		u64_stats_update_begin(&ring->stats.syncp);
446 		ring->stats.dropped += total_packets;
447 		ring->stats.ts_lost += ts_lost;
448 		u64_stats_update_end(&ring->stats.syncp);
449 
450 		netdev_tx_completed_queue(txq, total_packets, total_bytes);
451 		return;
452 	}
453 
454 	u64_stats_update_begin(&ring->stats.syncp);
455 	ring->stats.bytes += total_bytes;
456 	ring->stats.packets += total_packets;
457 	u64_stats_update_end(&ring->stats.syncp);
458 
459 	netif_txq_completed_wake(txq, total_packets, total_bytes,
460 				 fbnic_desc_unused(ring),
461 				 FBNIC_TX_DESC_WAKEUP);
462 }
463 
464 static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
465 			    struct fbnic_ring *ring,
466 			    u64 tcd, int *ts_head, int *head0)
467 {
468 	struct skb_shared_hwtstamps hwtstamp;
469 	struct fbnic_net *fbn;
470 	struct sk_buff *skb;
471 	int head;
472 	u64 ns;
473 
474 	head = (*ts_head < 0) ? ring->head : *ts_head;
475 
476 	do {
477 		unsigned int desc_cnt;
478 
479 		if (head == ring->tail) {
480 			if (unlikely(net_ratelimit()))
481 				netdev_err(nv->napi.dev,
482 					   "Tx timestamp without matching packet\n");
483 			return;
484 		}
485 
486 		skb = ring->tx_buf[head];
487 		desc_cnt = FBNIC_XMIT_CB(skb)->desc_count;
488 
489 		head += desc_cnt;
490 		head &= ring->size_mask;
491 	} while (!(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS));
492 
493 	fbn = netdev_priv(nv->napi.dev);
494 	ns = fbnic_ts40_to_ns(fbn, FIELD_GET(FBNIC_TCD_TYPE1_TS_MASK, tcd));
495 
496 	memset(&hwtstamp, 0, sizeof(hwtstamp));
497 	hwtstamp.hwtstamp = ns_to_ktime(ns);
498 
499 	*ts_head = head;
500 
501 	FBNIC_XMIT_CB(skb)->flags &= ~FBNIC_XMIT_CB_TS;
502 	if (*head0 < 0) {
503 		head = FBNIC_XMIT_CB(skb)->hw_head;
504 		if (head >= 0)
505 			*head0 = head;
506 	}
507 
508 	skb_tstamp_tx(skb, &hwtstamp);
509 	u64_stats_update_begin(&ring->stats.syncp);
510 	ring->stats.ts_packets++;
511 	u64_stats_update_end(&ring->stats.syncp);
512 }
513 
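/* The helpers below track buffer pages by reference bias: each page is
 * handed to the ring with PAGECNT_BIAS_MAX page pool fragment references
 * up front, every buffer consumed from the page on the Rx path drops the
 * bias by one, and on drain the leftover bias is released so the page is
 * only returned to the pool once all outstanding uses are accounted for.
 */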
514 static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
515 				 struct page *page)
516 {
517 	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
518 
519 	page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
520 	rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
521 	rx_buf->page = page;
522 }
523 
524 static struct page *fbnic_page_pool_get(struct fbnic_ring *ring,
525 					unsigned int idx)
526 {
527 	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
528 
529 	rx_buf->pagecnt_bias--;
530 
531 	return rx_buf->page;
532 }
533 
534 static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
535 				  struct fbnic_napi_vector *nv, int budget)
536 {
537 	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
538 	struct page *page = rx_buf->page;
539 
540 	if (!page_pool_unref_page(page, rx_buf->pagecnt_bias))
541 		page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget);
542 
543 	rx_buf->page = NULL;
544 }
545 
546 static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
547 			    struct fbnic_q_triad *qt, s32 ts_head, s32 head0)
548 {
549 	if (head0 >= 0)
550 		fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0);
551 	else if (ts_head >= 0)
552 		fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, ts_head);
553 }
554 
555 static void
556 fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
557 		int napi_budget)
558 {
559 	struct fbnic_ring *cmpl = &qt->cmpl;
560 	s32 head0 = -1, ts_head = -1;
561 	__le64 *raw_tcd, done;
562 	u32 head = cmpl->head;
563 
564 	done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE);
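	/* The DONE bit acts as a generation flag: head carries an extra wrap
	 * bit (size_mask + 1), and the polarity that marks a fresh completion
	 * flips on every pass through the ring, so descriptors never need to
	 * be cleared after they are processed.
	 */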
565 	raw_tcd = &cmpl->desc[head & cmpl->size_mask];
566 
567 	/* Walk the completion queue collecting the heads reported by NIC */
568 	while ((*raw_tcd & cpu_to_le64(FBNIC_TCD_DONE)) == done) {
569 		u64 tcd;
570 
571 		dma_rmb();
572 
573 		tcd = le64_to_cpu(*raw_tcd);
574 
575 		switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
576 		case FBNIC_TCD_TYPE_0:
577 			if (!(tcd & FBNIC_TCD_TWQ1))
578 				head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK,
579 						  tcd);
580 			/* Currently all err status bits are related to
581 			 * timestamps and as those have yet to be added
582 			 * they are skipped for now.
583 			 */
584 			break;
585 		case FBNIC_TCD_TYPE_1:
586 			if (WARN_ON_ONCE(tcd & FBNIC_TCD_TWQ1))
587 				break;
588 
589 			fbnic_clean_tsq(nv, &qt->sub0, tcd, &ts_head, &head0);
590 			break;
591 		default:
592 			break;
593 		}
594 
595 		raw_tcd++;
596 		head++;
597 		if (!(head & cmpl->size_mask)) {
598 			done ^= cpu_to_le64(FBNIC_TCD_DONE);
599 			raw_tcd = &cmpl->desc[0];
600 		}
601 	}
602 
603 	/* Record the current head/tail of the queue */
604 	if (cmpl->head != head) {
605 		cmpl->head = head;
606 		writel(head & cmpl->size_mask, cmpl->doorbell);
607 	}
608 
609 	/* Unmap and free processed buffers */
610 	fbnic_clean_twq(nv, napi_budget, qt, ts_head, head0);
611 }
612 
613 static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
614 			    struct fbnic_ring *ring, unsigned int hw_head)
615 {
616 	unsigned int head = ring->head;
617 
618 	if (head == hw_head)
619 		return;
620 
621 	do {
622 		fbnic_page_pool_drain(ring, head, nv, napi_budget);
623 
624 		head++;
625 		head &= ring->size_mask;
626 	} while (head != hw_head);
627 
628 	ring->head = head;
629 }
630 
631 static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
632 {
633 	__le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT];
634 	dma_addr_t dma = page_pool_get_dma_addr(page);
635 	u64 bd, i = FBNIC_BD_FRAG_COUNT;
636 
637 	bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) |
638 	     FIELD_PREP(FBNIC_BD_PAGE_ID_MASK, id);
639 
640 	/* In the case that a page size is larger than 4K we will map a
641 	 * single page to multiple fragments. The page is split into
642 	 * FBNIC_BD_FRAG_COUNT fragments and the lower n bits will be used
643 	 * to indicate the individual fragment IDs.
644 	 */
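	/* For example, assuming 16K pages over 4K-granular address fields,
	 * FBNIC_BD_FRAG_COUNT would be 4 and the loop below writes four
	 * descriptors whose addresses and fragment IDs each step by one.
	 */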
645 	do {
646 		*bdq_desc = cpu_to_le64(bd);
647 		bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) |
648 		      FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1);
649 	} while (--i);
650 }
651 
652 static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
653 {
654 	unsigned int count = fbnic_desc_unused(bdq);
655 	unsigned int i = bdq->tail;
656 
657 	if (!count)
658 		return;
659 
660 	do {
661 		struct page *page;
662 
663 		page = page_pool_dev_alloc_pages(nv->page_pool);
664 		if (!page)
665 			break;
666 
667 		fbnic_page_pool_init(bdq, i, page);
668 		fbnic_bd_prep(bdq, i, page);
669 
670 		i++;
671 		i &= bdq->size_mask;
672 
673 		count--;
674 	} while (count);
675 
676 	if (bdq->tail != i) {
677 		bdq->tail = i;
678 
679 		/* Force DMA writes to flush before writing to tail */
680 		dma_wmb();
681 
682 		writel(i, bdq->doorbell);
683 	}
684 }
685 
686 static unsigned int fbnic_hdr_pg_start(unsigned int pg_off)
687 {
688 	/* The headroom of the first header may be larger than FBNIC_RX_HROOM
689 	 * due to alignment. So account for that by just making the page
690 	 * offset 0 if we are starting at the first header.
691 	 */
692 	if (ALIGN(FBNIC_RX_HROOM, 128) > FBNIC_RX_HROOM &&
693 	    pg_off == ALIGN(FBNIC_RX_HROOM, 128))
694 		return 0;
695 
696 	return pg_off - FBNIC_RX_HROOM;
697 }
698 
699 static unsigned int fbnic_hdr_pg_end(unsigned int pg_off, unsigned int len)
700 {
701 	/* Determine the end of the buffer by finding the start of the next
702 	 * and then subtracting the headroom from that frame.
703 	 */
704 	pg_off += len + FBNIC_RX_TROOM + FBNIC_RX_HROOM;
705 
706 	return ALIGN(pg_off, 128) - FBNIC_RX_HROOM;
707 }
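/* Illustrative example with assumed values (not the real macro settings):
 * if FBNIC_RX_HROOM and FBNIC_RX_TROOM were both 64 and a 200 byte header
 * started at pg_off 64, fbnic_hdr_pg_end() would return
 * ALIGN(64 + 200 + 64 + 64, 128) - 64 = 448, leaving the next header's
 * data to start on a 128 byte boundary at offset 512.
 */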
708 
709 static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
710 			      struct fbnic_pkt_buff *pkt,
711 			      struct fbnic_q_triad *qt)
712 {
713 	unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
714 	unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
715 	struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx);
716 	unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
717 	unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom;
718 	unsigned char *hdr_start;
719 
720 	/* data_hard_start should always be NULL when this is called */
721 	WARN_ON_ONCE(pkt->buff.data_hard_start);
722 
723 	/* Short-cut the end calculation if we know page is fully consumed */
724 	hdr_pg_end = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
725 		     FBNIC_BD_FRAG_SIZE : fbnic_hdr_pg_end(hdr_pg_off, len);
726 	hdr_pg_start = fbnic_hdr_pg_start(hdr_pg_off);
727 
728 	headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD;
729 	frame_sz = hdr_pg_end - hdr_pg_start;
730 	xdp_init_buff(&pkt->buff, frame_sz, NULL);
731 	hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
732 			FBNIC_BD_FRAG_SIZE;
733 
734 	/* Sync DMA buffer */
735 	dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
736 				      hdr_pg_start, frame_sz,
737 				      DMA_BIDIRECTIONAL);
738 
739 	/* Build frame around buffer */
740 	hdr_start = page_address(page) + hdr_pg_start;
741 
742 	xdp_prepare_buff(&pkt->buff, hdr_start, headroom,
743 			 len - FBNIC_RX_PAD, true);
744 
745 	pkt->data_truesize = 0;
746 	pkt->data_len = 0;
747 	pkt->nr_frags = 0;
748 }
749 
750 static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
751 			      struct fbnic_pkt_buff *pkt,
752 			      struct fbnic_q_triad *qt)
753 {
754 	unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
755 	unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
756 	unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
757 	struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx);
758 	struct skb_shared_info *shinfo;
759 	unsigned int truesize;
760 
761 	truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
762 		   FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128);
763 
764 	pg_off += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
765 		  FBNIC_BD_FRAG_SIZE;
766 
767 	/* Sync DMA buffer */
768 	dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
769 				      pg_off, truesize, DMA_BIDIRECTIONAL);
770 
771 	/* Add page to xdp shared info */
772 	shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
773 
774 	/* Accumulate the truesize of the payload frags in the pkt buffer */
775 	pkt->data_truesize += truesize;
776 
777 	__skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len);
778 
779 	/* Accumulate the payload length in the pkt buffer */
780 	pkt->data_len += len;
781 }
782 
783 static void fbnic_put_pkt_buff(struct fbnic_napi_vector *nv,
784 			       struct fbnic_pkt_buff *pkt, int budget)
785 {
786 	struct skb_shared_info *shinfo;
787 	struct page *page;
788 	int nr_frags;
789 
790 	if (!pkt->buff.data_hard_start)
791 		return;
792 
793 	shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
794 	nr_frags = pkt->nr_frags;
795 
796 	while (nr_frags--) {
797 		page = skb_frag_page(&shinfo->frags[nr_frags]);
798 		page_pool_put_full_page(nv->page_pool, page, !!budget);
799 	}
800 
801 	page = virt_to_page(pkt->buff.data_hard_start);
802 	page_pool_put_full_page(nv->page_pool, page, !!budget);
803 }
804 
805 static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
806 				       struct fbnic_pkt_buff *pkt)
807 {
808 	unsigned int nr_frags = pkt->nr_frags;
809 	struct skb_shared_info *shinfo;
810 	unsigned int truesize;
811 	struct sk_buff *skb;
812 
813 	truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM -
814 		   pkt->buff.data_hard_start;
815 
816 	/* Build frame around buffer */
817 	skb = napi_build_skb(pkt->buff.data_hard_start, truesize);
818 	if (unlikely(!skb))
819 		return NULL;
820 
821 	/* Push data pointer to start of data, put tail to end of data */
822 	skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start);
823 	__skb_put(skb, pkt->buff.data_end - pkt->buff.data);
824 
825 	/* Add tracking for metadata at the start of the frame */
826 	skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta);
827 
828 	/* Add Rx frags */
829 	if (nr_frags) {
830 		/* Verify that shared info didn't move */
831 		shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
832 		WARN_ON(skb_shinfo(skb) != shinfo);
833 
834 		skb->truesize += pkt->data_truesize;
835 		skb->data_len += pkt->data_len;
836 		shinfo->nr_frags = nr_frags;
837 		skb->len += pkt->data_len;
838 	}
839 
840 	skb_mark_for_recycle(skb);
841 
842 	/* Set MAC header specific fields */
843 	skb->protocol = eth_type_trans(skb, nv->napi.dev);
844 
845 	/* Add timestamp if present */
846 	if (pkt->hwtstamp)
847 		skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp;
848 
849 	return skb;
850 }
851 
852 static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd)
853 {
854 	return (FBNIC_RCD_META_L4_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L4 :
855 	       (FBNIC_RCD_META_L3_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L3 :
856 						     PKT_HASH_TYPE_L2;
857 }
858 
859 static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
860 			    struct fbnic_pkt_buff *pkt)
861 {
862 	struct fbnic_net *fbn;
863 	u64 ns, ts;
864 
865 	if (!FIELD_GET(FBNIC_RCD_OPT_META_TS, rcd))
866 		return;
867 
868 	fbn = netdev_priv(nv->napi.dev);
869 	ts = FIELD_GET(FBNIC_RCD_OPT_META_TS_MASK, rcd);
870 	ns = fbnic_ts40_to_ns(fbn, ts);
871 
872 	/* Add timestamp to shared info */
873 	pkt->hwtstamp = ns_to_ktime(ns);
874 }
875 
876 static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
877 				      u64 rcd, struct sk_buff *skb,
878 				      struct fbnic_q_triad *qt)
879 {
880 	struct net_device *netdev = nv->napi.dev;
881 	struct fbnic_ring *rcq = &qt->cmpl;
882 
883 	fbnic_rx_csum(rcd, skb, rcq);
884 
885 	if (netdev->features & NETIF_F_RXHASH)
886 		skb_set_hash(skb,
887 			     FIELD_GET(FBNIC_RCD_META_RSS_HASH_MASK, rcd),
888 			     fbnic_skb_hash_type(rcd));
889 
890 	skb_record_rx_queue(skb, rcq->q_idx);
891 }
892 
893 static bool fbnic_rcd_metadata_err(u64 rcd)
894 {
895 	return !!(FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK & rcd);
896 }
897 
898 static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
899 			   struct fbnic_q_triad *qt, int budget)
900 {
901 	unsigned int packets = 0, bytes = 0, dropped = 0;
902 	struct fbnic_ring *rcq = &qt->cmpl;
903 	struct fbnic_pkt_buff *pkt;
904 	s32 head0 = -1, head1 = -1;
905 	__le64 *raw_rcd, done;
906 	u32 head = rcq->head;
907 
908 	done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0;
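	/* Same alternating DONE polarity scheme as fbnic_clean_tcq() above,
	 * except that done holds the stale value so the loop stops at the
	 * first descriptor the NIC has not yet written back.
	 */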
909 	raw_rcd = &rcq->desc[head & rcq->size_mask];
910 	pkt = rcq->pkt;
911 
912 	/* Walk the completion queue collecting the heads reported by NIC */
913 	while (likely(packets < budget)) {
914 		struct sk_buff *skb = ERR_PTR(-EINVAL);
915 		u64 rcd;
916 
917 		if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done)
918 			break;
919 
920 		dma_rmb();
921 
922 		rcd = le64_to_cpu(*raw_rcd);
923 
924 		switch (FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd)) {
925 		case FBNIC_RCD_TYPE_HDR_AL:
926 			head0 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
927 			fbnic_pkt_prepare(nv, rcd, pkt, qt);
928 
929 			break;
930 		case FBNIC_RCD_TYPE_PAY_AL:
931 			head1 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
932 			fbnic_add_rx_frag(nv, rcd, pkt, qt);
933 
934 			break;
935 		case FBNIC_RCD_TYPE_OPT_META:
936 			/* Only type 0 is currently supported */
937 			if (FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd))
938 				break;
939 
940 			fbnic_rx_tstamp(nv, rcd, pkt);
941 
942 			/* We currently ignore the action table index */
943 			break;
944 		case FBNIC_RCD_TYPE_META:
945 			if (likely(!fbnic_rcd_metadata_err(rcd)))
946 				skb = fbnic_build_skb(nv, pkt);
947 
948 			/* Populate skb and invalidate XDP */
949 			if (!IS_ERR_OR_NULL(skb)) {
950 				fbnic_populate_skb_fields(nv, rcd, skb, qt);
951 
952 				packets++;
953 				bytes += skb->len;
954 
955 				napi_gro_receive(&nv->napi, skb);
956 			} else {
957 				dropped++;
958 				fbnic_put_pkt_buff(nv, pkt, 1);
959 			}
960 
961 			pkt->buff.data_hard_start = NULL;
962 
963 			break;
964 		}
965 
966 		raw_rcd++;
967 		head++;
968 		if (!(head & rcq->size_mask)) {
969 			done ^= cpu_to_le64(FBNIC_RCD_DONE);
970 			raw_rcd = &rcq->desc[0];
971 		}
972 	}
973 
974 	u64_stats_update_begin(&rcq->stats.syncp);
975 	rcq->stats.packets += packets;
976 	rcq->stats.bytes += bytes;
977 	/* Re-add ethernet header length (removed in fbnic_build_skb) */
978 	rcq->stats.bytes += ETH_HLEN * packets;
979 	rcq->stats.dropped += dropped;
980 	u64_stats_update_end(&rcq->stats.syncp);
981 
982 	/* Unmap and free processed buffers */
983 	if (head0 >= 0)
984 		fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
985 	fbnic_fill_bdq(nv, &qt->sub0);
986 
987 	if (head1 >= 0)
988 		fbnic_clean_bdq(nv, budget, &qt->sub1, head1);
989 	fbnic_fill_bdq(nv, &qt->sub1);
990 
991 	/* Record the current head/tail of the queue */
992 	if (rcq->head != head) {
993 		rcq->head = head;
994 		writel(head & rcq->size_mask, rcq->doorbell);
995 	}
996 
997 	return packets;
998 }
999 
1000 static void fbnic_nv_irq_disable(struct fbnic_napi_vector *nv)
1001 {
1002 	struct fbnic_dev *fbd = nv->fbd;
1003 	u32 v_idx = nv->v_idx;
1004 
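	/* e.g. an assumed v_idx of 37 sets bit 5 in FBNIC_INTR_MASK_SET(1) */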
1005 	fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(v_idx / 32), 1 << (v_idx % 32));
1006 }
1007 
1008 static void fbnic_nv_irq_rearm(struct fbnic_napi_vector *nv)
1009 {
1010 	struct fbnic_dev *fbd = nv->fbd;
1011 	u32 v_idx = nv->v_idx;
1012 
1013 	fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(v_idx),
1014 		   FBNIC_INTR_CQ_REARM_INTR_UNMASK);
1015 }
1016 
1017 static int fbnic_poll(struct napi_struct *napi, int budget)
1018 {
1019 	struct fbnic_napi_vector *nv = container_of(napi,
1020 						    struct fbnic_napi_vector,
1021 						    napi);
1022 	int i, j, work_done = 0;
1023 
1024 	for (i = 0; i < nv->txt_count; i++)
1025 		fbnic_clean_tcq(nv, &nv->qt[i], budget);
1026 
1027 	for (j = 0; j < nv->rxt_count; j++, i++)
1028 		work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget);
1029 
1030 	if (work_done >= budget)
1031 		return budget;
1032 
1033 	if (likely(napi_complete_done(napi, work_done)))
1034 		fbnic_nv_irq_rearm(nv);
1035 
1036 	return 0;
1037 }
1038 
1039 static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
1040 {
1041 	struct fbnic_napi_vector *nv = data;
1042 
1043 	napi_schedule_irqoff(&nv->napi);
1044 
1045 	return IRQ_HANDLED;
1046 }
1047 
1048 static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
1049 					     struct fbnic_ring *rxr)
1050 {
1051 	struct fbnic_queue_stats *stats = &rxr->stats;
1052 
1053 	/* Capture stats from queues before disassociating them */
1054 	fbn->rx_stats.bytes += stats->bytes;
1055 	fbn->rx_stats.packets += stats->packets;
1056 	fbn->rx_stats.dropped += stats->dropped;
1057 }
1058 
1059 static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
1060 					     struct fbnic_ring *txr)
1061 {
1062 	struct fbnic_queue_stats *stats = &txr->stats;
1063 
1064 	/* Capture stats from queues before disassociating them */
1065 	fbn->tx_stats.bytes += stats->bytes;
1066 	fbn->tx_stats.packets += stats->packets;
1067 	fbn->tx_stats.dropped += stats->dropped;
1068 	fbn->tx_stats.ts_lost += stats->ts_lost;
1069 	fbn->tx_stats.ts_packets += stats->ts_packets;
1070 }
1071 
1072 static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
1073 				 struct fbnic_ring *txr)
1074 {
1075 	if (!(txr->flags & FBNIC_RING_F_STATS))
1076 		return;
1077 
1078 	fbnic_aggregate_ring_tx_counters(fbn, txr);
1079 
1080 	/* Remove pointer to the Tx ring */
1081 	WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
1082 	fbn->tx[txr->q_idx] = NULL;
1083 }
1084 
1085 static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
1086 				 struct fbnic_ring *rxr)
1087 {
1088 	if (!(rxr->flags & FBNIC_RING_F_STATS))
1089 		return;
1090 
1091 	fbnic_aggregate_ring_rx_counters(fbn, rxr);
1092 
1093 	/* Remove pointer to the Rx ring */
1094 	WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
1095 	fbn->rx[rxr->q_idx] = NULL;
1096 }
1097 
1098 static void fbnic_free_napi_vector(struct fbnic_net *fbn,
1099 				   struct fbnic_napi_vector *nv)
1100 {
1101 	struct fbnic_dev *fbd = nv->fbd;
1102 	u32 v_idx = nv->v_idx;
1103 	int i, j;
1104 
1105 	for (i = 0; i < nv->txt_count; i++) {
1106 		fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0);
1107 		fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl);
1108 	}
1109 
1110 	for (j = 0; j < nv->rxt_count; j++, i++) {
1111 		fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
1112 		fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
1113 		fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
1114 	}
1115 
1116 	fbnic_free_irq(fbd, v_idx, nv);
1117 	page_pool_destroy(nv->page_pool);
1118 	netif_napi_del(&nv->napi);
1119 	list_del(&nv->napis);
1120 	kfree(nv);
1121 }
1122 
1123 void fbnic_free_napi_vectors(struct fbnic_net *fbn)
1124 {
1125 	struct fbnic_napi_vector *nv, *temp;
1126 
1127 	list_for_each_entry_safe(nv, temp, &fbn->napis, napis)
1128 		fbnic_free_napi_vector(fbn, nv);
1129 }
1130 
1131 static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv)
1132 {
1133 	unsigned char *dev_name = nv->napi.dev->name;
1134 
1135 	if (!nv->rxt_count)
1136 		snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name,
1137 			 nv->v_idx - FBNIC_NON_NAPI_VECTORS);
1138 	else
1139 		snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name,
1140 			 nv->v_idx - FBNIC_NON_NAPI_VECTORS);
1141 }
1142 
1143 #define FBNIC_PAGE_POOL_FLAGS \
1144 	(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
1145 
1146 static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
1147 				    struct fbnic_napi_vector *nv)
1148 {
1149 	struct page_pool_params pp_params = {
1150 		.order = 0,
1151 		.flags = FBNIC_PAGE_POOL_FLAGS,
1152 		.pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count,
1153 		.nid = NUMA_NO_NODE,
1154 		.dev = nv->dev,
1155 		.dma_dir = DMA_BIDIRECTIONAL,
1156 		.offset = 0,
1157 		.max_len = PAGE_SIZE
1158 	};
1159 	struct page_pool *pp;
1160 
1161 	/* Page pool cannot exceed a size of 32768. This doesn't limit the
1162 	 * pages on the ring but the number we can have cached waiting on
1163 	 * the next use.
1164 	 *
1165 	 * TBD: Can this be reduced further? Would a multiple of
1166 	 * NAPI_POLL_WEIGHT possibly make more sense? The question is how
1167 	 * many pages do we need to hold in reserve to get the best return
1168 	 * without hogging too much system memory.
1169 	 */
1170 	if (pp_params.pool_size > 32768)
1171 		pp_params.pool_size = 32768;
1172 
1173 	pp = page_pool_create(&pp_params);
1174 	if (IS_ERR(pp))
1175 		return PTR_ERR(pp);
1176 
1177 	nv->page_pool = pp;
1178 
1179 	return 0;
1180 }
1181 
1182 static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
1183 			    int q_idx, u8 flags)
1184 {
1185 	u64_stats_init(&ring->stats.syncp);
1186 	ring->doorbell = doorbell;
1187 	ring->q_idx = q_idx;
1188 	ring->flags = flags;
1189 }
1190 
1191 static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
1192 				   unsigned int v_count, unsigned int v_idx,
1193 				   unsigned int txq_count, unsigned int txq_idx,
1194 				   unsigned int rxq_count, unsigned int rxq_idx)
1195 {
1196 	int txt_count = txq_count, rxt_count = rxq_count;
1197 	u32 __iomem *uc_addr = fbd->uc_addr0;
1198 	struct fbnic_napi_vector *nv;
1199 	struct fbnic_q_triad *qt;
1200 	int qt_count, err;
1201 	u32 __iomem *db;
1202 
1203 	qt_count = txt_count + rxq_count;
1204 	if (!qt_count)
1205 		return -EINVAL;
1206 
1207 	/* If MMIO has already failed there are no rings to initialize */
1208 	if (!uc_addr)
1209 		return -EIO;
1210 
1211 	/* Allocate NAPI vector and queue triads */
1212 	nv = kzalloc(struct_size(nv, qt, qt_count), GFP_KERNEL);
1213 	if (!nv)
1214 		return -ENOMEM;
1215 
1216 	/* Record queue triad counts */
1217 	nv->txt_count = txt_count;
1218 	nv->rxt_count = rxt_count;
1219 
1220 	/* Provide pointer back to fbnic and MSI-X vectors */
1221 	nv->fbd = fbd;
1222 	nv->v_idx = v_idx;
1223 
1224 	/* Tie napi to netdev */
1225 	list_add(&nv->napis, &fbn->napis);
1226 	netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
1227 
1228 	/* Record IRQ to NAPI struct */
1229 	netif_napi_set_irq(&nv->napi,
1230 			   pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
1231 
1232 	/* Tie nv back to PCIe dev */
1233 	nv->dev = fbd->dev;
1234 
1235 	/* Allocate page pool */
1236 	if (rxq_count) {
1237 		err = fbnic_alloc_nv_page_pool(fbn, nv);
1238 		if (err)
1239 			goto napi_del;
1240 	}
1241 
1242 	/* Initialize vector name */
1243 	fbnic_name_napi_vector(nv);
1244 
1245 	/* Request the IRQ for napi vector */
1246 	err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings,
1247 				IRQF_SHARED, nv->name, nv);
1248 	if (err)
1249 		goto pp_destroy;
1250 
1251 	/* Initialize queue triads */
1252 	qt = nv->qt;
1253 
1254 	while (txt_count) {
1255 		/* Configure Tx queue */
1256 		db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL];
1257 
1258 		/* Assign Tx queue to netdev if applicable */
1259 		if (txq_count > 0) {
1260 			u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
1261 
1262 			fbnic_ring_init(&qt->sub0, db, txq_idx, flags);
1263 			fbn->tx[txq_idx] = &qt->sub0;
1264 			txq_count--;
1265 		} else {
1266 			fbnic_ring_init(&qt->sub0, db, 0,
1267 					FBNIC_RING_F_DISABLED);
1268 		}
1269 
1270 		/* Configure Tx completion queue */
1271 		db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD];
1272 		fbnic_ring_init(&qt->cmpl, db, 0, 0);
1273 
1274 		/* Update Tx queue index */
1275 		txt_count--;
1276 		txq_idx += v_count;
1277 
1278 		/* Move to next queue triad */
1279 		qt++;
1280 	}
1281 
1282 	while (rxt_count) {
1283 		/* Configure header queue */
1284 		db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
1285 		fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX);
1286 
1287 		/* Configure payload queue */
1288 		db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
1289 		fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX);
1290 
1291 		/* Configure Rx completion queue */
1292 		db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
1293 		fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS);
1294 		fbn->rx[rxq_idx] = &qt->cmpl;
1295 
1296 		/* Update Rx queue index */
1297 		rxt_count--;
1298 		rxq_idx += v_count;
1299 
1300 		/* Move to next queue triad */
1301 		qt++;
1302 	}
1303 
1304 	return 0;
1305 
1306 pp_destroy:
1307 	page_pool_destroy(nv->page_pool);
1308 napi_del:
1309 	netif_napi_del(&nv->napi);
1310 	list_del(&nv->napis);
1311 	kfree(nv);
1312 	return err;
1313 }
1314 
1315 int fbnic_alloc_napi_vectors(struct fbnic_net *fbn)
1316 {
1317 	unsigned int txq_idx = 0, rxq_idx = 0, v_idx = FBNIC_NON_NAPI_VECTORS;
1318 	unsigned int num_tx = fbn->num_tx_queues;
1319 	unsigned int num_rx = fbn->num_rx_queues;
1320 	unsigned int num_napi = fbn->num_napi;
1321 	struct fbnic_dev *fbd = fbn->fbd;
1322 	int err;
1323 
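	/* Distribution example with assumed queue counts: given num_napi = 10,
	 * num_tx = 2 and num_rx = 8, the first loop creates two Tx-only
	 * vectors and the second gives each remaining vector
	 * DIV_ROUND_UP(8, 10 - 0) = 1 Rx queue. With num_napi = 8 and
	 * 8 Tx + 8 Rx queues the first branch is skipped and every vector
	 * carries one Tx/Rx queue pair instead.
	 */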
1324 	/* Allocate 1 Tx queue per napi vector */
1325 	if (num_napi < FBNIC_MAX_TXQS && num_napi == num_tx + num_rx) {
1326 		while (num_tx) {
1327 			err = fbnic_alloc_napi_vector(fbd, fbn,
1328 						      num_napi, v_idx,
1329 						      1, txq_idx, 0, 0);
1330 			if (err)
1331 				goto free_vectors;
1332 
1333 			/* Update counts and index */
1334 			num_tx--;
1335 			txq_idx++;
1336 
1337 			v_idx++;
1338 		}
1339 	}
1340 
1341 	/* Allocate Tx/Rx queue pairs per vector, or allocate remaining Rx */
1342 	while (num_rx | num_tx) {
1343 		int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx);
1344 		int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx);
1345 
1346 		err = fbnic_alloc_napi_vector(fbd, fbn, num_napi, v_idx,
1347 					      tqpv, txq_idx, rqpv, rxq_idx);
1348 		if (err)
1349 			goto free_vectors;
1350 
1351 		/* Update counts and index */
1352 		num_tx -= tqpv;
1353 		txq_idx++;
1354 
1355 		num_rx -= rqpv;
1356 		rxq_idx++;
1357 
1358 		v_idx++;
1359 	}
1360 
1361 	return 0;
1362 
1363 free_vectors:
1364 	fbnic_free_napi_vectors(fbn);
1365 
1366 	return -ENOMEM;
1367 }
1368 
1369 static void fbnic_free_ring_resources(struct device *dev,
1370 				      struct fbnic_ring *ring)
1371 {
1372 	kvfree(ring->buffer);
1373 	ring->buffer = NULL;
1374 
1375 	/* If size is not set there are no descriptors present */
1376 	if (!ring->size)
1377 		return;
1378 
1379 	dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
1380 	ring->size_mask = 0;
1381 	ring->size = 0;
1382 }
1383 
1384 static int fbnic_alloc_tx_ring_desc(struct fbnic_net *fbn,
1385 				    struct fbnic_ring *txr)
1386 {
1387 	struct device *dev = fbn->netdev->dev.parent;
1388 	size_t size;
1389 
1390 	/* Round size up to nearest 4K */
1391 	size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096);
1392 
1393 	txr->desc = dma_alloc_coherent(dev, size, &txr->dma,
1394 				       GFP_KERNEL | __GFP_NOWARN);
1395 	if (!txr->desc)
1396 		return -ENOMEM;
1397 
1398 	/* txq_size should be a power of 2, so mask is just that -1 */
1399 	txr->size_mask = fbn->txq_size - 1;
1400 	txr->size = size;
1401 
1402 	return 0;
1403 }
1404 
1405 static int fbnic_alloc_tx_ring_buffer(struct fbnic_ring *txr)
1406 {
1407 	size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1);
1408 
1409 	txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1410 
1411 	return txr->tx_buf ? 0 : -ENOMEM;
1412 }
1413 
1414 static int fbnic_alloc_tx_ring_resources(struct fbnic_net *fbn,
1415 					 struct fbnic_ring *txr)
1416 {
1417 	struct device *dev = fbn->netdev->dev.parent;
1418 	int err;
1419 
1420 	if (txr->flags & FBNIC_RING_F_DISABLED)
1421 		return 0;
1422 
1423 	err = fbnic_alloc_tx_ring_desc(fbn, txr);
1424 	if (err)
1425 		return err;
1426 
1427 	if (!(txr->flags & FBNIC_RING_F_CTX))
1428 		return 0;
1429 
1430 	err = fbnic_alloc_tx_ring_buffer(txr);
1431 	if (err)
1432 		goto free_desc;
1433 
1434 	return 0;
1435 
1436 free_desc:
1437 	fbnic_free_ring_resources(dev, txr);
1438 	return err;
1439 }
1440 
1441 static int fbnic_alloc_rx_ring_desc(struct fbnic_net *fbn,
1442 				    struct fbnic_ring *rxr)
1443 {
1444 	struct device *dev = fbn->netdev->dev.parent;
1445 	size_t desc_size = sizeof(*rxr->desc);
1446 	u32 rxq_size;
1447 	size_t size;
1448 
1449 	switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) {
1450 	case FBNIC_QUEUE_BDQ_HPQ_TAIL:
1451 		rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT;
1452 		desc_size *= FBNIC_BD_FRAG_COUNT;
1453 		break;
1454 	case FBNIC_QUEUE_BDQ_PPQ_TAIL:
1455 		rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT;
1456 		desc_size *= FBNIC_BD_FRAG_COUNT;
1457 		break;
1458 	case FBNIC_QUEUE_RCQ_HEAD:
1459 		rxq_size = fbn->rcq_size;
1460 		break;
1461 	default:
1462 		return -EINVAL;
1463 	}
1464 
1465 	/* Round size up to nearest 4K */
1466 	size = ALIGN(array_size(desc_size, rxq_size), 4096);
1467 
1468 	rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma,
1469 				       GFP_KERNEL | __GFP_NOWARN);
1470 	if (!rxr->desc)
1471 		return -ENOMEM;
1472 
1473 	/* rxq_size should be a power of 2, so mask is just that -1 */
1474 	rxr->size_mask = rxq_size - 1;
1475 	rxr->size = size;
1476 
1477 	return 0;
1478 }
1479 
1480 static int fbnic_alloc_rx_ring_buffer(struct fbnic_ring *rxr)
1481 {
1482 	size_t size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1);
1483 
1484 	if (rxr->flags & FBNIC_RING_F_CTX)
1485 		size = sizeof(*rxr->rx_buf) * (rxr->size_mask + 1);
1486 	else
1487 		size = sizeof(*rxr->pkt);
1488 
1489 	rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1490 
1491 	return rxr->rx_buf ? 0 : -ENOMEM;
1492 }
1493 
1494 static int fbnic_alloc_rx_ring_resources(struct fbnic_net *fbn,
1495 					 struct fbnic_ring *rxr)
1496 {
1497 	struct device *dev = fbn->netdev->dev.parent;
1498 	int err;
1499 
1500 	err = fbnic_alloc_rx_ring_desc(fbn, rxr);
1501 	if (err)
1502 		return err;
1503 
1504 	err = fbnic_alloc_rx_ring_buffer(rxr);
1505 	if (err)
1506 		goto free_desc;
1507 
1508 	return 0;
1509 
1510 free_desc:
1511 	fbnic_free_ring_resources(dev, rxr);
1512 	return err;
1513 }
1514 
1515 static void fbnic_free_qt_resources(struct fbnic_net *fbn,
1516 				    struct fbnic_q_triad *qt)
1517 {
1518 	struct device *dev = fbn->netdev->dev.parent;
1519 
1520 	fbnic_free_ring_resources(dev, &qt->cmpl);
1521 	fbnic_free_ring_resources(dev, &qt->sub1);
1522 	fbnic_free_ring_resources(dev, &qt->sub0);
1523 }
1524 
1525 static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
1526 				       struct fbnic_q_triad *qt)
1527 {
1528 	struct device *dev = fbn->netdev->dev.parent;
1529 	int err;
1530 
1531 	err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0);
1532 	if (err)
1533 		return err;
1534 
1535 	err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl);
1536 	if (err)
1537 		goto free_sub1;
1538 
1539 	return 0;
1540 
1541 free_sub1:
1542 	fbnic_free_ring_resources(dev, &qt->sub0);
1543 	return err;
1544 }
1545 
1546 static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
1547 				       struct fbnic_q_triad *qt)
1548 {
1549 	struct device *dev = fbn->netdev->dev.parent;
1550 	int err;
1551 
1552 	err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
1553 	if (err)
1554 		return err;
1555 
1556 	err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
1557 	if (err)
1558 		goto free_sub0;
1559 
1560 	err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl);
1561 	if (err)
1562 		goto free_sub1;
1563 
1564 	return 0;
1565 
1566 free_sub1:
1567 	fbnic_free_ring_resources(dev, &qt->sub1);
1568 free_sub0:
1569 	fbnic_free_ring_resources(dev, &qt->sub0);
1570 	return err;
1571 }
1572 
1573 static void fbnic_free_nv_resources(struct fbnic_net *fbn,
1574 				    struct fbnic_napi_vector *nv)
1575 {
1576 	int i, j;
1577 
1578 	/* Free Tx Resources  */
1579 	for (i = 0; i < nv->txt_count; i++)
1580 		fbnic_free_qt_resources(fbn, &nv->qt[i]);
1581 
1582 	for (j = 0; j < nv->rxt_count; j++, i++)
1583 		fbnic_free_qt_resources(fbn, &nv->qt[i]);
1584 }
1585 
1586 static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
1587 				    struct fbnic_napi_vector *nv)
1588 {
1589 	int i, j, err;
1590 
1591 	/* Allocate Tx Resources */
1592 	for (i = 0; i < nv->txt_count; i++) {
1593 		err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]);
1594 		if (err)
1595 			goto free_resources;
1596 	}
1597 
1598 	/* Allocate Rx Resources */
1599 	for (j = 0; j < nv->rxt_count; j++, i++) {
1600 		err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]);
1601 		if (err)
1602 			goto free_resources;
1603 	}
1604 
1605 	return 0;
1606 
1607 free_resources:
1608 	while (i--)
1609 		fbnic_free_qt_resources(fbn, &nv->qt[i]);
1610 	return err;
1611 }
1612 
1613 void fbnic_free_resources(struct fbnic_net *fbn)
1614 {
1615 	struct fbnic_napi_vector *nv;
1616 
1617 	list_for_each_entry(nv, &fbn->napis, napis)
1618 		fbnic_free_nv_resources(fbn, nv);
1619 }
1620 
1621 int fbnic_alloc_resources(struct fbnic_net *fbn)
1622 {
1623 	struct fbnic_napi_vector *nv;
1624 	int err = -ENODEV;
1625 
1626 	list_for_each_entry(nv, &fbn->napis, napis) {
1627 		err = fbnic_alloc_nv_resources(fbn, nv);
1628 		if (err)
1629 			goto free_resources;
1630 	}
1631 
1632 	return 0;
1633 
1634 free_resources:
1635 	list_for_each_entry_continue_reverse(nv, &fbn->napis, napis)
1636 		fbnic_free_nv_resources(fbn, nv);
1637 
1638 	return err;
1639 }
1640 
1641 static void fbnic_disable_twq0(struct fbnic_ring *txr)
1642 {
1643 	u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
1644 
1645 	twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE;
1646 
1647 	fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl);
1648 }
1649 
1650 static void fbnic_disable_tcq(struct fbnic_ring *txr)
1651 {
1652 	fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0);
1653 	fbnic_ring_wr32(txr, FBNIC_QUEUE_TIM_MASK, FBNIC_QUEUE_TIM_MASK_MASK);
1654 }
1655 
1656 static void fbnic_disable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
1657 {
1658 	u32 bdq_ctl = fbnic_ring_rd32(hpq, FBNIC_QUEUE_BDQ_CTL);
1659 
1660 	bdq_ctl &= ~FBNIC_QUEUE_BDQ_CTL_ENABLE;
1661 
1662 	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
1663 }
1664 
1665 static void fbnic_disable_rcq(struct fbnic_ring *rxr)
1666 {
1667 	fbnic_ring_wr32(rxr, FBNIC_QUEUE_RCQ_CTL, 0);
1668 	fbnic_ring_wr32(rxr, FBNIC_QUEUE_RIM_MASK, FBNIC_QUEUE_RIM_MASK_MASK);
1669 }
1670 
1671 void fbnic_napi_disable(struct fbnic_net *fbn)
1672 {
1673 	struct fbnic_napi_vector *nv;
1674 
1675 	list_for_each_entry(nv, &fbn->napis, napis) {
1676 		napi_disable(&nv->napi);
1677 
1678 		fbnic_nv_irq_disable(nv);
1679 	}
1680 }
1681 
1682 void fbnic_disable(struct fbnic_net *fbn)
1683 {
1684 	struct fbnic_dev *fbd = fbn->fbd;
1685 	struct fbnic_napi_vector *nv;
1686 	int i, j;
1687 
1688 	list_for_each_entry(nv, &fbn->napis, napis) {
1689 		/* Disable Tx queue triads */
1690 		for (i = 0; i < nv->txt_count; i++) {
1691 			struct fbnic_q_triad *qt = &nv->qt[i];
1692 
1693 			fbnic_disable_twq0(&qt->sub0);
1694 			fbnic_disable_tcq(&qt->cmpl);
1695 		}
1696 
1697 		/* Disable Rx queue triads */
1698 		for (j = 0; j < nv->rxt_count; j++, i++) {
1699 			struct fbnic_q_triad *qt = &nv->qt[i];
1700 
1701 			fbnic_disable_bdq(&qt->sub0, &qt->sub1);
1702 			fbnic_disable_rcq(&qt->cmpl);
1703 		}
1704 	}
1705 
1706 	fbnic_wrfl(fbd);
1707 }
1708 
1709 static void fbnic_tx_flush(struct fbnic_dev *fbd)
1710 {
1711 	netdev_warn(fbd->netdev, "triggering Tx flush\n");
1712 
1713 	fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN,
1714 		    FBNIC_TMI_DROP_CTRL_EN);
1715 }
1716 
1717 static void fbnic_tx_flush_off(struct fbnic_dev *fbd)
1718 {
1719 	fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN, 0);
1720 }
1721 
1722 struct fbnic_idle_regs {
1723 	u32 reg_base;
1724 	u8 reg_cnt;
1725 };
1726 
1727 static bool fbnic_all_idle(struct fbnic_dev *fbd,
1728 			   const struct fbnic_idle_regs *regs,
1729 			   unsigned int nregs)
1730 {
1731 	unsigned int i, j;
1732 
1733 	for (i = 0; i < nregs; i++) {
1734 		for (j = 0; j < regs[i].reg_cnt; j++) {
1735 			if (fbnic_rd32(fbd, regs[i].reg_base + j) != ~0U)
1736 				return false;
1737 		}
1738 	}
1739 	return true;
1740 }
1741 
1742 static void fbnic_idle_dump(struct fbnic_dev *fbd,
1743 			    const struct fbnic_idle_regs *regs,
1744 			    unsigned int nregs, const char *dir, int err)
1745 {
1746 	unsigned int i, j;
1747 
1748 	netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err);
1749 	for (i = 0; i < nregs; i++)
1750 		for (j = 0; j < regs[i].reg_cnt; j++)
1751 			netdev_err(fbd->netdev, "0x%04x: %08x\n",
1752 				   regs[i].reg_base + j,
1753 				   fbnic_rd32(fbd, regs[i].reg_base + j));
1754 }
1755 
1756 int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
1757 {
1758 	static const struct fbnic_idle_regs tx[] = {
1759 		{ FBNIC_QM_TWQ_IDLE(0),	FBNIC_QM_TWQ_IDLE_CNT, },
1760 		{ FBNIC_QM_TQS_IDLE(0),	FBNIC_QM_TQS_IDLE_CNT, },
1761 		{ FBNIC_QM_TDE_IDLE(0),	FBNIC_QM_TDE_IDLE_CNT, },
1762 		{ FBNIC_QM_TCQ_IDLE(0),	FBNIC_QM_TCQ_IDLE_CNT, },
1763 	}, rx[] = {
1764 		{ FBNIC_QM_HPQ_IDLE(0),	FBNIC_QM_HPQ_IDLE_CNT, },
1765 		{ FBNIC_QM_PPQ_IDLE(0),	FBNIC_QM_PPQ_IDLE_CNT, },
1766 		{ FBNIC_QM_RCQ_IDLE(0),	FBNIC_QM_RCQ_IDLE_CNT, },
1767 	};
1768 	bool idle;
1769 	int err;
1770 
1771 	err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
1772 				       false, fbd, tx, ARRAY_SIZE(tx));
1773 	if (err == -ETIMEDOUT) {
1774 		fbnic_tx_flush(fbd);
1775 		err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle,
1776 					       2, 500000, false,
1777 					       fbd, tx, ARRAY_SIZE(tx));
1778 		fbnic_tx_flush_off(fbd);
1779 	}
1780 	if (err) {
1781 		fbnic_idle_dump(fbd, tx, ARRAY_SIZE(tx), "Tx", err);
1782 		if (may_fail)
1783 			return err;
1784 	}
1785 
1786 	err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
1787 				       false, fbd, rx, ARRAY_SIZE(rx));
1788 	if (err)
1789 		fbnic_idle_dump(fbd, rx, ARRAY_SIZE(rx), "Rx", err);
1790 	return err;
1791 }
1792 
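/* fbnic_flush - Reclaim ring state after the queues have been stopped
 *
 * Drops any Tx work still sitting in the work queues, returns the pages held
 * by the buffer descriptor queues, zeroes the completion rings, resets BQL
 * accounting and detaches the queues from their NAPI instances.  This is only
 * safe once the hardware is no longer touching the rings, i.e. after
 * fbnic_disable() and an idle wait (assumption based on the helpers above).
 */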
void fbnic_flush(struct fbnic_net *fbn)
{
	struct fbnic_napi_vector *nv;

	list_for_each_entry(nv, &fbn->napis, napis) {
		int i, j;

		/* Flush any processed Tx Queue Triads and drop the rest */
		for (i = 0; i < nv->txt_count; i++) {
			struct fbnic_q_triad *qt = &nv->qt[i];
			struct netdev_queue *tx_queue;

			/* Clean the work queues of unprocessed work */
			fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);

			/* Reset completion queue descriptor ring */
			memset(qt->cmpl.desc, 0, qt->cmpl.size);

			/* Nothing else to do if Tx queue is disabled */
			if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
				continue;

			/* Reset BQL associated with Tx queue */
			tx_queue = netdev_get_tx_queue(nv->napi.dev,
						       qt->sub0.q_idx);
			netdev_tx_reset_queue(tx_queue);

			/* Disassociate Tx queue from NAPI */
			netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
					     NETDEV_QUEUE_TYPE_TX, NULL);
		}

		/* Flush any processed Rx Queue Triads and drop the rest */
		for (j = 0; j < nv->rxt_count; j++, i++) {
			struct fbnic_q_triad *qt = &nv->qt[i];

			/* Clean the work queues of unprocessed work */
			fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
			fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail);

			/* Reset completion queue descriptor ring */
			memset(qt->cmpl.desc, 0, qt->cmpl.size);

			fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
			qt->cmpl.pkt->buff.data_hard_start = NULL;

			/* Disassociate Rx queue from NAPI */
			netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
					     NETDEV_QUEUE_TYPE_RX, NULL);
		}
	}
}

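/* fbnic_fill - Bring-up counterpart of fbnic_flush()
 *
 * Re-associates the Tx and Rx queues with their NAPI instances and refills
 * the header and payload buffer descriptor queues so the hardware has pages
 * to receive into before traffic starts flowing.
 */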
void fbnic_fill(struct fbnic_net *fbn)
{
	struct fbnic_napi_vector *nv;

	list_for_each_entry(nv, &fbn->napis, napis) {
		int i, j;

		/* Configure NAPI mapping for Tx */
		for (i = 0; i < nv->txt_count; i++) {
			struct fbnic_q_triad *qt = &nv->qt[i];

			/* Nothing to do if Tx queue is disabled */
			if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
				continue;

			/* Associate Tx queue with NAPI */
			netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
					     NETDEV_QUEUE_TYPE_TX, &nv->napi);
		}

		/* Configure NAPI mapping and populate pages
		 * in the BDQ rings to use for Rx
		 */
		for (j = 0; j < nv->rxt_count; j++, i++) {
			struct fbnic_q_triad *qt = &nv->qt[i];

			/* Associate Rx queue with NAPI */
			netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
					     NETDEV_QUEUE_TYPE_RX, &nv->napi);

			/* Populate the header and payload BDQs */
			fbnic_fill_bdq(nv, &qt->sub0);
			fbnic_fill_bdq(nv, &qt->sub1);
		}
	}
}

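/* Ring size programming used by the enable helpers below: size_mask is the
 * ring length minus one, so for a power-of-two ring fls(size_mask) gives
 * log2(length).  Only the low 4 bits are written to the SIZE register, which
 * makes a 64K-entry ring encode as 0.  Worked example (for reference only,
 * not driver code):
 *
 *	entries = 256   -> size_mask = 0x00ff -> fls() = 8  -> SIZE = 0x8
 *	entries = 65536 -> size_mask = 0xffff -> fls() = 16 -> SIZE = 0x0
 */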
static void fbnic_enable_twq0(struct fbnic_ring *twq)
{
	u32 log_size = fls(twq->size_mask);

	if (!twq->size_mask)
		return;

	/* Reset head/tail */
	fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_RESET);
	twq->tail = 0;
	twq->head = 0;

	/* Store descriptor ring address and size */
	fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma));
	fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma));

	/* Write lower 4 bits of log size as 64K ring size is 0 */
	fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_SIZE, log_size & 0xf);

	fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
}

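/* Tx completion queue setup mirrors the work queue above, with the addition
 * of the TIM registers that steer completions to the vector's interrupt:
 * TIM_CTL selects the MSI-X vector, TIM_THRESHOLD is programmed to half the
 * ring size and TIM_MASK is cleared to leave the interrupt unmasked.
 */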
static void fbnic_enable_tcq(struct fbnic_napi_vector *nv,
			     struct fbnic_ring *tcq)
{
	u32 log_size = fls(tcq->size_mask);

	if (!tcq->size_mask)
		return;

	/* Reset head/tail */
	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_RESET);
	tcq->tail = 0;
	tcq->head = 0;

	/* Store descriptor ring address and size */
	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma));
	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma));

	/* Write lower 4 bits of log size as 64K ring size is 0 */
	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_SIZE, log_size & 0xf);

	/* Store interrupt information for the completion queue */
	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx);
	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2);
	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_MASK, 0);

	/* Enable queue */
	fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_ENABLE);
}

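/* The header (HPQ) and payload (PPQ) buffer descriptor queues share a single
 * BDQ control register, so both are reset and enabled through the HPQ ring.
 * The PPQ half is only programmed, and its enable bit only set, when the
 * payload ring has a nonzero size.
 */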
static void fbnic_enable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
{
	u32 bdq_ctl = FBNIC_QUEUE_BDQ_CTL_ENABLE;
	u32 log_size;

	/* Reset head/tail */
	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, FBNIC_QUEUE_BDQ_CTL_RESET);
	ppq->tail = 0;
	ppq->head = 0;
	hpq->tail = 0;
	hpq->head = 0;

	log_size = fls(hpq->size_mask);

	/* Store descriptor ring address and size */
	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma));
	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma));

	/* Write lower 4 bits of log size as 64K ring size is 0 */
	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_SIZE, log_size & 0xf);

	if (!ppq->size_mask)
		goto write_ctl;

	log_size = fls(ppq->size_mask);

	/* Add enabling of PPQ to BDQ control */
	bdq_ctl |= FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE;

	/* Store descriptor ring address and size */
	fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma));
	fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma));
	fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_SIZE, log_size & 0xf);

write_ctl:
	fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
}

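/* Program the receive descriptor engine's drop policy together with the
 * minimum head and tail room for received frames.  Only the immediate drop
 * mode is selected here; the drop_mode local presumably leaves room for
 * other policies to be chosen in the future (assumption, not stated in the
 * code).
 */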
static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
				       struct fbnic_ring *rcq)
{
	u32 drop_mode, rcq_ctl;

	drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;

	/* Specify packet layout */
	rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) |
	    FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK, FBNIC_RX_HROOM) |
	    FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK, FBNIC_RX_TROOM);

	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
}

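/* Rx completion queue setup: configure the packet layout (pad length,
 * maximum header size, payload offset and payload page cluster size) in
 * RDE_CTL1, program the ring address/size, point the RIM interrupt registers
 * at the vector and finally set the enable bit.
 */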
static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
			     struct fbnic_ring *rcq)
{
	u32 log_size = fls(rcq->size_mask);
	u32 rcq_ctl;

	fbnic_config_drop_mode_rcq(nv, rcq);

	rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
		   FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK,
			      FBNIC_RX_MAX_HDR) |
		   FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK,
			      FBNIC_RX_PAYLD_OFFSET) |
		   FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK,
			      FBNIC_RX_PAYLD_PG_CL);
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL1, rcq_ctl);

	/* Reset head/tail */
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_RESET);
	rcq->head = 0;
	rcq->tail = 0;

	/* Store descriptor ring address and size */
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma));
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma));

	/* Write lower 4 bits of log size as 64K ring size is 0 */
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);

	/* Store interrupt information for the completion queue */
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx);
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2);
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);

	/* Enable queue */
	fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE);
}

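/* fbnic_enable - Program and enable all Tx and Rx queues
 *
 * Mirror of fbnic_disable(): walks every NAPI vector, programs and enables
 * the Tx work/completion queues and the Rx buffer/completion queues, then
 * flushes the posted writes.  Interrupts are not touched here; that is left
 * to fbnic_napi_enable() (assumed ordering; the callers live outside this
 * file).
 */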
void fbnic_enable(struct fbnic_net *fbn)
{
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_napi_vector *nv;
	int i, j;

	list_for_each_entry(nv, &fbn->napis, napis) {
		/* Setup Tx Queue Triads */
		for (i = 0; i < nv->txt_count; i++) {
			struct fbnic_q_triad *qt = &nv->qt[i];

			fbnic_enable_twq0(&qt->sub0);
			fbnic_enable_tcq(nv, &qt->cmpl);
		}

		/* Setup Rx Queue Triads */
		for (j = 0; j < nv->rxt_count; j++, i++) {
			struct fbnic_q_triad *qt = &nv->qt[i];

			fbnic_enable_bdq(&qt->sub0, &qt->sub1);
			fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
			fbnic_enable_rcq(nv, &qt->cmpl);
		}
	}

	fbnic_wrfl(fbd);
}

static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
{
	struct fbnic_dev *fbd = nv->fbd;
	u32 val;

	val = FBNIC_INTR_CQ_REARM_INTR_UNMASK;

	fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
}

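/* fbnic_napi_enable - Enable NAPI and unmask the per-vector interrupts
 *
 * After enabling NAPI and rearming the completion interrupt for each vector,
 * the vector bits are collected into a bitmap (32 vectors per register) and
 * written to FBNIC_INTR_SET to force an initial interrupt so anything queued
 * during bring-up gets processed.
 */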
void fbnic_napi_enable(struct fbnic_net *fbn)
{
	u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_napi_vector *nv;
	int i;

	list_for_each_entry(nv, &fbn->napis, napis) {
		napi_enable(&nv->napi);

		fbnic_nv_irq_enable(nv);

		/* Record bit used for NAPI IRQs so we can
		 * set the mask appropriately
		 */
		irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
	}

	/* Force the first interrupt on the device to guarantee
	 * that any packets that may have been enqueued during the
	 * bringup are processed.
	 */
	for (i = 0; i < ARRAY_SIZE(irqs); i++) {
		if (!irqs[i])
			continue;
		fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
	}

	fbnic_wrfl(fbd);
}

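/* fbnic_napi_depletion_check - Kick NAPI for Rx queues that ran out of pages
 *
 * If a buffer descriptor queue has fewer than four descriptors posted, the
 * hardware may not be able to land a packet and therefore never raise a
 * completion interrupt.  For any such queue the vector's interrupt is
 * unmasked and set by software so the NAPI poll runs and refills the rings.
 * (Expected to be called periodically by the driver; the caller is outside
 * this file.)
 */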
void fbnic_napi_depletion_check(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_napi_vector *nv;
	int i, j;

	list_for_each_entry(nv, &fbn->napis, napis) {
		/* Find RQs which are completely out of pages */
		for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) {
			/* Assume 4 pages is always enough to fit a packet
			 * and therefore generate a completion and an IRQ.
			 */
			if (fbnic_desc_used(&nv->qt[i].sub0) < 4 ||
			    fbnic_desc_used(&nv->qt[i].sub1) < 4)
				irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
		}
	}

	for (i = 0; i < ARRAY_SIZE(irqs); i++) {
		if (!irqs[i])
			continue;
		fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(i), irqs[i]);
		fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
	}

	fbnic_wrfl(fbd);
}