// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
#include "aq_vec.h"
#include "aq_main.h"

#include <net/xdp.h>
#include <linux/filter.h>
#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

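/* Take an extra reference on each page backing an XDP buffer (the head page
 * plus any fragment pages), presumably so the pages stay alive while the
 * frame is owned by the XDP/network stack and the ring can keep recycling
 * them.
 */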
static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
			       struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (xdp_buff_has_frags(xdp)) {
		sinfo = xdp_get_shared_info_from_buff(xdp);

		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			page_ref_inc(skb_frag_page(frag));
		}
	}
	page_ref_inc(buff->rxdata.page);
}

static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

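/* Allocate a fresh page of the ring's page order for an Rx buffer and map it
 * for device-to-CPU DMA. On success the page, DMA address, order and initial
 * page offset are recorded in @rxpage; on failure the page (if any) is freed
 * and -ENOMEM is returned.
 */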
static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
{
	struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
	unsigned int order = rx_ring->page_order;
	struct page *page;
	int ret = -ENOMEM;
	dma_addr_t daddr;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = rx_ring->page_offset;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

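/* Make sure an Rx buffer has a usable page chunk. If the ring is the only
 * user of the current page (refcount of one), the page is simply reused from
 * its base offset; otherwise the buffer is advanced to the next chunk within
 * the page ("page flip"), or the page is released and reallocated once it is
 * exhausted. The pg_reuses/pg_flips/pg_losts/alloc_fails counters record
 * which path was taken.
 */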
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
{
	unsigned int order = self->page_order;
	u16 page_offset = self->page_offset;
	u16 frame_max = self->frame_max;
	u16 tail_size = self->tail_size;
	int ret;

	if (rxbuf->rxdata.page) {
		/* One means ring is the only user and can reuse */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try reuse buffer */
			rxbuf->rxdata.pg_off += frame_max + page_offset +
						tail_size;
			if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
			    (PAGE_SIZE << order)) {
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.pg_flips++;
				u64_stats_update_end(&self->stats.rx.syncp);

			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.pg_losts++;
				u64_stats_update_end(&self->stats.rx.syncp);
			}
		} else {
			rxbuf->rxdata.pg_off = page_offset;
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.pg_reuses++;
			u64_stats_update_end(&self->stats.rx.syncp);
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
		if (ret) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.alloc_fails++;
			u64_stats_update_end(&self->stats.rx.syncp);
		}
		return ret;
	}

	return 0;
}

static int aq_ring_alloc(struct aq_ring_s *self,
			 struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
	}

	return err;
}

int aq_ring_tx_alloc(struct aq_ring_s *self,
		     struct aq_nic_s *aq_nic,
		     unsigned int idx,
		     struct aq_nic_cfg_s *aq_nic_cfg)
{
	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	return aq_ring_alloc(self, aq_nic);
}

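/* Rx ring setup. With an XDP program attached the buffers get XDP headroom,
 * tailroom and a fixed page order (AQ_CFG_XDP_PAGEORDER); otherwise the page
 * order is derived from frame_max relative to PAGE_SIZE and may be raised to
 * the configured rxpageorder.
 */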
int aq_ring_rx_alloc(struct aq_ring_s *self,
		     struct aq_nic_s *aq_nic,
		     unsigned int idx,
		     struct aq_nic_cfg_s *aq_nic_cfg)
{
	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->xdp_prog = aq_nic->xdp_prog;
	self->frame_max = AQ_CFG_RX_FRAME_MAX;

	/* Only order-2 is allowed if XDP is enabled */
	if (READ_ONCE(self->xdp_prog)) {
		self->page_offset = AQ_XDP_HEADROOM;
		self->page_order = AQ_CFG_XDP_PAGEORDER;
		self->tail_size = AQ_XDP_TAILROOM;
	} else {
		self->page_offset = 0;
		self->page_order = fls(self->frame_max / PAGE_SIZE +
				       (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
		if (aq_nic_cfg->rxpageorder > self->page_order)
			self->page_order = aq_nic_cfg->rxpageorder;
		self->tail_size = 0;
	}

	return aq_ring_alloc(self, aq_nic);
}

int
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, unsigned int size, unsigned int dx_size)
{
	struct device *dev = aq_nic_get_dev(aq_nic);
	size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;

	memset(self, 0, sizeof(*self));

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = size;
	self->dx_size = dx_size;

	self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
					   GFP_KERNEL);
	if (!self->dx_ring) {
		aq_ring_free(self);
		return -ENOMEM;
	}

	return 0;
}

int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	self->ring_type = ring_type;

	if (self->ring_type == ATL_RING_RX)
		u64_stats_init(&self->stats.rx.syncp);
	else
		u64_stats_init(&self->stats.tx.syncp);

	return 0;
}

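/* Return true when descriptor index @i lies strictly between head @h and
 * tail @t in ring order, taking wrap-around into account. For example, with
 * h == 250 and t == 10 on a 256-entry ring, indices 251..255 and 0..9 are
 * "in range".
 */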
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

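/* Stop the Tx queue once the free descriptor count can no longer fit a
 * maximally fragmented skb, and wake it again when enough descriptors have
 * been reclaimed to pass the restart threshold.
 */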
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev,
				     AQ_NIC_RING2QMAP(ring->aq_nic,
						      ring->idx))) {
		netif_wake_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
		u64_stats_update_begin(&ring->stats.tx.syncp);
		ring->stats.tx.queue_restarts++;
		u64_stats_update_end(&ring->stats.tx.syncp);
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev,
				      AQ_NIC_RING2QMAP(ring->aq_nic,
						       ring->idx)))
		netif_stop_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}

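/* Reclaim completed Tx descriptors, at most AQ_CFG_TX_CLEAN_BUDGET per call:
 * unmap the DMA buffers, account packets/bytes at the end-of-packet
 * descriptor and free the attached skb or XDP frame. Returns true when the
 * budget was not exhausted, i.e. everything pending was cleaned.
 */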
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
						buff->eop_index,
						self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (likely(!buff->is_eop))
			goto out;

		if (buff->skb) {
			u64_stats_update_begin(&self->stats.tx.syncp);
			++self->stats.tx.packets;
			self->stats.tx.bytes += buff->skb->len;
			u64_stats_update_end(&self->stats.tx.syncp);
			dev_kfree_skb_any(buff->skb);
		} else if (buff->xdpf) {
			u64_stats_update_begin(&self->stats.tx.syncp);
			++self->stats.tx.packets;
			self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
			u64_stats_update_end(&self->stats.tx.syncp);
			xdp_return_frame_rx_napi(buff->xdpf);
		}

out:
		buff->skb = NULL;
		buff->xdpf = NULL;
		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

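/* Propagate hardware checksum results to the skb when RX checksum offload is
 * enabled: descriptors flagged with a checksum error are counted and left at
 * CHECKSUM_NONE; otherwise one level of "checksum unnecessary" is recorded
 * for a good IP header checksum and another for a good TCP/UDP checksum.
 */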
static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		u64_stats_update_begin(&self->stats.rx.syncp);
		++self->stats.rx.errors;
		u64_stats_update_end(&self->stats.rx.syncp);
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

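/* .ndo_xdp_xmit handler: transmit a batch of XDP frames on the Tx ring mapped
 * to the current CPU's vector (traffic class 0). Frames rejected with
 * NETDEV_TX_BUSY are counted as dropped; the number of accepted frames is
 * returned.
 */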
int aq_xdp_xmit(struct net_device *dev, int num_frames,
		struct xdp_frame **frames, u32 flags)
{
	struct aq_nic_s *aq_nic = netdev_priv(dev);
	unsigned int vec, i, drop = 0;
	int cpu = smp_processor_id();
	struct aq_nic_cfg_s *aq_cfg;
	struct aq_ring_s *ring;

	aq_cfg = aq_nic_get_cfg(aq_nic);
	vec = cpu % aq_cfg->vecs;
	ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
			drop++;
	}

	return num_frames - drop;
}

static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
					struct net_device *dev,
					struct aq_ring_buff_s *buff)
{
	struct xdp_frame *xdpf;
	struct sk_buff *skb;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return NULL;

	skb = xdp_build_skb_from_frame(xdpf, dev);
	if (!skb)
		return NULL;

	aq_get_rxpages_xdp(buff, xdp);
	return skb;
}

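/* Run the attached XDP program, if any, on a received buffer and act on its
 * verdict: build an skb for XDP_PASS (or when no program is attached), send
 * the frame on the paired Tx ring for XDP_TX, hand it to the core for
 * XDP_REDIRECT, and account drops/aborts otherwise. Returns the skb to pass
 * up the stack, or an ERR_PTR() when the frame was consumed or dropped.
 */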
static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
				       struct xdp_buff *xdp,
				       struct aq_ring_s *rx_ring,
				       struct aq_ring_buff_s *buff)
{
	int result = NETDEV_TX_BUSY;
	struct aq_ring_s *tx_ring;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act = XDP_ABORTED;
	struct sk_buff *skb;

	u64_stats_update_begin(&rx_ring->stats.rx.syncp);
	++rx_ring->stats.rx.packets;
	rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
	u64_stats_update_end(&rx_ring->stats.rx.syncp);

	prog = READ_ONCE(rx_ring->xdp_prog);
	if (!prog)
		return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);

	prefetchw(xdp->data_hard_start); /* xdp_frame write */

	/* single buffer XDP program, but packet is multi buffer, aborted */
	if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
		goto out_aborted;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
		if (!skb)
			goto out_aborted;
		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
		++rx_ring->stats.rx.xdp_pass;
		u64_stats_update_end(&rx_ring->stats.rx.syncp);
		return skb;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_aborted;
		tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
		result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
		if (result == NETDEV_TX_BUSY)
			goto out_aborted;
		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
		++rx_ring->stats.rx.xdp_tx;
		u64_stats_update_end(&rx_ring->stats.rx.syncp);
		aq_get_rxpages_xdp(buff, xdp);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
			goto out_aborted;
		xdp_do_flush();
		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
		++rx_ring->stats.rx.xdp_redirect;
		u64_stats_update_end(&rx_ring->stats.rx.syncp);
		aq_get_rxpages_xdp(buff, xdp);
		break;
	default:
		fallthrough;
	case XDP_ABORTED:
out_aborted:
		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
		++rx_ring->stats.rx.xdp_aborted;
		u64_stats_update_end(&rx_ring->stats.rx.syncp);
		trace_xdp_exception(aq_nic->ndev, prog, act);
		bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
		break;
	case XDP_DROP:
		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
		++rx_ring->stats.rx.xdp_drop;
		u64_stats_update_end(&rx_ring->stats.rx.syncp);
		break;
	}

	return ERR_PTR(-result);
}

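/* Gather the remaining descriptors of a multi-descriptor packet into the
 * xdp_buff as page fragments, syncing each piece for CPU access and folding
 * its checksum flags back into the head buffer. Returns true when the packet
 * would need more than MAX_SKB_FRAGS fragments and must be dropped by the
 * caller.
 */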
static bool aq_add_rx_fragment(struct device *dev,
			       struct aq_ring_s *ring,
			       struct aq_ring_buff_s *buff,
			       struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	struct aq_ring_buff_s *buff_ = buff;

	memset(sinfo, 0, sizeof(*sinfo));
	do {
		skb_frag_t *frag;

		if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
			return true;

		frag = &sinfo->frags[sinfo->nr_frags++];
		buff_ = &ring->buff_ring[buff_->next];
		dma_sync_single_range_for_cpu(dev,
					      buff_->rxdata.daddr,
					      buff_->rxdata.pg_off,
					      buff_->len,
					      DMA_FROM_DEVICE);
		sinfo->xdp_frags_size += buff_->len;
		skb_frag_fill_page_desc(frag, buff_->rxdata.page,
					buff_->rxdata.pg_off,
					buff_->len);

		buff_->is_cleaned = 1;

		buff->is_ip_cso &= buff_->is_ip_cso;
		buff->is_udp_cso &= buff_->is_udp_cso;
		buff->is_tcp_cso &= buff_->is_tcp_cso;
		buff->is_cso_err |= buff_->is_cso_err;

		if (page_is_pfmemalloc(buff_->rxdata.page))
			xdp_buff_set_frag_pfmemalloc(xdp);

	} while (!buff_->is_eop);

	xdp_buff_set_frags_flag(xdp);

	return false;
}

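/* Non-XDP Rx path: walk completed descriptors, reassemble multi-descriptor
 * (RSC) packets, copy the header into a freshly allocated skb and attach the
 * remaining data as page fragments, then fill in VLAN, checksum, RSS hash and
 * PTP timestamp metadata before handing the skb to napi_gro_receive().
 * Processes at most @budget packets.
 */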
static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
			      int *work_done, int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
		self->sw_head = aq_ring_next_dx(self, self->sw_head),
		--budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			unsigned int frag_cnt = 0U;
			buff_ = buff;
			do {
				bool is_rsc_completed = true;

				if (buff_->next >= self->size) {
					err = -EIO;
					goto err_exit;
				}

				frag_cnt++;
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed) ||
						frag_cnt > MAX_SKB_FRAGS) {
					err = 0;
					goto err_exit;
				}

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;

			} while (!buff_->is_eop);

			if (buff->is_error ||
			    (buff->is_lro && buff->is_cso_err)) {
				buff_ = buff;
				do {
					if (buff_->next >= self->size) {
						err = -EIO;
						goto err_exit;
					}
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				u64_stats_update_begin(&self->stats.rx.syncp);
				++self->stats.rx.errors;
				u64_stats_update_end(&self->stats.rx.syncp);
				continue;
			}
		}

		if (buff->is_error) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			++self->stats.rx.errors;
			u64_stats_update_end(&self->stats.rx.syncp);
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.skb_alloc_fails++;
			u64_stats_update_end(&self->stats.rx.syncp);
			err = -ENOMEM;
			goto err_exit;
		}
		if (is_ptp_ring)
			buff->len -=
				aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb),
						  aq_buf_vaddr(&buff->rxdata),
						  buff->len);

		hdr_len = buff->len;
		if (hdr_len > AQ_CFG_RX_HDR_SIZE)
			hdr_len = eth_get_headlen(skb->dev,
						  aq_buf_vaddr(&buff->rxdata),
						  AQ_CFG_RX_HDR_SIZE);

		memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
		       ALIGN(hdr_len, sizeof(long)));

		if (buff->len - hdr_len > 0) {
			skb_add_rx_frag(skb, i++, buff->rxdata.page,
					buff->rxdata.pg_off + hdr_len,
					buff->len - hdr_len,
					self->frame_max);
			page_ref_inc(buff->rxdata.page);
		}

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];

				dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
							      buff_->rxdata.daddr,
							      buff_->rxdata.pg_off,
							      buff_->len,
							      DMA_FROM_DEVICE);
				skb_add_rx_frag(skb, i++,
						buff_->rxdata.page,
						buff_->rxdata.pg_off,
						buff_->len,
						self->frame_max);
				page_ref_inc(buff_->rxdata.page);
				buff_->is_cleaned = 1;

				buff->is_ip_cso &= buff_->is_ip_cso;
				buff->is_udp_cso &= buff_->is_udp_cso;
				buff->is_tcp_cso &= buff_->is_tcp_cso;
				buff->is_cso_err |= buff_->is_cso_err;

			} while (!buff_->is_eop);
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb,
				    is_ptp_ring ? 0
						: AQ_NIC_RING2QMAP(self->aq_nic,
								   self->idx));

		u64_stats_update_begin(&self->stats.rx.syncp);
		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;
		u64_stats_update_end(&self->stats.rx.syncp);

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

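/* XDP-enabled Rx path: like __aq_ring_rx_clean(), but each completed packet
 * is first wrapped in an xdp_buff (with the ring's reserved headroom and
 * tailroom) and passed through aq_xdp_run_prog(); only verdicts that produce
 * an skb get Rx metadata attached and are passed to napi_gro_receive().
 */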
static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
			       struct napi_struct *napi, int *work_done,
			       int budget)
{
	int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
		       rx_ring->tail_size;
	struct aq_nic_s *aq_nic = rx_ring->aq_nic;
	bool is_rsc_completed = true;
	struct device *dev;
	int err = 0;

	dev = aq_nic_get_dev(aq_nic);
	for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
		rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
		--budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
		bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
		struct aq_ring_buff_s *buff_ = NULL;
		u16 ptp_hwtstamp_len = 0;
		struct skb_shared_hwtstamps shhwtstamps;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		struct xdp_buff xdp;
		void *hard_start;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				if (buff_->next >= rx_ring->size) {
					err = -EIO;
					goto err_exit;
				}
				next_ = buff_->next;
				buff_ = &rx_ring->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(rx_ring->sw_head,
							    next_,
							    rx_ring->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;
			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error ||
			    (buff->is_lro && buff->is_cso_err)) {
				buff_ = buff;
				do {
					if (buff_->next >= rx_ring->size) {
						err = -EIO;
						goto err_exit;
					}
					next_ = buff_->next;
					buff_ = &rx_ring->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				u64_stats_update_begin(&rx_ring->stats.rx.syncp);
				++rx_ring->stats.rx.errors;
				u64_stats_update_end(&rx_ring->stats.rx.syncp);
				continue;
			}
		}

		if (buff->is_error) {
			u64_stats_update_begin(&rx_ring->stats.rx.syncp);
			++rx_ring->stats.rx.errors;
			u64_stats_update_end(&rx_ring->stats.rx.syncp);
			continue;
		}

		dma_sync_single_range_for_cpu(dev,
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);
		hard_start = page_address(buff->rxdata.page) +
			     buff->rxdata.pg_off - rx_ring->page_offset;

		if (is_ptp_ring) {
			ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps,
							     aq_buf_vaddr(&buff->rxdata),
							     buff->len);
			buff->len -= ptp_hwtstamp_len;
		}

		xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
		xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
				 buff->len, false);
		if (!buff->is_eop) {
			if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
				u64_stats_update_begin(&rx_ring->stats.rx.syncp);
				++rx_ring->stats.rx.packets;
				rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
				++rx_ring->stats.rx.xdp_aborted;
				u64_stats_update_end(&rx_ring->stats.rx.syncp);
				continue;
			}
		}

		skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
		if (IS_ERR(skb) || !skb)
			continue;

		if (ptp_hwtstamp_len > 0)
			*skb_hwtstamps(skb) = shhwtstamps;

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		aq_rx_checksum(rx_ring, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb,
				    is_ptp_ring ? 0
						: AQ_NIC_RING2QMAP(rx_ring->aq_nic,
								   rx_ring->idx));

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

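/* Rx NAPI entry point: dispatch to the XDP clean routine while the
 * aq_xdp_locking_key static key is enabled (presumably while an XDP program
 * is installed), otherwise to the regular skb path.
 */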
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	if (static_branch_unlikely(&aq_xdp_locking_key))
		return __aq_ring_xdp_clean(self, napi, work_done, budget);
	else
		return __aq_ring_rx_clean(self, napi, work_done, budget);
}

void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	while (self->sw_head != self->hw_head) {
		u64 ns;

		aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
						self->dx_ring +
						(self->sw_head * self->dx_size),
						self->dx_size, &ns);
		aq_ptp_tx_hwtstamp(aq_nic, ns);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
#endif
}

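/* Refill the Rx ring: once at least min(AQ_CFG_RX_REFILL_THRES, size / 2)
 * descriptors are free, attach a page chunk to every free descriptor via
 * aq_get_rxpages() and publish its DMA address. Stops early and returns the
 * error on allocation failure.
 */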
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
		self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = self->frame_max;

		err = aq_get_rxpages(self, buff);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	for (; self->sw_head != self->sw_tail;
		self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}
}

void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	kfree(self->buff_ring);
	self->buff_ring = NULL;

	if (self->dx_ring) {
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
		self->dx_ring = NULL;
	}
}

void aq_ring_hwts_rx_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	if (self->dx_ring) {
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size + AQ_CFG_RXDS_DEF,
				  self->dx_ring, self->dx_ring_pa);
		self->dx_ring = NULL;
	}
}

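/* Copy this ring's statistics into the ethtool data array under the
 * u64_stats retry loop so a consistent snapshot is read; the ordering must
 * mirror aq_ethtool_queue_rx_stat_names / aq_ethtool_queue_tx_stat_names.
 * Returns the number of entries written.
 */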
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
	unsigned int count;
	unsigned int start;

	if (self->ring_type == ATL_RING_RX) {
		/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
		do {
			count = 0;
			start = u64_stats_fetch_begin(&self->stats.rx.syncp);
			data[count] = self->stats.rx.packets;
			data[++count] = self->stats.rx.jumbo_packets;
			data[++count] = self->stats.rx.lro_packets;
			data[++count] = self->stats.rx.errors;
			data[++count] = self->stats.rx.alloc_fails;
			data[++count] = self->stats.rx.skb_alloc_fails;
			data[++count] = self->stats.rx.polls;
			data[++count] = self->stats.rx.pg_flips;
			data[++count] = self->stats.rx.pg_reuses;
			data[++count] = self->stats.rx.pg_losts;
			data[++count] = self->stats.rx.xdp_aborted;
			data[++count] = self->stats.rx.xdp_drop;
			data[++count] = self->stats.rx.xdp_pass;
			data[++count] = self->stats.rx.xdp_tx;
			data[++count] = self->stats.rx.xdp_invalid;
			data[++count] = self->stats.rx.xdp_redirect;
		} while (u64_stats_fetch_retry(&self->stats.rx.syncp, start));
	} else {
		/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
		do {
			count = 0;
			start = u64_stats_fetch_begin(&self->stats.tx.syncp);
			data[count] = self->stats.tx.packets;
			data[++count] = self->stats.tx.queue_restarts;
		} while (u64_stats_fetch_retry(&self->stats.tx.syncp, start));
	}

	return ++count;
}
974