/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2018 aQuantia Corporation. All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   (1) Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 *   (2) Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 *   (3) The name of the author may not be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <machine/param.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/bitstring.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/ethernet.h>
#include <net/iflib.h>
#include <netinet/in.h>

#include "aq_common.h"

#include "aq_ring.h"
#include "aq_dbg.h"
#include "aq_device.h"
#include "aq_hw.h"
#include "aq_hw_llh.h"

/* iflib txrx interface prototypes */
static int aq_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void aq_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int aq_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static void aq_ring_rx_refill(void* arg, if_rxd_update_t iru);
static void aq_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx);
static int aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget);
static int aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

struct if_txrx aq_txrx = {
	.ift_txd_encap = aq_isc_txd_encap,
	.ift_txd_flush = aq_isc_txd_flush,
	.ift_txd_credits_update = aq_isc_txd_credits_update,
	.ift_rxd_available = aq_isc_rxd_available,
	.ift_rxd_pkt_get = aq_isc_rxd_pkt_get,
	.ift_rxd_refill = aq_ring_rx_refill,
	.ift_rxd_flush = aq_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

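/* Advance a ring index by one, wrapping back to zero past the last slot (lim). */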
static inline uint32_t
aq_next(uint32_t i, uint32_t lim)
{
    return (i == lim) ? 0 : i + 1;
}

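/*
 * Program an RX descriptor ring: base address, ring length, buffer size and
 * interrupt vector mapping.  The ring is left disabled here; it is enabled
 * later by aq_ring_rx_start().
 */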
int aq_ring_rx_init(struct aq_hw *hw, struct aq_ring *ring)
/*                     uint64_t ring_addr,
                     u32 ring_size,
                     u32 ring_idx,
                     u32 interrupt_cause,
                     u32 cpu_idx) */
{
    int err;
    u32 dma_desc_addr_lsw = (u32)ring->rx_descs_phys & 0xffffffff;
    u32 dma_desc_addr_msw = (u32)(ring->rx_descs_phys >> 32);

    AQ_DBG_ENTERA("[%d]", ring->index);

    rdm_rx_desc_en_set(hw, false, ring->index);

    rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);

    reg_rx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index);

    reg_rx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index);

    rdm_rx_desc_len_set(hw, ring->rx_size / 8U, ring->index);

    device_printf(ring->dev->dev, "ring %d: PAGE_SIZE=%d MCLBYTES=%d rx_max_frame_size=%d\n",
				  ring->index, PAGE_SIZE, MCLBYTES, ring->rx_max_frame_size);
    rdm_rx_desc_data_buff_size_set(hw, ring->rx_max_frame_size / 1024U, ring->index);

    rdm_rx_desc_head_buff_size_set(hw, 0U, ring->index);
    rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);
    rpo_rx_desc_vlan_stripping_set(hw, 0U, ring->index);

    /* Rx ring set mode */

    /* Mapping interrupt vector */
    itr_irq_map_rx_set(hw, ring->msix, ring->index);
    itr_irq_map_en_rx_set(hw, true, ring->index);

    rdm_cpu_id_set(hw, 0, ring->index);
    rdm_rx_desc_dca_en_set(hw, 0U, ring->index);
    rdm_rx_head_dca_en_set(hw, 0U, ring->index);
    rdm_rx_pld_dca_en_set(hw, 0U, ring->index);

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

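/*
 * Program a TX descriptor ring: base address, ring length, write-back
 * threshold and interrupt vector mapping.  The ring is left disabled here;
 * it is enabled later by aq_ring_tx_start().
 */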
int aq_ring_tx_init(struct aq_hw *hw, struct aq_ring *ring)
/*                     uint64_t ring_addr,
                     u32 ring_size,
                     u32 ring_idx,
                     u32 interrupt_cause,
                     u32 cpu_idx) */
{
    int err;
    u32 dma_desc_addr_lsw = (u32)ring->tx_descs_phys & 0xffffffff;
    u32 dma_desc_addr_msw = (u32)(ring->tx_descs_phys >> 32);

    AQ_DBG_ENTERA("[%d]", ring->index);

    tdm_tx_desc_en_set(hw, 0U, ring->index);

    reg_tx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index);

    reg_tx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index);

    tdm_tx_desc_len_set(hw, ring->tx_size / 8U, ring->index);

    aq_ring_tx_tail_update(hw, ring, 0U);

    /* Set Tx threshold */
    tdm_tx_desc_wr_wb_threshold_set(hw, 0U, ring->index);

    /* Mapping interrupt vector */
    itr_irq_map_tx_set(hw, ring->msix, ring->index);
    itr_irq_map_en_tx_set(hw, true, ring->index);

    tdm_cpu_id_set(hw, 0, ring->index);
    tdm_tx_desc_dca_en_set(hw, 0U, ring->index);

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

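/* Publish the TX producer index to hardware by writing the ring's tail pointer. */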
int aq_ring_tx_tail_update(struct aq_hw *hw, struct aq_ring *ring, u32 tail)
{
    AQ_DBG_ENTERA("[%d]", ring->index);
    reg_tx_dma_desc_tail_ptr_set(hw, tail, ring->index);
    AQ_DBG_EXIT(0);
    return (0);
}

int aq_ring_tx_start(struct aq_hw *hw, struct aq_ring *ring)
{
    int err;

    AQ_DBG_ENTERA("[%d]", ring->index);
    tdm_tx_desc_en_set(hw, 1U, ring->index);
    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

int aq_ring_rx_start(struct aq_hw *hw, struct aq_ring *ring)
{
    int err;

    AQ_DBG_ENTERA("[%d]", ring->index);
    rdm_rx_desc_en_set(hw, 1U, ring->index);
    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

int aq_ring_tx_stop(struct aq_hw *hw, struct aq_ring *ring)
{
    int err;

    AQ_DBG_ENTERA("[%d]", ring->index);
    tdm_tx_desc_en_set(hw, 0U, ring->index);
    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

int aq_ring_rx_stop(struct aq_hw *hw, struct aq_ring *ring)
{
    int err;

    AQ_DBG_ENTERA("[%d]", ring->index);
    rdm_rx_desc_en_set(hw, 0U, ring->index);
    /* Invalidate Descriptor Cache to prevent writing to the cached
     * descriptors and to the data pointer of those descriptors
     */
    rdm_rx_dma_desc_cache_init_tgl(hw);
    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

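/*
 * iflib ift_rxd_refill callback: write the physical addresses of freshly
 * allocated receive buffers into the ring's RX descriptors, starting at
 * iru_pidx and wrapping around the ring as needed.
 */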
static void aq_ring_rx_refill(void* arg, if_rxd_update_t iru)
{
	aq_dev_t *aq_dev = arg;
	aq_rx_desc_t *rx_desc;
	struct aq_ring *ring;
	qidx_t i, pidx;

	AQ_DBG_ENTERA("ring=%d iru_pidx=%d iru_count=%d iru->iru_buf_size=%d",
				  iru->iru_qsidx, iru->iru_pidx, iru->iru_count, iru->iru_buf_size);

	ring = aq_dev->rx_rings[iru->iru_qsidx];
	pidx = iru->iru_pidx;

	for (i = 0; i < iru->iru_count; i++) {
		rx_desc = (aq_rx_desc_t *) &ring->rx_descs[pidx];
		rx_desc->read.buf_addr = htole64(iru->iru_paddrs[i]);
		rx_desc->read.hdr_addr = 0;

		pidx = aq_next(pidx, ring->rx_size - 1);
	}

	AQ_DBG_EXIT(0);
}

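/*
 * iflib ift_rxd_flush callback: hand the refilled descriptors to hardware by
 * writing the RX tail pointer.
 */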
static void aq_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
							 qidx_t pidx)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->rx_rings[rxqid];

	AQ_DBG_ENTERA("[%d] tail=%u", ring->index, pidx);
	reg_rx_dma_desc_tail_ptr_set(&aq_dev->hw, pidx, ring->index);
	AQ_DBG_EXIT(0);
}

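/*
 * iflib ift_rxd_available callback: starting at idx, count completed packets
 * (descriptors with the DD bit set, up to and including an EOP descriptor),
 * following RSC chains for LRO/jumbo frames, until the ring or the budget is
 * exhausted.
 */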
static int aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->rx_rings[rxqid];
	aq_rx_desc_t *rx_desc = (aq_rx_desc_t *) ring->rx_descs;
	int cnt, i, iter;

	AQ_DBG_ENTERA("[%d] head=%u, budget %d", ring->index, idx, budget);

	for (iter = 0, cnt = 0, i = idx; iter < ring->rx_size && cnt <= budget;) {
		trace_aq_rx_descr(ring->index, i, (volatile u64*)&rx_desc[i]);
		if (!rx_desc[i].wb.dd)
			break;

		if (rx_desc[i].wb.eop) {
			iter++;
			i = aq_next(i, ring->rx_size - 1);

			cnt++;
		} else {
			/* LRO/Jumbo: wait for the whole packet to be in the ring */
			if (rx_desc[i].wb.rsc_cnt) {
				i = rx_desc[i].wb.next_desp;
				iter++;
				continue;
			} else {
				iter++;
				i = aq_next(i, ring->rx_size - 1);
				continue;
			}
		}
	}

	AQ_DBG_EXIT(cnt);
	return (cnt);
}

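/*
 * Translate the hardware checksum status bits from an RX write-back
 * descriptor into mbuf/iflib checksum flags.
 */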
static void aq_rx_set_cso_flags(aq_rx_desc_t *rx_desc, if_rxd_info_t ri)
{
	if ((rx_desc->wb.pkt_type & 0x3) == 0) { // IPv4
		if (rx_desc->wb.rx_cntl & BIT(0)) { // IPv4 csum checked
			ri->iri_csum_flags |= CSUM_IP_CHECKED;
			if (!(rx_desc->wb.rx_stat & BIT(1)))
				ri->iri_csum_flags |= CSUM_IP_VALID;
		}
	}
	if (rx_desc->wb.rx_cntl & BIT(1)) { // TCP/UDP csum checked
		ri->iri_csum_flags |= CSUM_L4_CALC;
		if (!(rx_desc->wb.rx_stat & BIT(2)) && // L4 csum error
			(rx_desc->wb.rx_stat & BIT(3))) {  // L4 csum valid
			ri->iri_csum_flags |= CSUM_L4_VALID;
			ri->iri_csum_data = htons(0xffff);
		}
	}
}

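/* Map the hardware RSS hash type from the RX descriptor to an mbuf M_HASHTYPE_* value. */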
static uint8_t bsd_rss_type[16] = {
	[AQ_RX_RSS_TYPE_IPV4] = M_HASHTYPE_RSS_IPV4,
	[AQ_RX_RSS_TYPE_IPV6] = M_HASHTYPE_RSS_IPV6,
	[AQ_RX_RSS_TYPE_IPV4_TCP] = M_HASHTYPE_RSS_TCP_IPV4,
	[AQ_RX_RSS_TYPE_IPV6_TCP] = M_HASHTYPE_RSS_TCP_IPV6,
	[AQ_RX_RSS_TYPE_IPV4_UDP] = M_HASHTYPE_RSS_UDP_IPV4,
	[AQ_RX_RSS_TYPE_IPV6_UDP] = M_HASHTYPE_RSS_UDP_IPV6,
};

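/*
 * iflib ift_rxd_pkt_get callback: walk the write-back descriptors that make
 * up one received packet, filling in the fragment list, total length, VLAN
 * tag, checksum and RSS information for iflib.
 */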
static int aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->rx_rings[ri->iri_qsidx];
	aq_rx_desc_t *rx_desc;
	if_t ifp;
	int cidx, rc = 0, i;
	size_t len, total_len;

	AQ_DBG_ENTERA("[%d] start=%d", ring->index, ri->iri_cidx);
	cidx = ri->iri_cidx;
	ifp = iflib_get_ifp(aq_dev->ctx);
	i = 0;

	do {
		rx_desc = (aq_rx_desc_t *) &ring->rx_descs[cidx];

		trace_aq_rx_descr(ring->index, cidx, (volatile u64*)rx_desc);

		if ((rx_desc->wb.rx_stat & BIT(0)) != 0) {
			ring->stats.rx_err++;
			rc = (EBADMSG);
			goto exit;
		}

		if (!rx_desc->wb.eop) {
			len = ring->rx_max_frame_size;
		} else {
			total_len = le32toh(rx_desc->wb.pkt_len);
			len = total_len & (ring->rx_max_frame_size - 1);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;

		if ((rx_desc->wb.pkt_type & 0x60) != 0) { /* VLAN-tagged frame */
			ri->iri_flags |= M_VLANTAG;
			ri->iri_vtag = le32toh(rx_desc->wb.vlan);
		}

		i++;
		cidx = aq_next(cidx, ring->rx_size - 1);
	} while (!rx_desc->wb.eop);

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
		aq_rx_set_cso_flags(rx_desc, ri);
	}
	ri->iri_rsstype = bsd_rss_type[rx_desc->wb.rss_type & 0xF];
	if (ri->iri_rsstype != M_HASHTYPE_NONE) {
		ri->iri_flowid = le32toh(rx_desc->wb.rss_hash);
	}

	ri->iri_len = total_len;
	ri->iri_nfrags = i;

	ring->stats.rx_bytes += total_len;
	ring->stats.rx_pkts++;

exit:
	AQ_DBG_EXIT(rc);
	return (rc);
}

/*****************************************************************************/
/*                                                                           */
/*****************************************************************************/

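/*
 * Set the per-descriptor command bits for a TX data descriptor: FCS
 * insertion, IPv4/L4 checksum offload, interrupt write-back and any bits
 * carried over from the context descriptor setup.
 */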
static void aq_setup_offloads(aq_dev_t *aq_dev, if_pkt_info_t pi, aq_tx_desc_t *txd, u32 tx_cmd)
{
    AQ_DBG_ENTER();
    txd->cmd |= tx_desc_cmd_fcs;
    txd->cmd |= (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO)) ? tx_desc_cmd_ipv4 : 0;
    txd->cmd |= (pi->ipi_csum_flags &
				 (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_IP_UDP | CSUM_IP6_UDP)
				) ? tx_desc_cmd_l4cs : 0;
    txd->cmd |= (pi->ipi_flags & IPI_TX_INTR) ? tx_desc_cmd_wb : 0;
    txd->cmd |= tx_cmd;
    AQ_DBG_EXIT(0);
}

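/*
 * Fill the TX context descriptor for TSO and/or VLAN insertion.  Returns the
 * command bits that must also be applied to the following data descriptors,
 * or 0 if no context descriptor is needed.
 */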
static int aq_ring_tso_setup(aq_dev_t *aq_dev, if_pkt_info_t pi, uint32_t *hdrlen, aq_txc_desc_t *txc)
{
	uint32_t tx_cmd = 0;

	AQ_DBG_ENTER();
	if (pi->ipi_csum_flags & CSUM_TSO) {
		AQ_DBG_PRINT("aq_tso_setup(): TSO enabled");
		tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;

		if (pi->ipi_ipproto != IPPROTO_TCP) {
			AQ_DBG_PRINT("aq_tso_setup not a tcp");
			AQ_DBG_EXIT(0);
			return (0);
		}

		txc->cmd = 0x4; /* TCP */

		if (pi->ipi_csum_flags & CSUM_IP6_TCP)
		    txc->cmd |= 0x2;

		txc->l2_len = pi->ipi_ehdrlen;
		txc->l3_len = pi->ipi_ip_hlen;
		txc->l4_len = pi->ipi_tcp_hlen;
		txc->mss_len = pi->ipi_tso_segsz;
		*hdrlen = txc->l2_len + txc->l3_len + txc->l4_len;
	}

	// Set VLAN tag
	if (pi->ipi_mflags & M_VLANTAG) {
		tx_cmd |= tx_desc_cmd_vlan;
		txc->vlan_tag = htole16(pi->ipi_vtag);
	}

	if (tx_cmd) {
		txc->type = tx_desc_type_ctx;
		txc->idx = 0;
	}

	AQ_DBG_EXIT(tx_cmd);
	return (tx_cmd);
}

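/*
 * iflib ift_txd_encap callback: build the descriptors for one packet.  A
 * context descriptor is emitted first when TSO or VLAN insertion is needed,
 * followed by one data descriptor per DMA segment; the last descriptor is
 * marked EOP.  The new producer index is reported back via ipi_new_pidx.
 */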
static int aq_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring;
	aq_txc_desc_t *txc;
	aq_tx_desc_t *txd = NULL;
	bus_dma_segment_t *segs;
	qidx_t pidx;
	uint32_t hdrlen = 0, pay_len;
	uint8_t tx_cmd = 0;
	int i, desc_count = 0;

	AQ_DBG_ENTERA("[%d] start=%d", pi->ipi_qsidx, pi->ipi_pidx);
	ring = aq_dev->tx_rings[pi->ipi_qsidx];

	segs = pi->ipi_segs;
	pidx = pi->ipi_pidx;
	txc = (aq_txc_desc_t *)&ring->tx_descs[pidx];
	AQ_DBG_PRINT("txc at 0x%p, txd at 0x%p len %d", txc, txd, pi->ipi_len);

	pay_len = pi->ipi_len;

	txc->flags1 = 0U;
	txc->flags2 = 0U;

	tx_cmd = aq_ring_tso_setup(aq_dev, pi, &hdrlen, txc);
	AQ_DBG_PRINT("tx_cmd = 0x%x", tx_cmd);

	if (tx_cmd) {
		trace_aq_tx_context_descr(ring->index, pidx, (volatile void*)txc);
		/* We've consumed the first desc, adjust counters */
		pidx = aq_next(pidx, ring->tx_size - 1);

		txd = &ring->tx_descs[pidx];
		txd->flags = 0U;
	} else {
		txd = (aq_tx_desc_t *)txc;
	}
	AQ_DBG_PRINT("txc at 0x%p, txd at 0x%p", txc, txd);

	txd->ct_en = !!tx_cmd;

	txd->type = tx_desc_type_desc;

	aq_setup_offloads(aq_dev, pi, txd, tx_cmd);

	if (tx_cmd) {
		txd->ct_idx = 0;
	}

	pay_len -= hdrlen;

	txd->pay_len = pay_len;

	AQ_DBG_PRINT("num_frag[%d] pay_len[%d]", pi->ipi_nsegs, pay_len);
	for (i = 0; i < pi->ipi_nsegs; i++) {
		if (desc_count > 0) {
			txd = &ring->tx_descs[pidx];
			txd->flags = 0U;
		}

		txd->buf_addr = htole64(segs[i].ds_addr);

		txd->type = tx_desc_type_desc;
		txd->len = segs[i].ds_len;
		txd->pay_len = pay_len;
		if (i < pi->ipi_nsegs - 1)
			trace_aq_tx_descr(ring->index, pidx, (volatile void*)txd);

		pidx = aq_next(pidx, ring->tx_size - 1);

		desc_count++;
	}
	// Last descriptor requires EOP and WB
	txd->eop = 1U;

	AQ_DBG_DUMP_DESC(txd);
	trace_aq_tx_descr(ring->index, pidx, (volatile void*)txd);
	ring->tx_tail = pidx;

	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += pay_len;

	pi->ipi_new_pidx = pidx;

	AQ_DBG_EXIT(0);
	return (0);
}

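/*
 * iflib ift_txd_flush callback: write the TX tail pointer so the hardware
 * starts (or continues) transmitting the queued descriptors.
 */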
static void aq_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->tx_rings[txqid];
	AQ_DBG_ENTERA("[%d] tail=%d", ring->index, pidx);

	// Update the write pointer - submits packet for transmission
	aq_ring_tx_tail_update(&aq_dev->hw, ring, pidx);
	AQ_DBG_EXIT(0);
}

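/* Ring distance from index b forward to index a, accounting for wrap-around. */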
static inline unsigned int aq_avail_desc(int a, int b, int size)
{
    return ((b >= a) ? (size - b + a) : (a - b));
}

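/*
 * iflib ift_txd_credits_update callback: report how many TX descriptors the
 * hardware has completed since the last call, based on the hardware head
 * pointer.  When 'clear' is set, the software head is advanced to match.
 */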
static int aq_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	aq_dev_t *aq_dev = arg;
	struct aq_ring *ring = aq_dev->tx_rings[txqid];
	uint32_t head;
	int avail;

	AQ_DBG_ENTERA("[%d] clear=%d", ring->index, clear);
	avail = 0;
	head = tdm_tx_desc_head_ptr_get(&aq_dev->hw, ring->index);
	AQ_DBG_PRINT("swhead %d hwhead %d", ring->tx_head, head);

	if (ring->tx_head == head) {
		avail = 0; //ring->tx_size;
		goto done;
	}

	avail = aq_avail_desc(head, ring->tx_head, ring->tx_size);
	if (clear)
		ring->tx_head = head;

done:
	AQ_DBG_EXIT(avail);
	return (avail);
}
582