/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */

#ifndef AQ_RING_H
#define AQ_RING_H

#include "aq_common.h"
#include "aq_vec.h"

#define AQ_XDP_HEADROOM		ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define AQ_XDP_TAILROOM		SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

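/* Note: with the default NET_SKB_PAD (one cache line at most on common
 * configs) and XDP_PACKET_HEADROOM (256 bytes), AQ_XDP_HEADROOM normally
 * evaluates to 256. AQ_XDP_TAILROOM reserves room for the skb_shared_info
 * placed at the end of the buffer so an XDP frame can later be turned into
 * an skb without copying. Exact values depend on the kernel configuration.
 */
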
struct page;
struct aq_nic_cfg_s;

struct aq_rxpage {
	struct page *page;
	dma_addr_t daddr;
	unsigned int order;
	unsigned int pg_off;
};

/*           TxC       SOP        DX         EOP
 *         +----------+----------+----------+-----------
 *   8bytes|len l3,l4 | pa       | pa       | pa
 *         +----------+----------+----------+-----------
 * 4/8bytes|len pkt   |len pkt   |          | skb
 *         +----------+----------+----------+-----------
 * 4/8bytes|is_gso    |len,flags |len       |len,is_eop
 *         +----------+----------+----------+-----------
 *
 *  This aq_ring_buff_s doesn't have an endianness dependency.
 *  It is __packed for cache line optimizations.
 */
struct __packed aq_ring_buff_s {
	union {
		/* RX/TX */
		dma_addr_t pa;
		/* RX */
		struct {
			u32 rss_hash;
			u16 next;
			u8 is_hash_l4;
			u8 rsvd1;
			struct aq_rxpage rxdata;
			u16 vlan_rx_tag;
		};
		/* EOP */
		struct {
			dma_addr_t pa_eop;
			struct sk_buff *skb;
			struct xdp_frame *xdpf;
		};
		/* TxC */
		struct {
			u32 mss;
			u8 len_l2;
			u8 len_l3;
			u8 len_l4;
			u8 is_ipv6:1;
			u8 rsvd2:7;
			u32 len_pkt;
			u16 vlan_tx_tag;
		};
	};
	union {
		struct {
			u32 len:16;
			u32 is_ip_cso:1;
			u32 is_udp_cso:1;
			u32 is_tcp_cso:1;
			u32 is_cso_err:1;
			u32 is_sop:1;
			u32 is_eop:1;
			u32 is_gso_tcp:1;
			u32 is_gso_udp:1;
			u32 is_mapped:1;
			u32 is_cleaned:1;
			u32 is_error:1;
			u32 is_vlan:1;
			u32 is_lro:1;
			u32 rsvd3:3;
			u16 eop_index;
			u16 rsvd4;
		};
		u64 flags;
	};
};

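/* Illustration only (not the driver's actual aq_ring_tx_clean()): a consumer
 * would typically advance sw_head over completed buffers and release the skb
 * (or xdp_frame) attached to the descriptor that has is_eop set; the real
 * cleanup also unmaps the DMA address held in pa. A minimal sketch:
 *
 *	while (ring->sw_head != ring->hw_head) {
 *		struct aq_ring_buff_s *buff = &ring->buff_ring[ring->sw_head];
 *
 *		if (buff->is_eop && buff->skb)
 *			dev_kfree_skb_any(buff->skb);
 *
 *		buff->flags = 0;
 *		ring->sw_head = aq_ring_next_dx(ring, ring->sw_head);
 *	}
 */
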
struct aq_ring_stats_rx_s {
	struct u64_stats_sync syncp;	/* must be first */
	u64 errors;
	u64 packets;
	u64 bytes;
	u64 lro_packets;
	u64 jumbo_packets;
	u64 alloc_fails;
	u64 skb_alloc_fails;
	u64 polls;
	u64 pg_losts;
	u64 pg_flips;
	u64 pg_reuses;
	u64 xdp_aborted;
	u64 xdp_drop;
	u64 xdp_pass;
	u64 xdp_tx;
	u64 xdp_invalid;
	u64 xdp_redirect;
};

struct aq_ring_stats_tx_s {
	struct u64_stats_sync syncp;	/* must be first */
	u64 errors;
	u64 packets;
	u64 bytes;
	u64 queue_restarts;
};

union aq_ring_stats_s {
	struct aq_ring_stats_rx_s rx;
	struct aq_ring_stats_tx_s tx;
};

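/* The counters above are updated under the u64_stats_sync protocol so 64-bit
 * values stay consistent on 32-bit hosts. A minimal reader sketch
 * (illustrative, not the driver's aggregation code):
 *
 *	unsigned int start;
 *	u64 packets, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&ring->stats.rx.syncp);
 *		packets = ring->stats.rx.packets;
 *		bytes = ring->stats.rx.bytes;
 *	} while (u64_stats_fetch_retry(&ring->stats.rx.syncp, start));
 */
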
enum atl_ring_type {
	ATL_RING_TX,
	ATL_RING_RX,
};

struct aq_ring_s {
	struct aq_ring_buff_s *buff_ring;
	u8 *dx_ring;		/* descriptors ring, dma shared mem */
	struct aq_nic_s *aq_nic;
	unsigned int idx;	/* for HW layer registers operations */
	unsigned int hw_head;
	unsigned int sw_head;
	unsigned int sw_tail;
	unsigned int size;	/* descriptors number */
	unsigned int dx_size;	/* TX or RX descriptor size, */
				/* stored here for faster math */
	u16 page_order;
	u16 page_offset;
	u16 frame_max;
	u16 tail_size;
	union aq_ring_stats_s stats;
	dma_addr_t dx_ring_pa;
	struct bpf_prog *xdp_prog;
	enum atl_ring_type ring_type;
	struct xdp_rxq_info xdp_rxq;
};

struct aq_ring_param_s {
	unsigned int vec_idx;
	unsigned int cpu;
	cpumask_t affinity_mask;
};

/* CPU virtual address of the current buffer within the RX page. */
static inline void *aq_buf_vaddr(struct aq_rxpage *rxpage)
{
	return page_to_virt(rxpage->page) + rxpage->pg_off;
}

/* DMA address of the current buffer within the RX page. */
static inline dma_addr_t aq_buf_daddr(struct aq_rxpage *rxpage)
{
	return rxpage->daddr + rxpage->pg_off;
}

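/* Example (a sketch, not necessarily how aq_ring_rx_clean() builds it): an RX
 * buffer described by aq_rxpage can be handed to an XDP program by building
 * an xdp_buff on top of aq_buf_vaddr(), leaving AQ_XDP_HEADROOM in front of
 * the frame and AQ_XDP_TAILROOM behind it:
 *
 *	struct xdp_buff xdp;
 *	void *hard_start = aq_buf_vaddr(&buff->rxdata);
 *
 *	xdp_init_buff(&xdp, ring->frame_max + AQ_XDP_HEADROOM + AQ_XDP_TAILROOM,
 *		      &ring->xdp_rxq);
 *	xdp_prepare_buff(&xdp, hard_start, AQ_XDP_HEADROOM, buff->len, false);
 */
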
/* Index of the descriptor following @dx, wrapping around at the ring size. */
static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
					   unsigned int dx)
{
	return (++dx >= self->size) ? 0U : dx;
}

/* Number of free descriptors; one slot is always kept empty so that a full
 * ring can be told apart from an empty one.
 */
static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
{
	return (self->sw_tail >= self->sw_head) ?
		(self->size - 1) - self->sw_tail + self->sw_head :
		self->sw_head - self->sw_tail - 1;
}

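/* Worked example (illustrative only): with size = 8, sw_head = 2 and
 * sw_tail = 6, aq_ring_avail_dx() returns (8 - 1) - 6 + 2 = 3 free slots.
 * A producer would typically check the free count before claiming
 * descriptors, e.g.:
 *
 *	if (aq_ring_avail_dx(ring) < needed)
 *		return -EBUSY;
 *
 *	for (i = 0; i < needed; i++)
 *		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
 */
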
int aq_ring_tx_alloc(struct aq_ring_s *self,
		     struct aq_nic_s *aq_nic,
		     unsigned int idx,
		     struct aq_nic_cfg_s *aq_nic_cfg);
int aq_ring_rx_alloc(struct aq_ring_s *self,
		     struct aq_nic_s *aq_nic,
		     unsigned int idx,
		     struct aq_nic_cfg_s *aq_nic_cfg);

int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
bool aq_ring_tx_clean(struct aq_ring_s *self);
int aq_xdp_xmit(struct net_device *dev, int num_frames,
		struct xdp_frame **frames, u32 flags);
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget);
int aq_ring_rx_fill(struct aq_ring_s *self);

int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
			  struct aq_nic_s *aq_nic, unsigned int idx,
			  unsigned int size, unsigned int dx_size);
void aq_ring_hwts_rx_free(struct aq_ring_s *self);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);

unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);

#endif /* AQ_RING_H */