/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK	256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define IAVF_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define IAVF_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define IAVF_ITR_100K		10	/* all values below must be even */
#define IAVF_ITR_50K		20
#define IAVF_ITR_20K		50
#define IAVF_ITR_18K		60
#define IAVF_ITR_8K		122
#define IAVF_MAX_ITR		8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting)	((setting) & ~IAVF_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting)	__ALIGN_MASK(setting, ~IAVF_ITR_MASK)
#define ITR_IS_DYNAMIC(setting)	(!!((setting) & IAVF_ITR_DYNAMIC))

#define IAVF_ITR_RX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
#define IAVF_ITR_TX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
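
/* Worked example (illustrative only): the default Rx setting above is
 * IAVF_ITR_20K | IAVF_ITR_DYNAMIC == 0x8032.
 *
 *	ITR_IS_DYNAMIC(0x8032) -> true (adaptive ITR enabled)
 *	ITR_TO_REG(0x8032)     -> 50   (dynamic flag stripped, value in usecs)
 *
 * Because the hardware interval field has 2usec resolution, the 50usec user
 * value is halved when it is eventually programmed into the register
 * elsewhere in the driver, which is why all of the values above must be even.
 */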

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA		BIT(6)
#define IAVF_MAX_INTRL		0x3B	/* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set)	((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define IAVF_INTRL_8K		125	/* 8000 ints/sec */
#define IAVF_INTRL_62K		16	/* 62500 ints/sec */
#define IAVF_INTRL_83K		12	/* 83333 ints/sec */
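
/* Worked example (illustrative only): IAVF_INTRL_8K is 125 usec between
 * interrupts, i.e. 8000 ints/sec.
 *
 *	INTRL_USEC_TO_REG(125)  -> (125 >> 2) | INTRL_ENA == 0x5F
 *	INTRL_REG_TO_USEC(0x5F) -> (0x1F << 2) == 124
 *
 * The low two bits are lost in the round trip because the register only has
 * 4 usec resolution.
 */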

#define IAVF_QUEUE_END_OF_LIST 0x7FF

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum iavf_dyn_idx_t {
	IAVF_IDX_ITR0 = 0,
	IAVF_IDX_ITR1 = 1,
	IAVF_IDX_ITR2 = 2,
	IAVF_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define IAVF_RX_ITR	IAVF_IDX_ITR0
#define IAVF_TX_ITR	IAVF_IDX_ITR1
#define IAVF_PE_ITR	IAVF_IDX_ITR2

/* Supported RSS offloads */
#define IAVF_DEFAULT_RSS_HENA ( \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))

#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Helpers to advance an Rx descriptor ring index, wrapping at ring->count */
#define IAVF_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define IAVF_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = IAVF_RX_DESC((r), (i));	\
	} while (0)

#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		IAVF_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)
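
/* Sketch (illustrative only, not the actual Rx clean-up code) of how the
 * helpers above are used while walking the descriptor ring:
 *
 *	rx_desc = IAVF_RX_DESC(rx_ring, i);
 *	while (descriptor i is marked done by hardware) {
 *		... process rx_desc ...
 *		IAVF_RX_NEXT_DESC_PREFETCH(rx_ring, i, rx_desc);
 *	}
 *
 * IAVF_RX_INCREMENT() performs the same wrap-around step but also publishes
 * the new index to the ring's next_to_clean.
 */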

#define IAVF_MAX_BUFFER_TXD	8
#define IAVF_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IAVF_MAX_READ_REQ_SIZE		4096
#define IAVF_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define IAVF_MAX_DATA_PER_TXD_ALIGNED \
	(IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))
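
/* With the values above, IAVF_MAX_DATA_PER_TXD_ALIGNED works out to
 * (16384 - 1) & ~(4096 - 1) == 12288, i.e. 12K of data per descriptor once
 * the 4K read-request alignment is applied.
 */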

/**
 * iavf_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int iavf_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
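
/* Worked example (illustrative only): a 64KB send gives
 *
 *	iavf_txd_use_count(65536) == ((65536 * 85) >> 20) + 1 == 5 + 1 == 6
 *
 * descriptors, matching 65536 / 12288 rounded up. Right at a multiple such
 * as size == 12288 the helper returns 1 rather than 2, relying on the
 * 4K - 1 of wiggle room per descriptor described above.
 */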

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING	4

#define IAVF_TX_FLAGS_HW_VLAN			BIT(1)
#define IAVF_TX_FLAGS_SW_VLAN			BIT(2)
#define IAVF_TX_FLAGS_TSO			BIT(3)
#define IAVF_TX_FLAGS_IPV4			BIT(4)
#define IAVF_TX_FLAGS_IPV6			BIT(5)
#define IAVF_TX_FLAGS_FCCRC			BIT(6)
#define IAVF_TX_FLAGS_FSO			BIT(7)
#define IAVF_TX_FLAGS_FD_SB			BIT(9)
#define IAVF_TX_FLAGS_VXLAN_TUNNEL		BIT(10)
#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(11)
#define IAVF_TX_FLAGS_VLAN_MASK			0xffff0000
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK		0xe0000000
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT		29
#define IAVF_TX_FLAGS_VLAN_SHIFT		16
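
/* The upper 16 bits of tx_flags carry the VLAN tag. As a sketch (not the
 * exact driver code), a tag is packed and its priority later recovered as:
 *
 *	tx_flags |= (u32)vlan_tag << IAVF_TX_FLAGS_VLAN_SHIFT;
 *	prio = (tx_flags & IAVF_TX_FLAGS_VLAN_PRIO_MASK) >>
 *	       IAVF_TX_FLAGS_VLAN_PRIO_SHIFT;
 *
 * The 3-bit PCP field of the tag ends up in bits 29-31, which is exactly
 * what the PRIO mask and shift select.
 */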

struct iavf_tx_buffer {
	struct iavf_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct iavf_queue_stats {
	u64 packets;
	u64 bytes;
};

struct iavf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
};

struct iavf_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define IAVF_RX_DTYPE_NO_SPLIT		0
#define IAVF_RX_DTYPE_HEADER_SPLIT	1
#define IAVF_RX_DTYPE_SPLIT_ALWAYS	2
#define IAVF_RX_SPLIT_L2		0x1
#define IAVF_RX_SPLIT_IP		0x2
#define IAVF_RX_SPLIT_TCP_UDP		0x4
#define IAVF_RX_SPLIT_SCTP		0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct iavf_ring {
	struct iavf_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	union {
		struct page_pool *pp;	/* Used on Rx for buffer management */
		struct device *dev;	/* Used on Tx for DMA mapping */
	};
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct libeth_fqe *rx_fqes;
		struct iavf_tx_buffer *tx_bi;
	};
	u8 __iomem *tail;
	u32 truesize;

	u16 queue_index;		/* Queue number of ring */

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u16 rxdid;			/* Rx descriptor format */

	u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define IAVF_TXR_FLAGS_ARM_WB			BIT(1)
/* BIT(2) is free */
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2	BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2	BIT(5)
#define IAVF_TXRX_FLAGS_HW_TSTAMP		BIT(6)

	/* stats structs */
	struct iavf_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct iavf_tx_queue_stats tx_stats;
		struct iavf_rx_queue_stats rx_stats;
	};

	int prev_pkt_ctr;		/* For Tx stall detection */
	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct iavf_vsi *vsi;		/* Backreference to associated VSI */
	struct iavf_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	struct sk_buff *skb;		/* When iavf_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * iavf_clean_rx_ring_irq() is called
					 * for this ring.
					 */

	struct iavf_ptp *ptp;

	u32 rx_buf_len;
	struct net_shaper q_shaper;
	bool q_shaper_update;
} ____cacheline_internodealigned_in_smp;

#define IAVF_ITR_ADAPTIVE_MIN_INC	0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS	0x0002
#define IAVF_ITR_ADAPTIVE_MAX_USECS	0x007e
#define IAVF_ITR_ADAPTIVE_LATENCY	0x8000
#define IAVF_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x)		(!((x) & IAVF_ITR_ADAPTIVE_LATENCY))

struct iavf_ring_container {
	struct iavf_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define iavf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
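
/* Example use (sketch only): walking every Tx ring attached to a q_vector's
 * Tx ring container, e.g. from the NAPI poll routine:
 *
 *	struct iavf_ring *ring;
 *
 *	iavf_for_each_ring(ring, q_vector->tx)
 *		... clean "ring" ...
 *
 * The container's ->ring pointer is the head of a singly linked list built
 * through each ring's ->next member.
 */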

bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);

/**
 * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since we
 * need at least one descriptor.
 **/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += iavf_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __iavf_maybe_stop_tx(tx_ring, size);
}

/**
 * iavf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < IAVF_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __iavf_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != IAVF_MAX_BUFFER_TXD;
}
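
/* Sketch of how the helpers above typically compose on the transmit path
 * (illustrative only, not the exact driver code):
 *
 *	count = iavf_xmit_descriptor_count(skb);
 *	if (iavf_chk_linearize(skb, count)) {
 *		if (__skb_linearize(skb))
 *			goto drop;
 *		count = iavf_txd_use_count(skb->len);
 *	}
 *	if (iavf_maybe_stop_tx(tx_ring, count + reserve))
 *		return NETDEV_TX_BUSY;
 *
 * where "reserve" stands in for the extra context/command descriptors a
 * given send may need on top of its data descriptors.
 */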

/**
 * txring_txq - helper to convert from a ring to a queue
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

#endif /* _IAVF_TXRX_H_ */