/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK      256

/* The datasheets for the X710 and XL710 indicate that the maximum value for
 * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define IAVF_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define IAVF_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define IAVF_ITR_100K		    10	/* all values below must be even */
#define IAVF_ITR_50K		    20
#define IAVF_ITR_20K		    50
#define IAVF_ITR_18K		    60
#define IAVF_ITR_8K		   122
#define IAVF_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))

#define IAVF_ITR_RX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
#define IAVF_ITR_TX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
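
/* Worked conversion (illustrative, not used by the driver): the default Rx
 * setting above is (IAVF_ITR_20K | IAVF_ITR_DYNAMIC), i.e. 0x8032.
 * ITR_IS_DYNAMIC(0x8032) is true, and ITR_TO_REG(0x8032) strips the flag to
 * give 50 usec.  Since the registers use a 2 usec resolution (see the comment
 * above), 50 usec corresponds to a register value of 50 / 2 = 25.
 */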

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define IAVF_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define IAVF_INTRL_8K              125     /* 8000 ints/sec */
#define IAVF_INTRL_62K             16      /* 62500 ints/sec */
#define IAVF_INTRL_83K             12      /* 83333 ints/sec */
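
/* Worked example (illustrative): INTRL_USEC_TO_REG(IAVF_INTRL_8K) yields
 * (125 >> 2) | INTRL_ENA = 31 | 0x40 = 0x5F, and INTRL_REG_TO_USEC(0x5F)
 * gives back 31 << 2 = 124 usec, i.e. roughly the 8000 ints/sec noted above
 * (the 4 usec register resolution drops the low two bits of the request).
 */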

#define IAVF_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum iavf_dyn_idx_t {
	IAVF_IDX_ITR0 = 0,
	IAVF_IDX_ITR1 = 1,
	IAVF_IDX_ITR2 = 2,
	IAVF_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define IAVF_RX_ITR    IAVF_IDX_ITR0
#define IAVF_TX_ITR    IAVF_IDX_ITR1
#define IAVF_PE_ITR    IAVF_IDX_ITR2

/* Supported RSS offloads */
#define IAVF_DEFAULT_RSS_HENA ( \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))

#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define IAVF_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define IAVF_RXBUFFER_2048  2048
#define IAVF_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define IAVF_MAX_RXBUFFER   9728  /* largest size for single descriptor */

#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc

#define IAVF_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define IAVF_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))

static inline int iavf_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int iavf_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IAVF_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IAVF_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return iavf_compute_pad(rx_buf_len);
}

#define IAVF_SKB_PAD iavf_skb_pad()
#else
#define IAVF_2K_TOO_SMALL_WITH_PADDING false
#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
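
/* Worked example (illustrative; the exact numbers depend on the kernel
 * config, since SKB_WITH_OVERHEAD() subtracts the aligned size of struct
 * skb_shared_info): on a 4K-page, 64-byte cache line x86_64 build where the
 * shared info rounds to 320 bytes, SKB_WITH_OVERHEAD(2048) = 1728, so the 2K
 * buffer is large enough (NET_SKB_PAD + 1536 = 1600 <= 1728) and
 * iavf_compute_pad(1536) returns 1728 - 1536 = 192 bytes of headroom.
 */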

/**
 * iavf_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
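
/* Usage sketch (illustrative; assumes the Rx status bit layout from
 * iavf_type.h, e.g. IAVF_RX_DESC_STATUS_EOF_SHIFT): because the status bits
 * start at bit 0 of qword1, a caller can test end-of-frame directly with
 *
 *	iavf_test_staterr(rx_desc, BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT))
 *
 * without any additional shifting of the mask.
 */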

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IAVF_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define IAVF_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = IAVF_RX_DESC((r), (i));	\
	} while (0)

#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		IAVF_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define IAVF_MAX_BUFFER_TXD	8
#define IAVF_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IAVF_MAX_READ_REQ_SIZE		4096
#define IAVF_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define IAVF_MAX_DATA_PER_TXD_ALIGNED \
	(IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))
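
/* Worked values (illustrative): IAVF_MAX_DATA_PER_TXD is 16383 (0x3FFF) and
 * IAVF_MAX_DATA_PER_TXD_ALIGNED is 16383 & ~4095 = 12288, i.e. the 12K limit
 * that iavf_txd_use_count() below divides by.
 */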

/**
 * iavf_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int iavf_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
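
/* Worked example (illustrative): for a 32K payload, (32768 * 85) >> 20 = 2,
 * so iavf_txd_use_count(32768) returns 3 descriptors; the exact ceiling of
 * 32768 / 12288 is also 3.  For a 9000-byte frame the result is 1, since a
 * single descriptor can carry up to 12K after alignment.
 */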

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING	4

#define IAVF_TX_FLAGS_HW_VLAN			BIT(1)
#define IAVF_TX_FLAGS_SW_VLAN			BIT(2)
#define IAVF_TX_FLAGS_TSO			BIT(3)
#define IAVF_TX_FLAGS_IPV4			BIT(4)
#define IAVF_TX_FLAGS_IPV6			BIT(5)
#define IAVF_TX_FLAGS_FCCRC			BIT(6)
#define IAVF_TX_FLAGS_FSO			BIT(7)
#define IAVF_TX_FLAGS_FD_SB			BIT(9)
#define IAVF_TX_FLAGS_VXLAN_TUNNEL		BIT(10)
#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(11)
#define IAVF_TX_FLAGS_VLAN_MASK			0xffff0000
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK		0xe0000000
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT		29
#define IAVF_TX_FLAGS_VLAN_SHIFT		16

struct iavf_tx_buffer {
	struct iavf_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct iavf_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct iavf_queue_stats {
	u64 packets;
	u64 bytes;
};

struct iavf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
	u64 tx_lost_interrupt;
};

struct iavf_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum iavf_ring_state_t {
	__IAVF_TX_FDIR_INIT_DONE,
	__IAVF_TX_XPS_INIT_DONE,
	__IAVF_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define IAVF_RX_DTYPE_NO_SPLIT      0
#define IAVF_RX_DTYPE_HEADER_SPLIT  1
#define IAVF_RX_DTYPE_SPLIT_ALWAYS  2
#define IAVF_RX_SPLIT_L2      0x1
#define IAVF_RX_SPLIT_IP      0x2
#define IAVF_RX_SPLIT_TCP_UDP 0x4
#define IAVF_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct iavf_ring {
	struct iavf_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct iavf_tx_buffer *tx_bi;
		struct iavf_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;		/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR		BIT(0)
/* BIT(1) is free, was IAVF_RXR_FLAGS_BUILD_SKB_ENABLED */
/* BIT(2) is free */
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2	BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2	BIT(5)

	/* stats structs */
	struct iavf_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct iavf_tx_queue_stats tx_stats;
		struct iavf_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct iavf_vsi *vsi;		/* Backreference to associated VSI */
	struct iavf_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When iavf_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * iavf_clean_rx_ring_irq() is called
					 * for this ring.
					 */
} ____cacheline_internodealigned_in_smp;

#define IAVF_ITR_ADAPTIVE_MIN_INC	0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS	0x0002
#define IAVF_ITR_ADAPTIVE_MAX_USECS	0x007e
#define IAVF_ITR_ADAPTIVE_LATENCY	0x8000
#define IAVF_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))

struct iavf_ring_container {
	struct iavf_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define iavf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
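
/* Worked example (illustrative): with 4K pages, a ring using
 * IAVF_RXBUFFER_3072 exceeds PAGE_SIZE / 2, so iavf_rx_pg_order() returns 1
 * and iavf_rx_pg_size() is 8K (an order-1 allocation); with the default 2K
 * buffers the order stays 0 and each 4K page can back two 2K buffers.
 */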

bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);

/**
 * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += iavf_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
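
/* Worked example (illustrative): an skb with a 1400-byte linear area and two
 * 32K fragments needs 1 + 3 + 3 = 7 descriptors, which still fits the
 * 8-buffer hardware limit checked by iavf_chk_linearize() below.
 */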

/**
 * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the number of descriptors we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __iavf_maybe_stop_tx(tx_ring, size);
}

/**
 * iavf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < IAVF_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __iavf_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != IAVF_MAX_BUFFER_TXD;
}
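
/* Worked example (illustrative): a non-TSO skb whose linear area plus frags
 * map to 9 buffers (count == 9) fails the first check and is not GSO, so
 * count != IAVF_MAX_BUFFER_TXD makes iavf_chk_linearize() return true and the
 * caller must linearize; a GSO skb instead defers to __iavf_chk_linearize()
 * (declared above) for the more detailed check.
 */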

/**
 * txring_txq - helper to convert from a ring to a queue
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _IAVF_TXRX_H_ */