/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_

#include <net/page_pool/helpers.h>
#include <net/tcp.h>
#include <net/netdev_queues.h>

#include "virtchnl2_lan_desc.h"

#define IDPF_LARGE_MAX_Q			256
#define IDPF_MAX_Q				16
#define IDPF_MIN_Q				2
/* Mailbox Queue */
#define IDPF_MAX_MBXQ				1

#define IDPF_MIN_TXQ_DESC			64
#define IDPF_MIN_RXQ_DESC			64
#define IDPF_MIN_TXQ_COMPLQ_DESC		256
#define IDPF_MAX_QIDS				256

/* Number of descriptors in a queue should be a multiple of 32. RX queue
 * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
 * to achieve BufQ descriptors aligned to 32
 */
#define IDPF_REQ_DESC_MULTIPLE			32
#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)

#define IDPF_MAX_DESCS				8160
#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
#define MIN_SUPPORT_TXDID (\
	VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
	VIRTCHNL2_TXDID_FLEX_TSO_CTX)

#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS		1
#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS		1
#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP		4
#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP		4

#define IDPF_COMPLQ_PER_GROUP			1
#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP		1
#define IDPF_MAX_BUFQS_PER_RXQ_GRP		2
#define IDPF_BUFQ2_ENA				1
#define IDPF_NUMQ_PER_CHUNK			1

#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP		1
#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP		1

/* Default vector sharing */
#define IDPF_MBX_Q_VEC		1
#define IDPF_MIN_Q_VEC		1

#define IDPF_DFLT_TX_Q_DESC_COUNT		512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512
#define IDPF_DFLT_RX_Q_DESC_COUNT		512

/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
 * given RX completion queue has descriptors. This includes _ALL_ buffer
 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
 * you have a total of 1024 buffers so your RX queue _must_ have at least that
 * many descriptors. This macro divides a given number of RX descriptors by
 * the number of buffer queues to calculate how many descriptors each buffer
 * queue can have without overrunning the RX queue.
 *
 * If you give hardware more buffers than completion descriptors, then if
 * hardware gets a chance to post more than a ring wrap of descriptors before
 * SW gets an interrupt and overwrites the SW head, the gen bit in the
 * descriptor will be wrong. Any overwritten descriptors' buffers will be
 * gone forever and SW has no reasonable way to tell that this has happened.
 * From the SW perspective, when we finally get an interrupt, it looks like
 * we're still waiting for a descriptor to be done, stalling forever.
 */
#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))
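
/* Worked example of the rule above: with IDPF_MAX_BUFQS_PER_RXQ_GRP (2)
 * buffer queues and an RX queue of 512 descriptors,
 * IDPF_RX_BUFQ_DESC_COUNT(512, 2) caps each buffer queue at 256 descriptors,
 * so the 2 * 256 = 512 buffers in flight never exceed the RX queue's
 * descriptor count.
 */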

#define IDPF_RX_BUFQ_WORKING_SET(rxq)		((rxq)->desc_count - 1)

#define IDPF_RX_BUMP_NTC(rxq, ntc)				\
do {								\
	if (unlikely(++(ntc) == (rxq)->desc_count)) {		\
		ntc = 0;					\
		change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags);	\
	}							\
} while (0)

#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx)			\
do {								\
	if (unlikely(++(idx) == (q)->desc_count))		\
		idx = 0;					\
} while (0)

#define IDPF_RX_HDR_SIZE			256
#define IDPF_RX_BUF_2048			2048
#define IDPF_RX_BUF_4096			4096
#define IDPF_RX_BUF_STRIDE			32
#define IDPF_RX_BUF_POST_STRIDE			16
#define IDPF_LOW_WATERMARK			64
/* Size of header buffer specifically for header split */
#define IDPF_HDR_BUF_SIZE			256
#define IDPF_PACKET_HDR_PAD	\
	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
#define IDPF_TX_TSO_MIN_MSS			88

/* Minimum number of descriptors between 2 descriptors with the RE bit set;
 * only relevant in flow scheduling mode
 */
#define IDPF_TX_SPLITQ_RE_MIN_GAP	64

#define IDPF_RX_BI_BUFID_S		0
#define IDPF_RX_BI_BUFID_M		GENMASK(14, 0)
#define IDPF_RX_BI_GEN_S		15
#define IDPF_RX_BI_GEN_M		BIT(IDPF_RX_BI_GEN_S)
#define IDPF_RXD_EOF_SPLITQ		VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ		VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M

#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i)	\
	(&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i)	\
	(&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])

#define IDPF_BASE_TX_DESC(txq, i)	\
	(&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
#define IDPF_BASE_TX_CTX_DESC(txq, i) \
	(&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i)	\
	(&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))

#define IDPF_FLEX_TX_DESC(txq, i) \
	(&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
#define IDPF_FLEX_TX_CTX_DESC(txq, i)	\
	(&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))

#define IDPF_DESC_UNUSED(txq)     \
	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
	(txq)->next_to_clean - (txq)->next_to_use - 1)
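
/* Worked example: on a 512-descriptor ring with next_to_clean == 10 and
 * next_to_use == 500, IDPF_DESC_UNUSED() yields 512 + 10 - 500 - 1 = 21 free
 * descriptors. The trailing "- 1" keeps one slot permanently unused so that
 * a completely full ring is never mistaken for an empty one.
 */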

#define IDPF_TX_BUF_RSV_UNUSED(txq)	((txq)->buf_stack.top)
#define IDPF_TX_BUF_RSV_LOW(txq)	(IDPF_TX_BUF_RSV_UNUSED(txq) < \
					 (txq)->desc_count >> 2)

#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
 * completions that are expected to arrive on the TX completion queue.
 */
#define IDPF_TX_COMPLQ_PENDING(txq)	\
	(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
	0 : U64_MAX) + \
	(txq)->num_completions_pending - (txq)->complq->num_completions)

#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH	16
#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG	-1
/* Adjust the generation for the completion tag and wrap if necessary */
#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
	((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
	0 : (txq)->compl_tag_cur_gen)

#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)

#define IDPF_TX_FLAGS_TSO		BIT(0)
#define IDPF_TX_FLAGS_IPV4		BIT(1)
#define IDPF_TX_FLAGS_IPV6		BIT(2)
#define IDPF_TX_FLAGS_TUNNEL		BIT(3)

union idpf_tx_flex_desc {
	struct idpf_flex_tx_desc q; /* queue based scheduling */
	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};

/**
 * struct idpf_tx_buf
 * @next_to_watch: Next descriptor to clean
 * @skb: Pointer to the skb
 * @dma: DMA address
 * @len: DMA length
 * @bytecount: Number of bytes
 * @gso_segs: Number of GSO segments
 * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
 *	       with completion tag returned in buffer completion event.
 *	       Because the completion tag is expected to be the same in all
 *	       data descriptors for a given packet, and a single packet can
 *	       span multiple buffers, we need this field to track all
 *	       buffers associated with this completion tag independently of
 *	       the buf_id. The tag consists of an N bit buf_id and M upper
 *	       order "generation bits". See compl_tag_bufid_m and
 *	       compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
 *	       to indicate the tag is not valid.
 * @ctx_entry: Singleq only. Used to indicate the corresponding entry
 *	       in the descriptor ring was used for a context descriptor and
 *	       this buffer entry should be skipped.
 */
struct idpf_tx_buf {
	void *next_to_watch;
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	unsigned int bytecount;
	unsigned short gso_segs;

	union {
		int compl_tag;

		bool ctx_entry;
	};
};

struct idpf_tx_stash {
	struct hlist_node hlist;
	struct idpf_tx_buf buf;
};

/**
 * struct idpf_buf_lifo - LIFO for managing OOO completions
 * @top: Used to know how many buffers are left
 * @size: Total size of LIFO
 * @bufs: Backing array
 */
struct idpf_buf_lifo {
	u16 top;
	u16 size;
	struct idpf_tx_stash **bufs;
};
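
/* Minimal push/pop sketch for the LIFO above (illustrative only; the helper
 * names are hypothetical and this is not the driver's implementation).
 * @top counts the stashes still available, which is why
 * IDPF_TX_BUF_RSV_UNUSED() can read it directly as the free-entry count.
 */
static inline int idpf_buf_lifo_push_sketch(struct idpf_buf_lifo *stack,
					    struct idpf_tx_stash *buf)
{
	if (unlikely(stack->top == stack->size))
		return -ENOSPC;

	stack->bufs[stack->top++] = buf;

	return 0;
}

static inline struct idpf_tx_stash *
idpf_buf_lifo_pop_sketch(struct idpf_buf_lifo *stack)
{
	if (unlikely(!stack->top))
		return NULL;

	return stack->bufs[--stack->top];
}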

/**
 * struct idpf_tx_offload_params - Offload parameters for a given packet
 * @tx_flags: Feature flags enabled for this packet
 * @hdr_offsets: Offset parameter for single queue model
 * @cd_tunneling: Type of tunneling enabled for single queue model
 * @tso_len: Total length of payload to segment
 * @mss: Segment size
 * @tso_segs: Number of segments to be sent
 * @tso_hdr_len: Length of headers to be duplicated
 * @td_cmd: Command field to be inserted into descriptor
 */
struct idpf_tx_offload_params {
	u32 tx_flags;

	u32 hdr_offsets;
	u32 cd_tunneling;

	u32 tso_len;
	u16 mss;
	u16 tso_segs;
	u16 tso_hdr_len;

	u16 td_cmd;
};

/**
 * struct idpf_tx_splitq_params
 * @dtype: General descriptor info
 * @eop_cmd: Type of EOP
 * @compl_tag: Associated tag for completion
 * @td_tag: Descriptor tunneling tag
 * @offload: Offload parameters
 */
struct idpf_tx_splitq_params {
	enum idpf_tx_desc_dtype_value dtype;
	u16 eop_cmd;
	union {
		u16 compl_tag;
		u16 td_tag;
	};

	struct idpf_tx_offload_params offload;
};

enum idpf_tx_ctx_desc_eipt_offload {
	IDPF_TX_CTX_EXT_IP_NONE         = 0x0,
	IDPF_TX_CTX_EXT_IP_IPV6         = 0x1,
	IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
	IDPF_TX_CTX_EXT_IP_IPV4         = 0x3
};

/* Checksum offload bits decoded from the receive descriptor. */
struct idpf_rx_csum_decoded {
	u32 l3l4p : 1;
	u32 ipe : 1;
	u32 eipe : 1;
	u32 eudpe : 1;
	u32 ipv6exadd : 1;
	u32 l4e : 1;
	u32 pprs : 1;
	u32 nat : 1;
	u32 raw_csum_inv : 1;
	u32 raw_csum : 16;
};

struct idpf_rx_extracted {
	unsigned int size;
	u16 rx_ptype;
};

#define IDPF_TX_COMPLQ_CLEAN_BUDGET	256
#define IDPF_TX_MIN_PKT_LEN		17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR	1
#define IDPF_TX_DESCS_PER_CACHE_LINE	(L1_CACHE_BYTES / \
					 sizeof(struct idpf_flex_tx_desc))
#define IDPF_TX_DESCS_FOR_CTX		1
/* TX descriptors needed, worst case */
#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
			     IDPF_TX_DESCS_PER_CACHE_LINE + \
			     IDPF_TX_DESCS_FOR_SKB_DATA_PTR)

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IDPF_TX_MAX_READ_REQ_SIZE	SZ_4K
#define IDPF_TX_MAX_DESC_DATA		(SZ_16K - 1)
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
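
/* ALIGN_DOWN(SZ_16K - 1, SZ_4K) works out to 12288, so the effective
 * per-descriptor data limit is 12K; this is the "no more than 12K of data
 * per descriptor" assumption documented at idpf_size_to_txd_count() below.
 */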

#define IDPF_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
#define IDPF_RX_DESC(rxq, i)	\
	(&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))

struct idpf_rx_buf {
	struct page *page;
	unsigned int page_offset;
	u16 truesize;
};

#define IDPF_RX_MAX_PTYPE_PROTO_IDS    32
#define IDPF_RX_MAX_PTYPE_SZ	(sizeof(struct virtchnl2_ptype) + \
				 (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
#define IDPF_RX_PTYPE_HDR_SZ	sizeof(struct virtchnl2_get_ptype_info)
#define IDPF_RX_MAX_PTYPES_PER_BUF	\
	DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
			   IDPF_RX_MAX_PTYPE_SZ)

#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)

#define IDPF_TUN_IP_GRE (\
	IDPF_PTYPE_TUNNEL_IP |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT)

#define IDPF_TUN_IP_GRE_MAC (\
	IDPF_TUN_IP_GRE |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)

#define IDPF_RX_MAX_PTYPE	1024
#define IDPF_RX_MAX_BASE_PTYPE	256
#define IDPF_INVALID_PTYPE_ID	0xFFFF

/* Packet type non-ip values */
enum idpf_rx_ptype_l2 {
	IDPF_RX_PTYPE_L2_RESERVED	= 0,
	IDPF_RX_PTYPE_L2_MAC_PAY2	= 1,
	IDPF_RX_PTYPE_L2_TIMESYNC_PAY2	= 2,
	IDPF_RX_PTYPE_L2_FIP_PAY2	= 3,
	IDPF_RX_PTYPE_L2_OUI_PAY2	= 4,
	IDPF_RX_PTYPE_L2_MACCNTRL_PAY2	= 5,
	IDPF_RX_PTYPE_L2_LLDP_PAY2	= 6,
	IDPF_RX_PTYPE_L2_ECP_PAY2	= 7,
	IDPF_RX_PTYPE_L2_EVB_PAY2	= 8,
	IDPF_RX_PTYPE_L2_QCN_PAY2	= 9,
	IDPF_RX_PTYPE_L2_EAPOL_PAY2	= 10,
	IDPF_RX_PTYPE_L2_ARP		= 11,
};

enum idpf_rx_ptype_outer_ip {
	IDPF_RX_PTYPE_OUTER_L2	= 0,
	IDPF_RX_PTYPE_OUTER_IP	= 1,
};

#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv)			\
	(((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) &&	\
	 ((ptype)->outer_ip_ver == (ipv)))

enum idpf_rx_ptype_outer_ip_ver {
	IDPF_RX_PTYPE_OUTER_NONE	= 0,
	IDPF_RX_PTYPE_OUTER_IPV4	= 1,
	IDPF_RX_PTYPE_OUTER_IPV6	= 2,
};

enum idpf_rx_ptype_outer_fragmented {
	IDPF_RX_PTYPE_NOT_FRAG	= 0,
	IDPF_RX_PTYPE_FRAG	= 1,
};

enum idpf_rx_ptype_tunnel_type {
	IDPF_RX_PTYPE_TUNNEL_NONE		= 0,
	IDPF_RX_PTYPE_TUNNEL_IP_IP		= 1,
	IDPF_RX_PTYPE_TUNNEL_IP_GRENAT		= 2,
	IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC	= 3,
	IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN	= 4,
};

enum idpf_rx_ptype_tunnel_end_prot {
	IDPF_RX_PTYPE_TUNNEL_END_NONE	= 0,
	IDPF_RX_PTYPE_TUNNEL_END_IPV4	= 1,
	IDPF_RX_PTYPE_TUNNEL_END_IPV6	= 2,
};

enum idpf_rx_ptype_inner_prot {
	IDPF_RX_PTYPE_INNER_PROT_NONE		= 0,
	IDPF_RX_PTYPE_INNER_PROT_UDP		= 1,
	IDPF_RX_PTYPE_INNER_PROT_TCP		= 2,
	IDPF_RX_PTYPE_INNER_PROT_SCTP		= 3,
	IDPF_RX_PTYPE_INNER_PROT_ICMP		= 4,
	IDPF_RX_PTYPE_INNER_PROT_TIMESYNC	= 5,
};

enum idpf_rx_ptype_payload_layer {
	IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE	= 0,
	IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2	= 1,
	IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3	= 2,
	IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4	= 3,
};

enum idpf_tunnel_state {
	IDPF_PTYPE_TUNNEL_IP                    = BIT(0),
	IDPF_PTYPE_TUNNEL_IP_GRENAT             = BIT(1),
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC         = BIT(2),
};

struct idpf_ptype_state {
	bool outer_ip;
	bool outer_frag;
	u8 tunnel_state;
};

struct idpf_rx_ptype_decoded {
	u32 ptype:10;
	u32 known:1;
	u32 outer_ip:1;
	u32 outer_ip_ver:2;
	u32 outer_frag:1;
	u32 tunnel_type:3;
	u32 tunnel_end_prot:2;
	u32 tunnel_end_frag:1;
	u32 inner_prot:4;
	u32 payload_layer:3;
};

/**
 * enum idpf_queue_flags_t
 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
 *		      identify new descriptor writebacks on the ring. HW sets
 *		      the gen bit to 1 on the first writeback of any given
 *		      descriptor. After the ring wraps, HW sets the gen bit of
 *		      those descriptors to 0, and continues flipping
 *		      0->1 or 1->0 on each ring wrap. SW maintains its own
 *		      gen bit to know what value will indicate writebacks on
 *		      the next pass around the ring. E.g. it is initialized
 *		      to 1 and knows that reading a gen bit of 1 in any
 *		      descriptor on the initial pass of the ring indicates a
 *		      writeback. It also flips on every ring wrap.
 * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
 *			 bit and RFLQ_GEN is the SW bit.
 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
 * @__IDPF_Q_POLL_MODE: Enable poll mode
 * @__IDPF_Q_FLAGS_NBITS: Must be last
 */
enum idpf_queue_flags_t {
	__IDPF_Q_GEN_CHK,
	__IDPF_RFLQ_GEN_CHK,
	__IDPF_Q_FLOW_SCH_EN,
	__IDPF_Q_SW_MARKER,
	__IDPF_Q_POLL_MODE,

	__IDPF_Q_FLAGS_NBITS,
};
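
/* A minimal sketch (hypothetical helper, not driver code) of how the SW gen
 * bit described above is consumed: a splitq descriptor is treated as a new
 * writeback only when the gen bit HW wrote into it matches the SW-tracked
 * value, which IDPF_RX_BUMP_NTC() flips on every ring wrap.
 */
static inline bool idpf_queue_gen_matches(const unsigned long *flags,
					  u16 desc_gen)
{
	return test_bit(__IDPF_Q_GEN_CHK, flags) == desc_gen;
}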

/**
 * struct idpf_vec_regs
 * @dyn_ctl_reg: Dynamic control interrupt register offset
 * @itrn_reg: Interrupt Throttling Rate register offset
 * @itrn_index_spacing: Register spacing between ITR registers of the same
 *			vector
 */
struct idpf_vec_regs {
	u32 dyn_ctl_reg;
	u32 itrn_reg;
	u32 itrn_index_spacing;
};

/**
 * struct idpf_intr_reg
 * @dyn_ctl: Dynamic control interrupt register
 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 * @dyn_ctl_itridx_s: Register bit offset for ITR index
 * @dyn_ctl_itridx_m: Mask for ITR index
 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
 * @rx_itr: RX ITR register
 * @tx_itr: TX ITR register
 * @icr_ena: Interrupt cause register offset
 * @icr_ena_ctlq_m: Mask for ICR
 */
struct idpf_intr_reg {
	void __iomem *dyn_ctl;
	u32 dyn_ctl_intena_m;
	u32 dyn_ctl_itridx_s;
	u32 dyn_ctl_itridx_m;
	u32 dyn_ctl_intrvl_s;
	void __iomem *rx_itr;
	void __iomem *tx_itr;
	void __iomem *icr_ena;
	u32 icr_ena_ctlq_m;
};

/**
 * struct idpf_q_vector
 * @vport: Vport back pointer
 * @affinity_mask: CPU affinity mask
 * @napi: napi handler
 * @v_idx: Vector index
 * @intr_reg: See struct idpf_intr_reg
 * @num_txq: Number of TX queues
 * @tx: Array of TX queues to service
 * @tx_dim: Data for TX net_dim algorithm
 * @tx_itr_value: TX interrupt throttling rate
 * @tx_intr_mode: Dynamic ITR or not
 * @tx_itr_idx: TX ITR index
 * @num_rxq: Number of RX queues
 * @rx: Array of RX queues to service
 * @rx_dim: Data for RX net_dim algorithm
 * @rx_itr_value: RX interrupt throttling rate
 * @rx_intr_mode: Dynamic ITR or not
 * @rx_itr_idx: RX ITR index
 * @num_bufq: Number of buffer queues
 * @bufq: Array of buffer queues to service
 * @total_events: Number of interrupts processed
 * @name: Queue vector name
 */
struct idpf_q_vector {
	struct idpf_vport *vport;
	cpumask_t affinity_mask;
	struct napi_struct napi;
	u16 v_idx;
	struct idpf_intr_reg intr_reg;

	u16 num_txq;
	struct idpf_queue **tx;
	struct dim tx_dim;
	u16 tx_itr_value;
	bool tx_intr_mode;
	u32 tx_itr_idx;

	u16 num_rxq;
	struct idpf_queue **rx;
	struct dim rx_dim;
	u16 rx_itr_value;
	bool rx_intr_mode;
	u32 rx_itr_idx;

	u16 num_bufq;
	struct idpf_queue **bufq;

	u16 total_events;
	char *name;
};

struct idpf_rx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t rsc_pkts;
	u64_stats_t hw_csum_err;
	u64_stats_t hsplit_pkts;
	u64_stats_t hsplit_buf_ovf;
	u64_stats_t bad_descs;
};

struct idpf_tx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t lso_pkts;
	u64_stats_t linearize;
	u64_stats_t q_busy;
	u64_stats_t skb_drops;
	u64_stats_t dma_map_errs;
};

struct idpf_cleaned_stats {
	u32 packets;
	u32 bytes;
};

union idpf_queue_stats {
	struct idpf_rx_queue_stats rx;
	struct idpf_tx_queue_stats tx;
};

#define IDPF_ITR_DYNAMIC	1
#define IDPF_ITR_MAX		0x1FE0
#define IDPF_ITR_20K		0x0032
#define IDPF_ITR_GRAN_S		1	/* Assume ITR granularity is 2us */
#define IDPF_ITR_MASK		0x1FFE  /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & IDPF_ITR_MASK)
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF		IDPF_ITR_20K
#define IDPF_ITR_RX_DEF		IDPF_ITR_20K
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX	3
#define IDPF_ITR_IDX_SPACING(spacing, dflt)	(spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX		1

/**
 * struct idpf_queue
 * @dev: Device back pointer for DMA mapping
 * @vport: Back pointer to associated vport
 * @txq_grp: See struct idpf_txq_group
 * @rxq_grp: See struct idpf_rxq_group
 * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
 *	 buffer queue uses this index to determine which group of refill queues
 *	 to clean.
 *	 For TX queue, it is used as index to map between TX queue group and
 *	 hot path TX pointers stored in vport. Used in both singleq/splitq.
 *	 For RX queue, it is used to index into the total RX queues across
 *	 groups and is used for skb reporting.
 * @tail: Tail offset. Used for both queue models, single and split. In splitq
 *	  model relevant only for TX queue and RX queue.
 * @tx_buf: See struct idpf_tx_buf
 * @rx_buf: Struct with RX buffer related members
 * @rx_buf.buf: See struct idpf_rx_buf
 * @rx_buf.hdr_buf_pa: DMA handle
 * @rx_buf.hdr_buf_va: Virtual address
 * @pp: Page pool pointer
 * @skb: Pointer to the skb
 * @q_type: Queue type (TX, RX, TX completion, RX buffer)
 * @q_id: Queue id
 * @desc_count: Number of descriptors
 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
 *		 and bufq.
 * @next_to_clean: Next descriptor to clean. In split queue model, only
 *		   relevant to TX completion queue and RX queue.
 * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
 *		   only relevant to RX queue.
 * @flags: See enum idpf_queue_flags_t
 * @q_stats: See union idpf_queue_stats
 * @stats_sync: See struct u64_stats_sync
 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 *		   the TX completion queue, it can be for any TXQ associated
 *		   with that completion queue. This means we can clean up to
 *		   N TXQs during a single call to clean the completion queue.
 *		   cleaned_bytes|pkts tracks the clean stats per TXQ during
 *		   that single call to clean the completion queue. By doing so,
 *		   we can update BQL with aggregate cleaned stats for each TXQ
 *		   only once at the end of the cleaning routine.
 * @cleaned_pkts: Number of packets cleaned for the case described above
 * @rx_hsplit_en: RX headsplit enable
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 * @rx_max_pkt_size: RX max packet size
 * @rx_buf_stride: RX buffer stride
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rxdids: Supported RX descriptor ids
 * @q_vector: Backreference to associated vector
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @desc_ring: Descriptor ring memory
 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @tx_min_pkt_len: Min supported packet length
 * @num_completions: Only relevant for TX completion queue. It tracks the
 *		     number of completions received to compare against the
 *		     number of completions pending, as accumulated by the
 *		     TX queues.
 * @buf_stack: Stack of empty buffers to store buffer info for out of order
 *	       buffer completions. See struct idpf_buf_lifo.
 * @compl_tag_bufid_m: Completion tag buffer id mask
 * @compl_tag_gen_s: Completion tag generation bit
 *	The format of the completion tag will change based on the TXQ
 *	descriptor ring size so that we can maintain roughly the same level
 *	of "uniqueness" across all descriptor sizes. For example, if the
 *	TXQ descriptor ring size is 64 (the minimum size supported), the
 *	completion tag will be formatted as below:
 *	15                 6 5         0
 *	--------------------------------
 *	|    GEN=0-1023     |IDX = 0-63|
 *	--------------------------------
 *
 *	This gives us 64*1024 = 65536 possible unique values. Similarly, if
 *	the TXQ descriptor ring size is 8160 (the maximum size supported),
 *	the completion tag will be formatted as below:
 *	15 13 12                       0
 *	--------------------------------
 *	|GEN |       IDX = 0-8159      |
 *	--------------------------------
 *
 *	This gives us 8*8160 = 65280 possible unique values.
 * @compl_tag_cur_gen: Used to keep track of current completion tag generation
 * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
 * @sched_buf_hash: Hash table to store buffers
 */
struct idpf_queue {
	struct device *dev;
	struct idpf_vport *vport;
	union {
		struct idpf_txq_group *txq_grp;
		struct idpf_rxq_group *rxq_grp;
	};
	u16 idx;
	void __iomem *tail;
	union {
		struct idpf_tx_buf *tx_buf;
		struct {
			struct idpf_rx_buf *buf;
			dma_addr_t hdr_buf_pa;
			void *hdr_buf_va;
		} rx_buf;
	};
	struct page_pool *pp;
	struct sk_buff *skb;
	u16 q_type;
	u32 q_id;
	u16 desc_count;

	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;
	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);

	union idpf_queue_stats q_stats;
	struct u64_stats_sync stats_sync;

	u32 cleaned_bytes;
	u16 cleaned_pkts;

	bool rx_hsplit_en;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	u16 rx_max_pkt_size;
	u16 rx_buf_stride;
	u8 rx_buffer_low_watermark;
	u64 rxdids;
	struct idpf_q_vector *q_vector;
	unsigned int size;
	dma_addr_t dma;
	void *desc_ring;

	u16 tx_max_bufs;
	u8 tx_min_pkt_len;

	u32 num_completions;

	struct idpf_buf_lifo buf_stack;

	u16 compl_tag_bufid_m;
	u16 compl_tag_gen_s;

	u16 compl_tag_cur_gen;
	u16 compl_tag_gen_max;

	DECLARE_HASHTABLE(sched_buf_hash, 12);
} ____cacheline_internodealigned_in_smp;
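
/* Illustrative decode of the completion tag layout documented above
 * (hypothetical helpers, not driver code). For the minimum 64-entry TXQ,
 * compl_tag_gen_s is 6 and compl_tag_bufid_m is GENMASK(5, 0), so a tag of
 * 0x47 splits into GEN 1 and buffer index 7.
 */
static inline u16 idpf_tx_compl_tag_bufid(const struct idpf_queue *txq,
					  u16 compl_tag)
{
	/* Low N bits index the buffer within the TXQ */
	return compl_tag & txq->compl_tag_bufid_m;
}

static inline u16 idpf_tx_compl_tag_gen(const struct idpf_queue *txq,
					u16 compl_tag)
{
	/* Remaining upper bits are the generation count */
	return compl_tag >> txq->compl_tag_gen_s;
}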

/**
 * struct idpf_sw_queue
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: Buffer to allocate at
 * @flags: See enum idpf_queue_flags_t
 * @ring: Pointer to the ring
 * @desc_count: Descriptor count
 * @dev: Device back pointer for DMA mapping
 *
 * Software queues are used in splitq mode to manage buffers between rxq
 * producer and the bufq consumer.  These are required in order to maintain a
 * lockless buffer management system and are strictly software only constructs.
 */
struct idpf_sw_queue {
	u16 next_to_clean;
	u16 next_to_alloc;
	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 *ring;
	u16 desc_count;
	struct device *dev;
} ____cacheline_internodealigned_in_smp;

/**
 * struct idpf_rxq_set
 * @rxq: RX queue
 * @refillq0: Pointer to refill queue 0
 * @refillq1: Pointer to refill queue 1
 *
 * Splitq only.  idpf_rxq_set associates an rxq with an array of refillqs.
 * Each rxq needs a refillq to return used buffers back to the respective bufq.
 * Bufqs then clean these refillqs for buffers to give to hardware.
 */
struct idpf_rxq_set {
	struct idpf_queue rxq;
	struct idpf_sw_queue *refillq0;
	struct idpf_sw_queue *refillq1;
};

/**
 * struct idpf_bufq_set
 * @bufq: Buffer queue
 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
 *		  in idpf_rxq_group.
 * @refillqs: Pointer to refill queues array.
 *
 * Splitq only. idpf_bufq_set associates a bufq with an array of refillqs.
 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
 * Used buffers received by rxqs will be put on refillqs which bufqs will
 * clean to return new buffers back to hardware.
 *
 * Buffers needed by some number of rxqs associated in this rxq_group are
 * managed by at most two bufqs (depending on performance configuration).
 */
struct idpf_bufq_set {
	struct idpf_queue bufq;
	int num_refillqs;
	struct idpf_sw_queue *refillqs;
};

/**
 * struct idpf_rxq_group
 * @vport: Vport back pointer
 * @singleq: Struct with single queue related members
 * @singleq.num_rxq: Number of RX queues associated
 * @singleq.rxqs: Array of RX queue pointers
 * @splitq: Struct with split queue related members
 * @splitq.num_rxq_sets: Number of RX queue sets
 * @splitq.rxq_sets: Array of RX queue sets
 * @splitq.bufq_sets: Buffer queue set pointer
 *
 * In singleq mode, an rxq_group is simply an array of rxqs.  In splitq, an
 * rxq_group contains all the rxqs, bufqs and refillqs needed to
 * manage buffers in splitq mode.
 */
struct idpf_rxq_group {
	struct idpf_vport *vport;

	union {
		struct {
			u16 num_rxq;
			struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q];
		} singleq;
		struct {
			u16 num_rxq_sets;
			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
			struct idpf_bufq_set *bufq_sets;
		} splitq;
	};
};

/**
 * struct idpf_txq_group
 * @vport: Vport back pointer
 * @num_txq: Number of TX queues associated
 * @txqs: Array of TX queue pointers
 * @complq: Associated completion queue pointer, split queue only
 * @num_completions_pending: Total number of completions pending for the
 *			     completion queue, accumulated for all TX queues
 *			     associated with that completion queue.
 *
 * Between singleq and splitq, a txq_group is largely the same except for the
 * complq. In splitq a single complq is responsible for handling completions
 * for some number of txqs associated in this txq_group.
 */
struct idpf_txq_group {
	struct idpf_vport *vport;

	u16 num_txq;
	struct idpf_queue *txqs[IDPF_LARGE_MAX_Q];

	struct idpf_queue *complq;

	u32 num_completions_pending;
};

/**
 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 * @size: transmit request size in bytes
 *
 * In the case where a large frag (>= 16K) needs to be split across multiple
 * descriptors, we need to assume that we can have no more than 12K of data
 * per descriptor due to hardware alignment restrictions (4K alignment).
 */
static inline u32 idpf_size_to_txd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
}
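
/* Example: a 32K fragment needs DIV_ROUND_UP(32768, 12288) = 3 descriptors
 * under the 12K-per-descriptor limit described above.
 */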

/**
 * idpf_tx_singleq_build_ctob - populate command tag offset and size
 * @td_cmd: Command to be filled in desc
 * @td_offset: Offset to be filled in desc
 * @size: Size of the buffer
 * @td_tag: td tag to be filled
 *
 * Returns the 64 bit value populated with the input parameters
 */
static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
						unsigned int size, u64 td_tag)
{
	return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
			   (td_cmd << IDPF_TXD_QW1_CMD_S) |
			   (td_offset << IDPF_TXD_QW1_OFFSET_S) |
			   ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << IDPF_TXD_QW1_L2TAG1_S));
}

void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size);
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size);
/**
 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
					     struct idpf_tx_splitq_params *params,
					     u16 td_cmd, u16 size)
{
	if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
	else
		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}

/**
 * idpf_alloc_page - Allocate a new RX buffer from the page pool
 * @pool: page_pool to allocate from
 * @buf: metadata struct to populate with page info
 * @buf_size: 2K or 4K
 *
 * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise.
 */
static inline dma_addr_t idpf_alloc_page(struct page_pool *pool,
					 struct idpf_rx_buf *buf,
					 unsigned int buf_size)
{
	if (buf_size == IDPF_RX_BUF_2048)
		buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
						     buf_size);
	else
		buf->page = page_pool_dev_alloc_pages(pool);

	if (!buf->page)
		return DMA_MAPPING_ERROR;

	buf->truesize = buf_size;

	return page_pool_get_dma_addr(buf->page) + buf->page_offset +
	       pool->p.offset;
}
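
/* Typical usage sketch (illustrative, not driver code): map a fresh buffer
 * and check for a mapping failure before posting the address to a buffer
 * queue descriptor:
 *
 *	dma_addr_t addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
 *
 *	if (unlikely(addr == DMA_MAPPING_ERROR))
 *		break;
 */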

/**
 * idpf_rx_put_page - Return RX buffer page to pool
 * @rx_buf: RX buffer metadata struct
 */
static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf)
{
	page_pool_put_page(rx_buf->page->pp, rx_buf->page,
			   rx_buf->truesize, true);
	rx_buf->page = NULL;
}

/**
 * idpf_rx_sync_for_cpu - Synchronize DMA buffer
 * @rx_buf: RX buffer metadata struct
 * @len: frame length from descriptor
 */
static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
{
	struct page *page = rx_buf->page;
	struct page_pool *pp = page->pp;

	dma_sync_single_range_for_cpu(pp->p.dev,
				      page_pool_get_dma_addr(page),
				      rx_buf->page_offset + pp->p.offset, len,
				      page_pool_get_dma_dir(pp));
}

int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_msg);
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
			     struct virtchnl2_create_vport *vport_msg,
			     struct idpf_vport_max_q *max_q);
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
int idpf_vport_queues_alloc(struct idpf_vport *vport);
void idpf_vport_queues_rel(struct idpf_vport *vport);
void idpf_vport_intr_rel(struct idpf_vport *vport);
int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
		      unsigned int size);
struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
				      struct idpf_rx_buf *rx_buf,
				      unsigned int size);
bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
			   bool xmit_more);
netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb);
void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
			   struct idpf_tx_buf *first, u16 ring_idx);
unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
					 struct sk_buff *skb);
bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
			unsigned int count);
int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
				 struct net_device *netdev);
netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
				  struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
				      u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);

#endif /* !_IDPF_TXRX_H_ */