xref: /linux/drivers/net/ethernet/intel/idpf/idpf_txrx.h (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_

#include <linux/dim.h>

#include <net/libeth/cache.h>
#include <net/tcp.h>
#include <net/netdev_queues.h>

#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"

#define IDPF_LARGE_MAX_Q			256
#define IDPF_MAX_Q				16
#define IDPF_MIN_Q				2
/* Mailbox Queue */
#define IDPF_MAX_MBXQ				1

#define IDPF_MIN_TXQ_DESC			64
#define IDPF_MIN_RXQ_DESC			64
#define IDPF_MIN_TXQ_COMPLQ_DESC		256
#define IDPF_MAX_QIDS				256

/* The number of descriptors in a queue must be a multiple of 32. RX queue
 * descriptor counts must additionally be a multiple of
 * IDPF_REQ_RXQ_DESC_MULTIPLE so that each buffer queue's share of the
 * descriptors stays aligned to 32.
 */
#define IDPF_REQ_DESC_MULTIPLE			32
#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)

#define IDPF_MAX_DESCS				8160
#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
#define MIN_SUPPORT_TXDID (\
	VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
	VIRTCHNL2_TXDID_FLEX_TSO_CTX)

#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS		1
#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS		1
#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP		4
#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP		4

#define IDPF_COMPLQ_PER_GROUP			1
#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP		1
#define IDPF_MAX_BUFQS_PER_RXQ_GRP		2
#define IDPF_BUFQ2_ENA				1
#define IDPF_NUMQ_PER_CHUNK			1

#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP		1
#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP		1

/* Default vector sharing */
#define IDPF_MBX_Q_VEC		1
#define IDPF_MIN_Q_VEC		1

#define IDPF_DFLT_TX_Q_DESC_COUNT		512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512
#define IDPF_DFLT_RX_Q_DESC_COUNT		512

/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
 * given RX completion queue has descriptors. This includes _ALL_ buffer
 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
 * you have a total of 1024 buffers so your RX queue _must_ have at least that
 * many descriptors. This macro divides a given number of RX descriptors by
 * the number of buffer queues to calculate how many descriptors each buffer
 * queue can have without overrunning the RX queue.
 *
 * If hardware is given more buffers than completion descriptors, and it gets
 * a chance to post more than a full ring's worth of descriptors before SW
 * takes an interrupt, it overwrites the SW head and the gen bit in the
 * overwritten descriptors will be wrong. Any overwritten descriptors' buffers
 * are gone forever and SW has no reasonable way to tell that this has
 * happened. From the SW perspective, when we finally get an interrupt, it
 * looks like we're still waiting for a descriptor to be done, stalling
 * forever.
 */
#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))
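
/* Worked example (values illustrative): with IDPF_DFLT_RX_Q_DESC_COUNT = 512
 * RX descriptors and IDPF_MAX_BUFQS_PER_RXQ_GRP = 2 buffer queues,
 * IDPF_RX_BUFQ_DESC_COUNT(512, 2) = 256 descriptors per buffer queue, so at
 * most 2 * 256 = 512 buffers can ever be outstanding -- never more than the
 * RX queue can complete.
 */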

#define IDPF_RX_BUFQ_WORKING_SET(rxq)		((rxq)->desc_count - 1)

#define IDPF_RX_BUMP_NTC(rxq, ntc)				\
do {								\
	if (unlikely(++(ntc) == (rxq)->desc_count)) {		\
		ntc = 0;					\
		idpf_queue_change(GEN_CHK, rxq);		\
	}							\
} while (0)
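
/* A minimal sketch of how an RX clean loop might advance next_to_clean with
 * the macro above; gen_bit() and process() are placeholders, the real user
 * is the splitq clean path in idpf_txrx.c. The GEN_CHK flip on wrap keeps SW
 * in step with the HW writeback generation described at __IDPF_Q_GEN_CHK.
 *
 *	u16 ntc = rxq->next_to_clean;
 *
 *	while (budget--) {
 *		desc = &rxq->rx[ntc];
 *		if (gen_bit(desc) != idpf_queue_has(GEN_CHK, rxq))
 *			break;			// not written back yet
 *		process(desc);
 *		IDPF_RX_BUMP_NTC(rxq, ntc);	// wraps and flips GEN_CHK
 *	}
 *	rxq->next_to_clean = ntc;
 */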

#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx)			\
do {								\
	if (unlikely(++(idx) == (q)->desc_count))		\
		idx = 0;					\
} while (0)

#define IDPF_RX_BUF_STRIDE			32
#define IDPF_RX_BUF_POST_STRIDE			16
#define IDPF_LOW_WATERMARK			64

#define IDPF_TX_TSO_MIN_MSS			88

/* Minimum number of descriptors between 2 descriptors with the RE bit set;
 * only relevant in flow scheduling mode
 */
#define IDPF_TX_SPLITQ_RE_MIN_GAP	64

#define IDPF_RX_BI_GEN_M		BIT(16)
#define IDPF_RX_BI_BUFID_M		GENMASK(15, 0)

#define IDPF_RXD_EOF_SPLITQ		VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ		VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M

#define IDPF_DESC_UNUSED(txq)     \
	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
	(txq)->next_to_clean - (txq)->next_to_use - 1)
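
/* Example of the ring math above (values illustrative): with desc_count =
 * 512, next_to_clean = 10 and next_to_use = 500, the macro yields
 * 512 + 10 - 500 - 1 = 21 free descriptors. The "- 1" keeps one slot
 * permanently empty so that next_to_clean == next_to_use always means
 * "empty", never "full".
 */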

#define IDPF_TX_BUF_RSV_UNUSED(txq)	((txq)->stash->buf_stack.top)
#define IDPF_TX_BUF_RSV_LOW(txq)	(IDPF_TX_BUF_RSV_UNUSED(txq) < \
					 (txq)->desc_count >> 2)

#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
 * completions that are expected to arrive on the TX completion queue.
 */
#define IDPF_TX_COMPLQ_PENDING(txq)	\
	(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
	0 : U32_MAX) + \
	(txq)->num_completions_pending - (txq)->complq->num_completions)
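
/* Illustrative only: in the common case num_completions_pending >=
 * num_completions and the macro is a plain difference, e.g. 1000 pending -
 * 950 completed = 50 outstanding. The U32_MAX term compensates when the
 * pending counter has wrapped past the completion counter, so the
 * subtraction still yields the small positive delta.
 */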

#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH	16
/* Adjust the generation for the completion tag and wrap if necessary */
#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
	((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
	0 : (txq)->compl_tag_cur_gen)
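
/* Example, assuming the minimum 64-descriptor ring (see the completion tag
 * layout documented at struct idpf_tx_queue): compl_tag_gen_max would be
 * 1024, so the generation counts 0, 1, ..., 1023 and the macro above wraps
 * it back to 0, keeping GEN within its 10 tag bits.
 */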

#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)

#define IDPF_TX_FLAGS_TSO		BIT(0)
#define IDPF_TX_FLAGS_IPV4		BIT(1)
#define IDPF_TX_FLAGS_IPV6		BIT(2)
#define IDPF_TX_FLAGS_TUNNEL		BIT(3)

union idpf_tx_flex_desc {
	struct idpf_flex_tx_desc q; /* queue based scheduling */
	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};

#define idpf_tx_buf libeth_sqe

/**
 * struct idpf_buf_lifo - LIFO for managing OOO completions
 * @top: Used to know how many buffers are left
 * @size: Total size of LIFO
 * @bufs: Backing array
 */
struct idpf_buf_lifo {
	u16 top;
	u16 size;
	struct idpf_tx_stash **bufs;
};
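
/* A rough sketch of how the stack is operated (the real helpers live in
 * idpf_txrx.c); @top counts valid entries, so a push fails once top reaches
 * size:
 *
 *	static int push(struct idpf_buf_lifo *stack,
 *			struct idpf_tx_stash *buf)
 *	{
 *		if (unlikely(stack->top == stack->size))
 *			return -ENOSPC;			// stack full
 *		stack->bufs[stack->top++] = buf;
 *		return 0;
 *	}
 *
 *	static struct idpf_tx_stash *pop(struct idpf_buf_lifo *stack)
 *	{
 *		return stack->top ? stack->bufs[--stack->top] : NULL;
 *	}
 */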

/**
 * struct idpf_tx_offload_params - Offload parameters for a given packet
 * @tx_flags: Feature flags enabled for this packet
 * @hdr_offsets: Offset parameter for single queue model
 * @cd_tunneling: Type of tunneling enabled for single queue model
 * @tso_len: Total length of payload to segment
 * @mss: Segment size
 * @tso_segs: Number of segments to be sent
 * @tso_hdr_len: Length of headers to be duplicated
 * @td_cmd: Command field to be inserted into descriptor
 */
struct idpf_tx_offload_params {
	u32 tx_flags;

	u32 hdr_offsets;
	u32 cd_tunneling;

	u32 tso_len;
	u16 mss;
	u16 tso_segs;
	u16 tso_hdr_len;

	u16 td_cmd;
};

/**
 * struct idpf_tx_splitq_params
 * @dtype: General descriptor info
 * @eop_cmd: Type of EOP
 * @compl_tag: Associated tag for completion
 * @td_tag: Descriptor tunneling tag
 * @offload: Offload parameters
 */
struct idpf_tx_splitq_params {
	enum idpf_tx_desc_dtype_value dtype;
	u16 eop_cmd;
	union {
		u16 compl_tag;
		u16 td_tag;
	};

	struct idpf_tx_offload_params offload;
};

enum idpf_tx_ctx_desc_eipt_offload {
	IDPF_TX_CTX_EXT_IP_NONE         = 0x0,
	IDPF_TX_CTX_EXT_IP_IPV6         = 0x1,
	IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
	IDPF_TX_CTX_EXT_IP_IPV4         = 0x3
};

#define IDPF_TX_COMPLQ_CLEAN_BUDGET	256
#define IDPF_TX_MIN_PKT_LEN		17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR	1
#define IDPF_TX_DESCS_PER_CACHE_LINE	(L1_CACHE_BYTES / \
					 sizeof(struct idpf_flex_tx_desc))
#define IDPF_TX_DESCS_FOR_CTX		1
/* TX descriptors needed, worst case */
#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
			     IDPF_TX_DESCS_PER_CACHE_LINE + \
			     IDPF_TX_DESCS_FOR_SKB_DATA_PTR)
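
/* Worked example, assuming MAX_SKB_FRAGS = 17 and a 64-byte cache line with
 * 16-byte flex descriptors (IDPF_TX_DESCS_PER_CACHE_LINE = 4):
 * IDPF_TX_DESC_NEEDED = 17 + 1 + 4 + 1 = 23 descriptors in the worst case
 * for a single frame.
 */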

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IDPF_TX_MAX_READ_REQ_SIZE	SZ_4K
#define IDPF_TX_MAX_DESC_DATA		(SZ_16K - 1)
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
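
/* With the values above, IDPF_TX_MAX_DESC_DATA_ALIGNED =
 * ALIGN_DOWN(16383, 4096) = 12288, i.e. at most 12K of data goes into any
 * one descriptor; see idpf_size_to_txd_count() below.
 */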

#define idpf_rx_buf libeth_fqe

#define IDPF_RX_MAX_PTYPE_PROTO_IDS    32
#define IDPF_RX_MAX_PTYPE_SZ	(sizeof(struct virtchnl2_ptype) + \
				 (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
#define IDPF_RX_PTYPE_HDR_SZ	sizeof(struct virtchnl2_get_ptype_info)
#define IDPF_RX_MAX_PTYPES_PER_BUF	\
	DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
			   IDPF_RX_MAX_PTYPE_SZ)

#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)

#define IDPF_TUN_IP_GRE (\
	IDPF_PTYPE_TUNNEL_IP |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT)

#define IDPF_TUN_IP_GRE_MAC (\
	IDPF_TUN_IP_GRE |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)

#define IDPF_RX_MAX_PTYPE	1024
#define IDPF_RX_MAX_BASE_PTYPE	256
#define IDPF_INVALID_PTYPE_ID	0xFFFF

enum idpf_tunnel_state {
	IDPF_PTYPE_TUNNEL_IP                    = BIT(0),
	IDPF_PTYPE_TUNNEL_IP_GRENAT             = BIT(1),
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC         = BIT(2),
};

struct idpf_ptype_state {
	bool outer_ip:1;
	bool outer_frag:1;
	u8 tunnel_state:6;
};

/**
 * enum idpf_queue_flags_t
 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
 *		      identify new descriptor writebacks on the ring. HW sets
 *		      the gen bit to 1 on the first writeback of any given
 *		      descriptor. After the ring wraps, HW sets the gen bit of
 *		      those descriptors to 0, and continues flipping
 *		      0->1 or 1->0 on each ring wrap. SW maintains its own
 *		      gen bit to know what value will indicate writebacks on
 *		      the next pass around the ring. E.g. it is initialized
 *		      to 1 and knows that reading a gen bit of 1 in any
 *		      descriptor on the initial pass of the ring indicates a
 *		      writeback. It also flips on every ring wrap.
 * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
 *			  bit and Q_RFL_GEN is the SW bit.
 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
 * @__IDPF_Q_POLL_MODE: Enable poll mode
 * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
 * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
 * @__IDPF_Q_FLAGS_NBITS: Must be last
 */
enum idpf_queue_flags_t {
	__IDPF_Q_GEN_CHK,
	__IDPF_Q_RFL_GEN_CHK,
	__IDPF_Q_FLOW_SCH_EN,
	__IDPF_Q_SW_MARKER,
	__IDPF_Q_POLL_MODE,
	__IDPF_Q_CRC_EN,
	__IDPF_Q_HSPLIT_EN,

	__IDPF_Q_FLAGS_NBITS,
};

#define idpf_queue_set(f, q)		__set_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_clear(f, q)		__clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_change(f, q)		__change_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has(f, q)		test_bit(__IDPF_Q_##f, (q)->flags)

#define idpf_queue_has_clear(f, q)			\
	__test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_assign(f, q, v)			\
	__assign_bit(__IDPF_Q_##f, (q)->flags, v)
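
/* Usage sketch (illustrative): the flag name is passed without the
 * __IDPF_Q_ prefix, which the macros paste on via token concatenation:
 *
 *	idpf_queue_set(FLOW_SCH_EN, txq);  // __set_bit(__IDPF_Q_FLOW_SCH_EN, ...)
 *	if (idpf_queue_has(HSPLIT_EN, rxq))
 *		...;
 *	idpf_queue_change(GEN_CHK, rxq);   // flip the SW gen bit on ring wrap
 */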

/**
 * struct idpf_vec_regs
 * @dyn_ctl_reg: Dynamic control interrupt register offset
 * @itrn_reg: Interrupt Throttling Rate register offset
 * @itrn_index_spacing: Register spacing between ITR registers of the same
 *			vector
 */
struct idpf_vec_regs {
	u32 dyn_ctl_reg;
	u32 itrn_reg;
	u32 itrn_index_spacing;
};

/**
 * struct idpf_intr_reg
 * @dyn_ctl: Dynamic control interrupt register
 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
 * @dyn_ctl_itridx_s: Register bit offset for ITR index
 * @dyn_ctl_itridx_m: Mask for ITR index
 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
 * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
 * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
 * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
 * @rx_itr: RX ITR register
 * @tx_itr: TX ITR register
 * @icr_ena: Interrupt cause register offset
 * @icr_ena_ctlq_m: Mask for ICR
 */
struct idpf_intr_reg {
	void __iomem *dyn_ctl;
	u32 dyn_ctl_intena_m;
	u32 dyn_ctl_intena_msk_m;
	u32 dyn_ctl_itridx_s;
	u32 dyn_ctl_itridx_m;
	u32 dyn_ctl_intrvl_s;
	u32 dyn_ctl_wb_on_itr_m;
	u32 dyn_ctl_sw_itridx_ena_m;
	u32 dyn_ctl_swint_trig_m;
	void __iomem *rx_itr;
	void __iomem *tx_itr;
	void __iomem *icr_ena;
	u32 icr_ena_ctlq_m;
};

/**
 * struct idpf_q_vector
 * @vport: Vport back pointer
 * @num_rxq: Number of RX queues
 * @num_txq: Number of TX queues
 * @num_bufq: Number of buffer queues
 * @num_complq: number of completion queues
 * @rx: Array of RX queues to service
 * @tx: Array of TX queues to service
 * @bufq: Array of buffer queues to service
 * @complq: array of completion queues
 * @intr_reg: See struct idpf_intr_reg
 * @napi: napi handler
 * @total_events: Number of interrupts processed
 * @wb_on_itr: whether WB on ITR is enabled
 * @tx_dim: Data for TX net_dim algorithm
 * @tx_itr_value: TX interrupt throttling rate
 * @tx_intr_mode: Dynamic ITR or not
 * @tx_itr_idx: TX ITR index
 * @rx_dim: Data for RX net_dim algorithm
 * @rx_itr_value: RX interrupt throttling rate
 * @rx_intr_mode: Dynamic ITR or not
 * @rx_itr_idx: RX ITR index
 * @v_idx: Vector index
 */
struct idpf_q_vector {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_vport *vport;

	u16 num_rxq;
	u16 num_txq;
	u16 num_bufq;
	u16 num_complq;
	struct idpf_rx_queue **rx;
	struct idpf_tx_queue **tx;
	struct idpf_buf_queue **bufq;
	struct idpf_compl_queue **complq;

	struct idpf_intr_reg intr_reg;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	struct napi_struct napi;
	u16 total_events;
	bool wb_on_itr;

	struct dim tx_dim;
	u16 tx_itr_value;
	bool tx_intr_mode;
	u32 tx_itr_idx;

	struct dim rx_dim;
	u16 rx_itr_value;
	bool rx_intr_mode;
	u32 rx_itr_idx;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u16 v_idx;

	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 120,
			    24 + sizeof(struct napi_struct) +
			    2 * sizeof(struct dim),
			    8);

struct idpf_rx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t rsc_pkts;
	u64_stats_t hw_csum_err;
	u64_stats_t hsplit_pkts;
	u64_stats_t hsplit_buf_ovf;
	u64_stats_t bad_descs;
};

struct idpf_tx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t lso_pkts;
	u64_stats_t linearize;
	u64_stats_t q_busy;
	u64_stats_t skb_drops;
	u64_stats_t dma_map_errs;
};

#define IDPF_ITR_DYNAMIC	1
#define IDPF_ITR_MAX		0x1FE0
#define IDPF_ITR_20K		0x0032
#define IDPF_ITR_GRAN_S		1	/* Assume ITR granularity is 2us */
#define IDPF_ITR_MASK		0x1FFE  /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & IDPF_ITR_MASK)
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF		IDPF_ITR_20K
#define IDPF_ITR_RX_DEF		IDPF_ITR_20K
/* Index used for 'SW ITR' update in DYN_CTL register */
#define IDPF_SW_ITR_UPDATE_IDX	2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX	3
#define IDPF_ITR_IDX_SPACING(spacing, dflt)	(spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX		1

/**
 * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
 * @buf_stack: Stack of empty buffers to store buffer info for out of order
 *	       buffer completions. See struct idpf_buf_lifo
 * @sched_buf_hash: Hash table to store buffers
 */
struct idpf_txq_stash {
	struct idpf_buf_lifo buf_stack;
	DECLARE_HASHTABLE(sched_buf_hash, 12);
} ____cacheline_aligned;

/**
 * struct idpf_rx_queue - software structure representing a receive queue
 * @rx: universal receive descriptor array
 * @single_buf: buffer descriptor array in singleq
 * @desc_ring: virtual descriptor ring address
 * @bufq_sets: Pointer to the array of buffer queues in splitq mode
 * @napi: NAPI instance corresponding to this queue (splitq)
 * @rx_buf: See struct &libeth_fqe
 * @pp: Page pool pointer in singleq mode
 * @netdev: &net_device corresponding to this queue
 * @tail: Tail offset. Used by both queue models, single and split.
 * @flags: See enum idpf_queue_flags_t
 * @idx: Index of this RX queue in the vport's total set of RX queues across
 *	 all groups; also used for skb reporting.
 * @desc_count: Number of descriptors
 * @rxdids: Supported RX descriptor ids
 * @rx_ptype_lkup: LUT of Rx ptypes
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @skb: Pointer to the skb
 * @truesize: data buffer truesize in singleq
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See struct idpf_rx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 * @rx_max_pkt_size: RX max packet size
 */
struct idpf_rx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		union virtchnl2_rx_desc *rx;
		struct virtchnl2_singleq_rx_buf_desc *single_buf;

		void *desc_ring;
	};
	union {
		struct {
			struct idpf_bufq_set *bufq_sets;
			struct napi_struct *napi;
		};
		struct {
			struct libeth_fqe *rx_buf;
			struct page_pool *pp;
		};
	};
	struct net_device *netdev;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 idx;
	u16 desc_count;

	u32 rxdids;
	const struct libeth_rx_pt *rx_ptype_lkup;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	struct sk_buff *skb;
	u32 truesize;

	struct u64_stats_sync stats_sync;
	struct idpf_rx_queue_stats q_stats;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u16 rx_buffer_low_watermark;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	u16 rx_max_pkt_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
			    80 + sizeof(struct u64_stats_sync),
			    32);

/**
 * struct idpf_tx_queue - software structure representing a transmit queue
 * @base_tx: base Tx descriptor array
 * @base_ctx: base Tx context descriptor array
 * @flex_tx: flex Tx descriptor array
 * @flex_ctx: flex Tx context descriptor array
 * @desc_ring: virtual descriptor ring address
 * @tx_buf: See struct idpf_tx_buf
 * @txq_grp: See struct idpf_txq_group
 * @dev: Device back pointer for DMA mapping
 * @tail: Tail offset. Used by both queue models, single and split.
 * @flags: See enum idpf_queue_flags_t
 * @idx: Index used to map between the TX queue group and the hot path TX
 *	 pointers stored in the vport. Used in both singleq and splitq.
 * @desc_count: Number of descriptors
 * @tx_min_pkt_len: Min supported packet length
 * @compl_tag_gen_s: Completion tag generation bit
 *	The format of the completion tag will change based on the TXQ
 *	descriptor ring size so that we can maintain roughly the same level
 *	of "uniqueness" across all descriptor sizes. For example, if the
 *	TXQ descriptor ring size is 64 (the minimum size supported), the
 *	completion tag will be formatted as below:
 *	15                 6 5         0
 *	--------------------------------
 *	|    GEN=0-1023     |IDX = 0-63|
 *	--------------------------------
 *
 *	This gives us 64*1024 = 65536 possible unique values. Similarly, if
 *	the TXQ descriptor ring size is 8160 (the maximum size supported),
 *	the completion tag will be formatted as below:
 *	15 13 12                       0
 *	--------------------------------
 *	|GEN |       IDX = 0-8159      |
 *	--------------------------------
 *
 *	This gives us 8*8160 = 65280 possible unique values.
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 *		   the TX completion queue, it can be for any TXQ associated
 *		   with that completion queue. This means we can clean up to
 *		   N TXQs during a single call to clean the completion queue.
 *		   cleaned_bytes|pkts tracks the clean stats per TXQ during
 *		   that single call to clean the completion queue. By doing so,
 *		   we can update BQL with aggregate cleaned stats for each TXQ
 *		   only once at the end of the cleaning routine.
 * @clean_budget: singleq only, queue cleaning budget
 * @cleaned_pkts: Number of packets cleaned for the case described above
 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @stash: Tx buffer stash for Flow-based scheduling mode
 * @compl_tag_bufid_m: Completion tag buffer id mask
 * @compl_tag_cur_gen: Used to keep track of current completion tag generation
 * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See struct idpf_tx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 */
struct idpf_tx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		struct idpf_base_tx_desc *base_tx;
		struct idpf_base_tx_ctx_desc *base_ctx;
		union idpf_tx_flex_desc *flex_tx;
		struct idpf_flex_tx_ctx_desc *flex_ctx;

		void *desc_ring;
	};
	struct libeth_sqe *tx_buf;
	struct idpf_txq_group *txq_grp;
	struct device *dev;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 idx;
	u16 desc_count;

	u16 tx_min_pkt_len;
	u16 compl_tag_gen_s;

	struct net_device *netdev;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u16 next_to_use;
	u16 next_to_clean;

	union {
		u32 cleaned_bytes;
		u32 clean_budget;
	};
	u16 cleaned_pkts;

	u16 tx_max_bufs;
	struct idpf_txq_stash *stash;

	u16 compl_tag_bufid_m;
	u16 compl_tag_cur_gen;
	u16 compl_tag_gen_max;

	struct u64_stats_sync stats_sync;
	struct idpf_tx_queue_stats q_stats;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
			    88 + sizeof(struct u64_stats_sync),
			    24);

/**
 * struct idpf_buf_queue - software structure representing a buffer queue
 * @split_buf: buffer descriptor array
 * @hdr_buf: &libeth_fqe for header buffers
 * @hdr_pp: &page_pool for header buffers
 * @buf: &libeth_fqe for data buffers
 * @pp: &page_pool for data buffers
 * @tail: Tail offset
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @hdr_truesize: truesize for buffer headers
 * @truesize: truesize for data buffers
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 */
struct idpf_buf_queue {
	__cacheline_group_begin_aligned(read_mostly);
	struct virtchnl2_splitq_rx_buf_desc *split_buf;
	struct libeth_fqe *hdr_buf;
	struct page_pool *hdr_pp;
	struct libeth_fqe *buf;
	struct page_pool *pp;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	u32 next_to_alloc;

	u32 hdr_truesize;
	u32 truesize;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u16 rx_buffer_low_watermark;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);

/**
 * struct idpf_compl_queue - software structure representing a completion queue
 * @comp: completion descriptor array
 * @txq_grp: See struct idpf_txq_group
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @clean_budget: queue cleaning budget
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
 *		 and bufq.
 * @next_to_clean: Next descriptor to clean
 * @num_completions: Only relevant for TX completion queue. It tracks the
 *		     number of completions received to compare against the
 *		     number of completions pending, as accumulated by the
 *		     TX queues.
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 */
struct idpf_compl_queue {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_splitq_tx_compl_desc *comp;
	struct idpf_txq_group *txq_grp;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;

	u32 clean_budget;
	struct net_device *netdev;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;

	aligned_u64 num_completions;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);

/**
 * struct idpf_sw_queue
 * @ring: Pointer to the ring
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Descriptor count
 * @next_to_use: Buffer to allocate at
 * @next_to_clean: Next descriptor to clean
 *
 * Software queues are used in splitq mode to manage buffers between the rxq
 * producer and the bufq consumer. These are required in order to maintain a
 * lockless buffer management system and are strictly software only constructs.
 */
struct idpf_sw_queue {
	__cacheline_group_begin_aligned(read_mostly);
	u32 *ring;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	__cacheline_group_end_aligned(read_write);
};
libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);

/**
 * struct idpf_rxq_set
 * @rxq: RX queue
 * @refillq: pointers to refill queues
 *
 * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
 * Each rxq needs a refillq to return used buffers back to the respective bufq.
 * Bufqs then clean these refillqs for buffers to give to hardware.
 */
struct idpf_rxq_set {
	struct idpf_rx_queue rxq;
	struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};

/**
 * struct idpf_bufq_set
 * @bufq: Buffer queue
 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
 *		  in idpf_rxq_group.
 * @refillqs: Pointer to refill queues array.
 *
 * Splitq only. idpf_bufq_set associates a bufq with an array of refillqs.
 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
 * Used buffers received by rxqs will be put on refillqs which bufqs will
 * clean to return new buffers back to hardware.
 *
 * Buffers needed by some number of rxqs associated in this rxq_group are
 * managed by at most two bufqs (depending on performance configuration).
 */
struct idpf_bufq_set {
	struct idpf_buf_queue bufq;
	int num_refillqs;
	struct idpf_sw_queue *refillqs;
};

/**
 * struct idpf_rxq_group
 * @vport: Vport back pointer
 * @singleq: Struct with single queue related members
 * @singleq.num_rxq: Number of RX queues associated
 * @singleq.rxqs: Array of RX queue pointers
 * @splitq: Struct with split queue related members
 * @splitq.num_rxq_sets: Number of RX queue sets
 * @splitq.rxq_sets: Array of RX queue sets
 * @splitq.bufq_sets: Buffer queue set pointer
 *
 * In singleq mode, an rxq_group is simply an array of rxqs. In splitq, a
 * rxq_group contains all the rxqs, bufqs and refillqs needed to
 * manage buffers in splitq mode.
 */
struct idpf_rxq_group {
	struct idpf_vport *vport;

	union {
		struct {
			u16 num_rxq;
			struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
		} singleq;
		struct {
			u16 num_rxq_sets;
			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
			struct idpf_bufq_set *bufq_sets;
		} splitq;
	};
};

/**
 * struct idpf_txq_group
 * @vport: Vport back pointer
 * @num_txq: Number of TX queues associated
 * @txqs: Array of TX queue pointers
 * @stashes: array of OOO stashes for the queues
 * @complq: Associated completion queue pointer, split queue only
 * @num_completions_pending: Total number of completions pending for the
 *			     completion queue, accumulated for all TX queues
 *			     associated with that completion queue.
 *
 * Between singleq and splitq, a txq_group is largely the same except for the
 * complq. In splitq a single complq is responsible for handling completions
 * for some number of txqs associated in this txq_group.
 */
struct idpf_txq_group {
	struct idpf_vport *vport;

	u16 num_txq;
	struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
	struct idpf_txq_stash *stashes;

	struct idpf_compl_queue *complq;

	aligned_u64 num_completions_pending;
};

static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
{
	u32 cpu;

	if (!q_vector)
		return NUMA_NO_NODE;

	cpu = cpumask_first(&q_vector->napi.config->affinity_mask);

	return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}

/**
 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 * @size: transmit request size in bytes
 *
 * In the case where a large frag (>= 16K) needs to be split across multiple
 * descriptors, we need to assume that we can have no more than 12K of data
 * per descriptor due to hardware alignment restrictions (4K alignment).
 */
static inline u32 idpf_size_to_txd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
}
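
/* Illustrative: a 32K frag needs DIV_ROUND_UP(32768, 12288) = 3 descriptors;
 * anything up to 12K fits in a single one.
 */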

/**
 * idpf_tx_singleq_build_ctob - populate command tag offset and size
 * @td_cmd: Command to be filled in desc
 * @td_offset: Offset to be filled in desc
 * @size: Size of the buffer
 * @td_tag: td tag to be filled
 *
 * Returns the 64 bit value populated with the input parameters
 */
static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
						unsigned int size, u64 td_tag)
{
	return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
			   (td_cmd << IDPF_TXD_QW1_CMD_S) |
			   (td_offset << IDPF_TXD_QW1_OFFSET_S) |
			   ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << IDPF_TXD_QW1_L2TAG1_S));
}

void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size);
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size);
/**
 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
					     struct idpf_tx_splitq_params *params,
					     u16 td_cmd, u16 size)
{
	if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
	else
		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}

/**
 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
 * @q_vector: pointer to queue vector struct
 */
static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
{
	struct idpf_intr_reg *reg;

	if (q_vector->wb_on_itr)
		return;

	q_vector->wb_on_itr = true;
	reg = &q_vector->intr_reg;

	writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
	       (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
	       reg->dyn_ctl);
}

int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_msg);
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
			     struct virtchnl2_create_vport *vport_msg,
			     struct idpf_vport_max_q *max_q);
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
int idpf_vport_queues_alloc(struct idpf_vport *vport);
void idpf_vport_queues_rel(struct idpf_vport *vport);
void idpf_vport_intr_rel(struct idpf_vport *vport);
int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
void idpf_vport_intr_ena(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
		      unsigned int size);
struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
			   bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
			   struct idpf_tx_buf *first, u16 ring_idx);
unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
					 struct sk_buff *skb);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
				  struct idpf_tx_queue *tx_q);
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
				      u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);

static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
					     u32 needed)
{
	return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
					  IDPF_DESC_UNUSED(tx_q),
					  needed, needed);
}
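
/* Illustrative use in a transmit path, assuming the worst-case descriptor
 * count from IDPF_TX_DESC_NEEDED:
 *
 *	if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED))
 *		return idpf_tx_drop_skb(tx_q, skb);
 *
 * netif_subqueue_maybe_stop() returns 0 once it has stopped the subqueue,
 * so the helper returns true when transmission must not proceed.
 */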

#endif /* !_IDPF_TXRX_H_ */