1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (C) 2023 Intel Corporation */
3
4 #ifndef _IDPF_TXRX_H_
5 #define _IDPF_TXRX_H_
6
7 #include <linux/dim.h>
8
9 #include <net/libeth/cache.h>
10 #include <net/libeth/types.h>
11 #include <net/netdev_queues.h>
12 #include <net/tcp.h>
13 #include <net/xdp.h>
14
15 #include "idpf_lan_txrx.h"
16 #include "virtchnl2_lan_desc.h"
17
18 #define IDPF_LARGE_MAX_Q 256
19 #define IDPF_MAX_Q 16
20 #define IDPF_MIN_Q 2
21 /* Mailbox Queue */
22 #define IDPF_MAX_MBXQ 1
23
24 #define IDPF_MIN_TXQ_DESC 64
25 #define IDPF_MIN_RXQ_DESC 64
26 #define IDPF_MIN_TXQ_COMPLQ_DESC 256
27 #define IDPF_MAX_QIDS 256
28
29 /* Number of descriptors in a queue should be a multiple of 32. RX queue
30 * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
31 * to achieve BufQ descriptors aligned to 32
32 */
33 #define IDPF_REQ_DESC_MULTIPLE 32
34 #define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
35 #define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
36 #define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)
37
38 #define IDPF_MAX_DESCS 8160
39 #define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
40 #define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
41 #define MIN_SUPPORT_TXDID (\
42 VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
43 VIRTCHNL2_TXDID_FLEX_TSO_CTX)
44
45 #define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS 1
46 #define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS 1
47 #define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP 4
48 #define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP 4
49
50 #define IDPF_COMPLQ_PER_GROUP 1
51 #define IDPF_SINGLE_BUFQ_PER_RXQ_GRP 1
52 #define IDPF_MAX_BUFQS_PER_RXQ_GRP 2
53 #define IDPF_BUFQ2_ENA 1
54 #define IDPF_NUMQ_PER_CHUNK 1
55
56 #define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP 1
57 #define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP 1
58
59 /* Default vector sharing */
60 #define IDPF_MBX_Q_VEC 1
61 #define IDPF_MIN_Q_VEC 1
62 #define IDPF_MIN_RDMA_VEC 2
63 /* Data vector for NOIRQ queues */
64 #define IDPF_RESERVED_VECS 1
65
66 #define IDPF_DFLT_TX_Q_DESC_COUNT 512
67 #define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
68 #define IDPF_DFLT_RX_Q_DESC_COUNT 512
69
70 /* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
71 * given RX completion queue has descriptors. This includes _ALL_ buffer
72 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
73 * you have a total of 1024 buffers so your RX queue _must_ have at least that
74 * many descriptors. This macro divides a given number of RX descriptors by
75 * the number of buffer queues to calculate how many descriptors each buffer
76 * queue can have without overrunning the RX queue.
77 *
78 * If you give hardware more buffers than completion descriptors, then if
79 * hardware gets a chance to post more than a ring's worth of descriptors
80 * before SW gets an interrupt and overwrites the SW head, the gen bit in the
81 * descriptor will be wrong. Any overwritten descriptors' buffers will be gone
82 * forever and SW has no reasonable way to tell that this has happened. From
83 * the SW perspective, when we finally get an interrupt, it looks like we're
84 * still waiting for a descriptor to be done, stalling forever.
85 */
86 #define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ) ((RXD) / (NUM_BUFQ))
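/* Illustrative arithmetic only, not used by the driver: with an RX queue of
 * IDPF_DFLT_RX_Q_DESC_COUNT (512) descriptors and IDPF_MAX_BUFQS_PER_RXQ_GRP
 * (2) buffer queues, each buffer queue gets IDPF_RX_BUFQ_DESC_COUNT(512, 2) ==
 * 256 descriptors, so the total buffer count (2 * 256 == 512) can never exceed
 * the RX queue's descriptor count.
 */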
87
88 #define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)
89
90 #define IDPF_RX_BUMP_NTC(rxq, ntc) \
91 do { \
92 if (unlikely(++(ntc) == (rxq)->desc_count)) { \
93 ntc = 0; \
94 idpf_queue_change(GEN_CHK, rxq); \
95 } \
96 } while (0)
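/* A minimal usage sketch (local variable names are illustrative): callers walk
 * the ring with a local copy of next_to_clean and let the macro handle both
 * the wrap and the SW generation-bit flip:
 *
 *	u32 ntc = rxq->next_to_clean;
 *
 *	// ...process the descriptor at ntc...
 *	IDPF_RX_BUMP_NTC(rxq, ntc);
 *	rxq->next_to_clean = ntc;
 */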
97
98 #define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx) \
99 do { \
100 if (unlikely(++(idx) == (q)->desc_count)) \
101 idx = 0; \
102 } while (0)
103
104 #define IDPF_RX_MAX_BUF_SZ (16384 - 128)
105 #define IDPF_RX_BUF_STRIDE 32
106 #define IDPF_RX_BUF_POST_STRIDE 16
107 #define IDPF_LOW_WATERMARK 64
108
109 #define IDPF_TX_TSO_MIN_MSS 88
110
111 /* Minimum number of descriptors between 2 descriptors with the RE bit set;
112 * only relevant in flow scheduling mode
113 */
114 #define IDPF_TX_SPLITQ_RE_MIN_GAP 64
115
116 #define IDPF_RFL_BI_GEN_M BIT(16)
117 #define IDPF_RFL_BI_BUFID_M GENMASK(15, 0)
118
119 #define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
120 #define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
121
122 #define IDPF_DESC_UNUSED(txq) \
123 ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
124 (txq)->next_to_clean - (txq)->next_to_use - 1)
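/* Illustrative example: with desc_count == 512, next_to_clean == 10 and
 * next_to_use == 500, IDPF_DESC_UNUSED() yields 512 + 10 - 500 - 1 == 21 free
 * descriptors. One slot is always kept unused so that next_to_use can never
 * catch up to next_to_clean from behind.
 */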
125
126 #define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
127 /* Determine the absolute number of completions pending, i.e. the number of
128 * completions that are expected to arrive on the TX completion queue.
129 */
130 #define IDPF_TX_COMPLQ_PENDING(txq) \
131 (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
132 0 : U32_MAX) + \
133 (txq)->num_completions_pending - (txq)->complq->num_completions)
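/* Illustrative example: if 1000 completions have been requested by the TX
 * queues (num_completions_pending) and 990 have arrived on the completion
 * queue (num_completions), IDPF_TX_COMPLQ_PENDING() evaluates to
 * 0 + 1000 - 990 == 10. The U32_MAX term only kicks in when the completion
 * counter has run ahead of the pending counter, i.e. across a counter wrap,
 * keeping the result positive.
 */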
134
135 #define IDPF_TXBUF_NULL U32_MAX
136
137 #define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
138
139 #define IDPF_TX_FLAGS_TSO BIT(0)
140 #define IDPF_TX_FLAGS_IPV4 BIT(1)
141 #define IDPF_TX_FLAGS_IPV6 BIT(2)
142 #define IDPF_TX_FLAGS_TUNNEL BIT(3)
143 #define IDPF_TX_FLAGS_TSYN BIT(4)
144
145 struct libeth_rq_napi_stats;
146
147 union idpf_tx_flex_desc {
148 struct idpf_flex_tx_desc q; /* queue based scheduling */
149 struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
150 };
151
152 #define idpf_tx_buf libeth_sqe
153
154 /**
155 * struct idpf_tx_offload_params - Offload parameters for a given packet
156 * @tx_flags: Feature flags enabled for this packet
157 * @hdr_offsets: Offset parameter for single queue model
158 * @cd_tunneling: Type of tunneling enabled for single queue model
159 * @tso_len: Total length of payload to segment
160 * @mss: Segment size
161 * @tso_segs: Number of segments to be sent
162 * @tso_hdr_len: Length of headers to be duplicated
163 * @td_cmd: Command field to be inserted into descriptor
164 */
165 struct idpf_tx_offload_params {
166 u32 tx_flags;
167
168 u32 hdr_offsets;
169 u32 cd_tunneling;
170
171 u32 tso_len;
172 u16 mss;
173 u16 tso_segs;
174 u16 tso_hdr_len;
175
176 u16 td_cmd;
177 };
178
179 /**
180 * struct idpf_tx_splitq_params
181 * @dtype: General descriptor info
182 * @eop_cmd: Type of EOP
183 * @compl_tag: Associated tag for completion
184 * @td_tag: Descriptor tunneling tag
185 * @offload: Offload parameters
186 * @prev_ntu: stored TxQ next_to_use in case of rollback
187 * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
188 * @prev_refill_gen: stored refillq generation bit in case of packet rollback
189 */
190 struct idpf_tx_splitq_params {
191 enum idpf_tx_desc_dtype_value dtype;
192 u16 eop_cmd;
193 union {
194 u16 compl_tag;
195 u16 td_tag;
196 };
197
198 struct idpf_tx_offload_params offload;
199
200 u16 prev_ntu;
201 u16 prev_refill_ntc;
202 bool prev_refill_gen;
203 };
204
205 enum idpf_tx_ctx_desc_eipt_offload {
206 IDPF_TX_CTX_EXT_IP_NONE = 0x0,
207 IDPF_TX_CTX_EXT_IP_IPV6 = 0x1,
208 IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
209 IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
210 };
211
212 #define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
213 #define IDPF_TX_MIN_PKT_LEN 17
214 #define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
215 #define IDPF_TX_DESCS_PER_CACHE_LINE (L1_CACHE_BYTES / \
216 sizeof(struct idpf_flex_tx_desc))
217 #define IDPF_TX_DESCS_FOR_CTX 1
218 /* TX descriptors needed, worst case */
219 #define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
220 IDPF_TX_DESCS_PER_CACHE_LINE + \
221 IDPF_TX_DESCS_FOR_SKB_DATA_PTR)
222
223 /* The size limit for a transmit buffer in a descriptor is (16K - 1).
224 * In order to align with the read requests we will align the value down to
225 * the nearest 4K, which represents our maximum read request size.
226 */
227 #define IDPF_TX_MAX_READ_REQ_SIZE SZ_4K
228 #define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
229 #define IDPF_TX_MAX_DESC_DATA_ALIGNED \
230 ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
231
232 #define idpf_rx_buf libeth_fqe
233
234 #define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
235 #define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
236 (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
237 #define IDPF_RX_PTYPE_HDR_SZ sizeof(struct virtchnl2_get_ptype_info)
238 #define IDPF_RX_MAX_PTYPES_PER_BUF \
239 DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
240 IDPF_RX_MAX_PTYPE_SZ)
241
242 #define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)
243
244 #define IDPF_TUN_IP_GRE (\
245 IDPF_PTYPE_TUNNEL_IP |\
246 IDPF_PTYPE_TUNNEL_IP_GRENAT)
247
248 #define IDPF_TUN_IP_GRE_MAC (\
249 IDPF_TUN_IP_GRE |\
250 IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)
251
252 #define IDPF_RX_MAX_PTYPE 1024
253 #define IDPF_RX_MAX_BASE_PTYPE 256
254 #define IDPF_INVALID_PTYPE_ID 0xFFFF
255
256 enum idpf_tunnel_state {
257 IDPF_PTYPE_TUNNEL_IP = BIT(0),
258 IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
259 IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC = BIT(2),
260 };
261
262 struct idpf_ptype_state {
263 bool outer_ip:1;
264 bool outer_frag:1;
265 u8 tunnel_state:6;
266 };
267
268 /**
269 * enum idpf_queue_flags_t
270 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
271 * identify new descriptor writebacks on the ring. HW sets
272 * the gen bit to 1 on the first writeback of any given
273 * descriptor. After the ring wraps, HW sets the gen bit of
274 * those descriptors to 0, and continues flipping
275 * 0->1 or 1->0 on each ring wrap. SW maintains its own
276 * gen bit to know what value will indicate writebacks on
277 * the next pass around the ring. E.g. it is initialized
278 * to 1 and knows that reading a gen bit of 1 in any
279 * descriptor on the initial pass of the ring indicates a
280 * writeback. It also flips on every ring wrap.
281 * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
282 * bit and Q_RFL_GEN is the SW bit.
283 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
284 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
285 * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
286 * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
287 * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
288 * queue
289 * @__IDPF_Q_NOIRQ: queue is polling-driven and has no interrupt
290 * @__IDPF_Q_XDP: this is an XDP queue
291 * @__IDPF_Q_XSK: the queue has an XSk pool installed
292 * @__IDPF_Q_FLAGS_NBITS: Must be last
293 */
294 enum idpf_queue_flags_t {
295 __IDPF_Q_GEN_CHK,
296 __IDPF_Q_RFL_GEN_CHK,
297 __IDPF_Q_FLOW_SCH_EN,
298 __IDPF_Q_SW_MARKER,
299 __IDPF_Q_CRC_EN,
300 __IDPF_Q_HSPLIT_EN,
301 __IDPF_Q_PTP,
302 __IDPF_Q_NOIRQ,
303 __IDPF_Q_XDP,
304 __IDPF_Q_XSK,
305
306 __IDPF_Q_FLAGS_NBITS,
307 };
308
309 #define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
310 #define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
311 #define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
312 #define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
313
314 #define idpf_queue_has_clear(f, q) \
315 __test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
316 #define idpf_queue_assign(f, q, v) \
317 __assign_bit(__IDPF_Q_##f, (q)->flags, v)
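/* Example usage of the flag accessors (illustrative only):
 *
 *	idpf_queue_set(FLOW_SCH_EN, txq);
 *	if (idpf_queue_has(HSPLIT_EN, rxq))
 *		rx_hsplit_en = true;
 *
 * The underlying __set_bit()/__change_bit() helpers are non-atomic, so a
 * queue's flags must not be modified concurrently from multiple contexts.
 */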
318
319 /**
320 * struct idpf_vec_regs
321 * @dyn_ctl_reg: Dynamic control interrupt register offset
322 * @itrn_reg: Interrupt Throttling Rate register offset
323 * @itrn_index_spacing: Register spacing between ITR registers of the same
324 * vector
325 */
326 struct idpf_vec_regs {
327 u32 dyn_ctl_reg;
328 u32 itrn_reg;
329 u32 itrn_index_spacing;
330 };
331
332 /**
333 * struct idpf_intr_reg
334 * @dyn_ctl: Dynamic control interrupt register
335 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
336 * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
337 * @dyn_ctl_itridx_s: Register bit offset for ITR index
338 * @dyn_ctl_itridx_m: Mask for ITR index
339 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
340 * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
341 * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
342 * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
343 * @rx_itr: RX ITR register
344 * @tx_itr: TX ITR register
345 * @icr_ena: Interrupt cause register offset
346 * @icr_ena_ctlq_m: Mask for ICR
347 */
348 struct idpf_intr_reg {
349 void __iomem *dyn_ctl;
350 u32 dyn_ctl_intena_m;
351 u32 dyn_ctl_intena_msk_m;
352 u32 dyn_ctl_itridx_s;
353 u32 dyn_ctl_itridx_m;
354 u32 dyn_ctl_intrvl_s;
355 u32 dyn_ctl_wb_on_itr_m;
356 u32 dyn_ctl_sw_itridx_ena_m;
357 u32 dyn_ctl_swint_trig_m;
358 void __iomem *rx_itr;
359 void __iomem *tx_itr;
360 void __iomem *icr_ena;
361 u32 icr_ena_ctlq_m;
362 };
363
364 /**
365 * struct idpf_q_vector
366 * @vport: Vport back pointer
367 * @num_rxq: Number of RX queues
368 * @num_txq: Number of TX queues
369 * @num_bufq: Number of buffer queues
370 * @num_complq: number of completion queues
371 * @num_xsksq: number of XSk send queues
372 * @rx: Array of RX queues to service
373 * @tx: Array of TX queues to service
374 * @bufq: Array of buffer queues to service
375 * @complq: array of completion queues
376 * @xsksq: array of XSk send queues
377 * @intr_reg: See struct idpf_intr_reg
378 * @csd: XSk wakeup CSD
379 * @total_events: Number of interrupts processed
380 * @wb_on_itr: whether WB on ITR is enabled
381 * @napi: napi handler
382 * @tx_dim: Data for TX net_dim algorithm
383 * @tx_itr_value: TX interrupt throttling rate
384 * @tx_intr_mode: Dynamic ITR or not
385 * @tx_itr_idx: TX ITR index
386 * @rx_dim: Data for RX net_dim algorithm
387 * @rx_itr_value: RX interrupt throttling rate
388 * @rx_intr_mode: Dynamic ITR or not
389 * @rx_itr_idx: RX ITR index
390 * @v_idx: Vector index
391 */
392 struct idpf_q_vector {
393 __cacheline_group_begin_aligned(read_mostly);
394 struct idpf_vport *vport;
395
396 u16 num_rxq;
397 u16 num_txq;
398 u16 num_bufq;
399 u16 num_complq;
400 u16 num_xsksq;
401 struct idpf_rx_queue **rx;
402 struct idpf_tx_queue **tx;
403 struct idpf_buf_queue **bufq;
404 struct idpf_compl_queue **complq;
405 struct idpf_tx_queue **xsksq;
406
407 struct idpf_intr_reg intr_reg;
408 __cacheline_group_end_aligned(read_mostly);
409
410 __cacheline_group_begin_aligned(read_write);
411 call_single_data_t csd;
412
413 u16 total_events;
414 bool wb_on_itr;
415
416 struct napi_struct napi;
417
418 struct dim tx_dim;
419 u16 tx_itr_value;
420 bool tx_intr_mode;
421 u32 tx_itr_idx;
422
423 struct dim rx_dim;
424 u16 rx_itr_value;
425 bool rx_intr_mode;
426 u32 rx_itr_idx;
427 __cacheline_group_end_aligned(read_write);
428
429 __cacheline_group_begin_aligned(cold);
430 u16 v_idx;
431
432 __cacheline_group_end_aligned(cold);
433 };
434 libeth_cacheline_set_assert(struct idpf_q_vector, 136,
435 56 + sizeof(struct napi_struct) +
436 2 * sizeof(struct dim),
437 8);
438
439 struct idpf_rx_queue_stats {
440 u64_stats_t packets;
441 u64_stats_t bytes;
442 u64_stats_t rsc_pkts;
443 u64_stats_t hw_csum_err;
444 u64_stats_t hsplit_pkts;
445 u64_stats_t hsplit_buf_ovf;
446 u64_stats_t bad_descs;
447 };
448
449 struct idpf_tx_queue_stats {
450 u64_stats_t packets;
451 u64_stats_t bytes;
452 u64_stats_t lso_pkts;
453 u64_stats_t linearize;
454 u64_stats_t q_busy;
455 u64_stats_t skb_drops;
456 u64_stats_t dma_map_errs;
457 u64_stats_t tstamp_skipped;
458 };
459
460 #define IDPF_ITR_DYNAMIC 1
461 #define IDPF_ITR_MAX 0x1FE0
462 #define IDPF_ITR_20K 0x0032
463 #define IDPF_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
464 #define IDPF_ITR_MASK 0x1FFE /* ITR register value alignment mask */
465 #define ITR_REG_ALIGN(setting) ((setting) & IDPF_ITR_MASK)
466 #define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
467 #define IDPF_ITR_TX_DEF IDPF_ITR_20K
468 #define IDPF_ITR_RX_DEF IDPF_ITR_20K
469 /* Index used for 'SW ITR' update in DYN_CTL register */
470 #define IDPF_SW_ITR_UPDATE_IDX 2
471 /* Index used for 'No ITR' update in DYN_CTL register */
472 #define IDPF_NO_ITR_UPDATE_IDX 3
473 #define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt)
474 #define IDPF_DIM_DEFAULT_PROFILE_IX 1
475
476 /**
477 * struct idpf_rx_queue - software structure representing a receive queue
478 * @rx: universal receive descriptor array
479 * @single_buf: buffer descriptor array in singleq
480 * @desc_ring: virtual descriptor ring address
481 * @bufq_sets: Pointer to the array of buffer queues in splitq mode
482 * @napi: NAPI instance corresponding to this queue (splitq)
483 * @xdp_prog: attached XDP program
484 * @rx_buf: See struct &libeth_fqe
485 * @pp: Page pool pointer in singleq mode
486 * @tail: Tail offset. Used for both the single and split queue models.
487 * @flags: See enum idpf_queue_flags_t
488 * @idx: For RX queue, it is used as an index into the total RX queues
489 * across groups and for skb reporting.
490 * @desc_count: Number of descriptors
491 * @num_xdp_txq: total number of XDP Tx queues
492 * @xdpsqs: shortcut for XDP Tx queues array
493 * @rxdids: Supported RX descriptor ids
494 * @truesize: data buffer truesize in singleq
495 * @rx_ptype_lkup: LUT of Rx ptypes
496 * @xdp_rxq: XDP queue info
497 * @next_to_use: Next descriptor to use
498 * @next_to_clean: Next descriptor to clean
499 * @next_to_alloc: RX buffer to allocate at
500 * @xdp: XDP buffer with the current frame
501 * @xsk: current XDP buffer in XSk mode
502 * @pool: XSk pool if installed
503 * @cached_phc_time: Cached PHC time for the Rx queue
504 * @stats_sync: See struct u64_stats_sync
505 * @q_stats: See union idpf_rx_queue_stats
506 * @q_id: Queue id
507 * @size: Length of descriptor ring in bytes
508 * @dma: Physical address of ring
509 * @q_vector: Backreference to associated vector
510 * @rx_buffer_low_watermark: RX buffer low watermark
511 * @rx_hbuf_size: Header buffer size
512 * @rx_buf_size: Buffer size
513 * @rx_max_pkt_size: RX max packet size
514 */
515 struct idpf_rx_queue {
516 __cacheline_group_begin_aligned(read_mostly);
517 union {
518 union virtchnl2_rx_desc *rx;
519 struct virtchnl2_singleq_rx_buf_desc *single_buf;
520
521 void *desc_ring;
522 };
523 union {
524 struct {
525 struct idpf_bufq_set *bufq_sets;
526 struct napi_struct *napi;
527 struct bpf_prog __rcu *xdp_prog;
528 };
529 struct {
530 struct libeth_fqe *rx_buf;
531 struct page_pool *pp;
532 void __iomem *tail;
533 };
534 };
535
536 DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
537 u16 idx;
538 u16 desc_count;
539
540 u32 num_xdp_txq;
541 union {
542 struct idpf_tx_queue **xdpsqs;
543 struct {
544 u32 rxdids;
545 u32 truesize;
546 };
547 };
548 const struct libeth_rx_pt *rx_ptype_lkup;
549
550 struct xdp_rxq_info xdp_rxq;
551 __cacheline_group_end_aligned(read_mostly);
552
553 __cacheline_group_begin_aligned(read_write);
554 u32 next_to_use;
555 u32 next_to_clean;
556 u32 next_to_alloc;
557
558 union {
559 struct libeth_xdp_buff_stash xdp;
560 struct {
561 struct libeth_xdp_buff *xsk;
562 struct xsk_buff_pool *pool;
563 };
564 };
565 u64 cached_phc_time;
566
567 struct u64_stats_sync stats_sync;
568 struct idpf_rx_queue_stats q_stats;
569 __cacheline_group_end_aligned(read_write);
570
571 __cacheline_group_begin_aligned(cold);
572 u32 q_id;
573 u32 size;
574 dma_addr_t dma;
575
576 struct idpf_q_vector *q_vector;
577
578 u16 rx_buffer_low_watermark;
579 u16 rx_hbuf_size;
580 u16 rx_buf_size;
581 u16 rx_max_pkt_size;
582 __cacheline_group_end_aligned(cold);
583 };
584 libeth_cacheline_set_assert(struct idpf_rx_queue,
585 ALIGN(64, __alignof(struct xdp_rxq_info)) +
586 sizeof(struct xdp_rxq_info),
587 96 + offsetof(struct idpf_rx_queue, q_stats) -
588 offsetofend(struct idpf_rx_queue, cached_phc_time),
589 32);
590
591 /**
592 * struct idpf_tx_queue - software structure representing a transmit queue
593 * @base_tx: base Tx descriptor array
594 * @base_ctx: base Tx context descriptor array
595 * @flex_tx: flex Tx descriptor array
596 * @flex_ctx: flex Tx context descriptor array
597 * @desc_ring: virtual descriptor ring address
598 * @tx_buf: See struct idpf_tx_buf
599 * @txq_grp: See struct idpf_txq_group
600 * @complq: corresponding completion queue in XDP mode
601 * @dev: Device back pointer for DMA mapping
602 * @pool: corresponding XSk pool if installed
603 * @tail: Tail offset. Used for both the single and split queue models
604 * @flags: See enum idpf_queue_flags_t
605 * @idx: For TX queue, it is used as an index to map between the TX queue group
606 * and the hot path TX pointers stored in vport. Used in both singleq/splitq.
607 * @desc_count: Number of descriptors
608 * @tx_min_pkt_len: Min supported packet length
609 * @thresh: XDP queue cleaning threshold
610 * @netdev: &net_device corresponding to this queue
611 * @next_to_use: Next descriptor to use
612 * @next_to_clean: Next descriptor to clean
613 * @last_re: last descriptor index that RE bit was set
614 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
615 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
616 * the TX completion queue, it can be for any TXQ associated
617 * with that completion queue. This means we can clean up to
618 * N TXQs during a single call to clean the completion queue.
619 * cleaned_bytes|pkts tracks the clean stats per TXQ during
620 * that single call to clean the completion queue. By doing so,
621 * we can update BQL with aggregate cleaned stats for each TXQ
622 * only once at the end of the cleaning routine.
623 * @clean_budget: singleq only, queue cleaning budget
624 * @cleaned_pkts: Number of packets cleaned for the above case
625 * @refillq: Pointer to refill queue
626 * @pending: number of pending descriptors to send in QB
627 * @xdp_tx: number of pending &xdp_buff or &xdp_frame buffers
628 * @timer: timer for XDP Tx queue cleanup
629 * @xdp_lock: lock for XDP Tx queues sharing
630 * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
631 * @tstamp_task: Work that handles Tx timestamp read
632 * @stats_sync: See struct u64_stats_sync
633 * @q_stats: See union idpf_tx_queue_stats
634 * @q_id: Queue id
635 * @size: Length of descriptor ring in bytes
636 * @dma: Physical address of ring
637 * @q_vector: Backreference to associated vector
638 * @buf_pool_size: Total number of idpf_tx_buf
639 * @rel_q_id: relative virtchnl queue index
640 */
641 struct idpf_tx_queue {
642 __cacheline_group_begin_aligned(read_mostly);
643 union {
644 struct idpf_base_tx_desc *base_tx;
645 struct idpf_base_tx_ctx_desc *base_ctx;
646 union idpf_tx_flex_desc *flex_tx;
647 union idpf_flex_tx_ctx_desc *flex_ctx;
648
649 void *desc_ring;
650 };
651 struct libeth_sqe *tx_buf;
652 union {
653 struct idpf_txq_group *txq_grp;
654 struct idpf_compl_queue *complq;
655 };
656 union {
657 struct device *dev;
658 struct xsk_buff_pool *pool;
659 };
660 void __iomem *tail;
661
662 DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
663 u16 idx;
664 u16 desc_count;
665
666 union {
667 u16 tx_min_pkt_len;
668 u32 thresh;
669 };
670
671 struct net_device *netdev;
672 __cacheline_group_end_aligned(read_mostly);
673
674 __cacheline_group_begin_aligned(read_write);
675 u32 next_to_use;
676 u32 next_to_clean;
677
678 union {
679 struct {
680 u16 last_re;
681 u16 tx_max_bufs;
682
683 union {
684 u32 cleaned_bytes;
685 u32 clean_budget;
686 };
687 u16 cleaned_pkts;
688
689 struct idpf_sw_queue *refillq;
690 };
691 struct {
692 u32 pending;
693 u32 xdp_tx;
694
695 struct libeth_xdpsq_timer *timer;
696 struct libeth_xdpsq_lock xdp_lock;
697 };
698 };
699
700 struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
701 struct work_struct *tstamp_task;
702
703 struct u64_stats_sync stats_sync;
704 struct idpf_tx_queue_stats q_stats;
705 __cacheline_group_end_aligned(read_write);
706
707 __cacheline_group_begin_aligned(cold);
708 u32 q_id;
709 u32 size;
710 dma_addr_t dma;
711
712 struct idpf_q_vector *q_vector;
713
714 u32 buf_pool_size;
715 u32 rel_q_id;
716 __cacheline_group_end_aligned(cold);
717 };
718 libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
719 104 +
720 offsetof(struct idpf_tx_queue, cached_tstamp_caps) -
721 offsetofend(struct idpf_tx_queue, timer) +
722 offsetof(struct idpf_tx_queue, q_stats) -
723 offsetofend(struct idpf_tx_queue, tstamp_task),
724 32);
725
726 /**
727 * struct idpf_buf_queue - software structure representing a buffer queue
728 * @split_buf: buffer descriptor array
729 * @buf: &libeth_fqe for data buffers
730 * @pp: &page_pool for data buffers
731 * @xsk_buf: &xdp_buff for XSk Rx buffers
732 * @pool: &xsk_buff_pool on XSk queues
733 * @hdr_buf: &libeth_fqe for header buffers
734 * @hdr_pp: &page_pool for header buffers
735 * @tail: Tail offset
736 * @flags: See enum idpf_queue_flags_t
737 * @desc_count: Number of descriptors
738 * @thresh: refill threshold in XSk
739 * @next_to_use: Next descriptor to use
740 * @next_to_clean: Next descriptor to clean
741 * @next_to_alloc: RX buffer to allocate at
742 * @pending: number of buffers to refill (XSk)
743 * @hdr_truesize: truesize for buffer headers
744 * @truesize: truesize for data buffers
745 * @q_id: Queue id
746 * @size: Length of descriptor ring in bytes
747 * @dma: Physical address of ring
748 * @q_vector: Backreference to associated vector
749 * @rx_buffer_low_watermark: RX buffer low watermark
750 * @rx_hbuf_size: Header buffer size
751 * @rx_buf_size: Buffer size
752 */
753 struct idpf_buf_queue {
754 __cacheline_group_begin_aligned(read_mostly);
755 struct virtchnl2_splitq_rx_buf_desc *split_buf;
756 union {
757 struct {
758 struct libeth_fqe *buf;
759 struct page_pool *pp;
760 };
761 struct {
762 struct libeth_xdp_buff **xsk_buf;
763 struct xsk_buff_pool *pool;
764 };
765 };
766 struct libeth_fqe *hdr_buf;
767 struct page_pool *hdr_pp;
768 void __iomem *tail;
769
770 DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
771 u32 desc_count;
772
773 u32 thresh;
774 __cacheline_group_end_aligned(read_mostly);
775
776 __cacheline_group_begin_aligned(read_write);
777 u32 next_to_use;
778 u32 next_to_clean;
779 u32 next_to_alloc;
780
781 u32 pending;
782 u32 hdr_truesize;
783 u32 truesize;
784 __cacheline_group_end_aligned(read_write);
785
786 __cacheline_group_begin_aligned(cold);
787 u32 q_id;
788 u32 size;
789 dma_addr_t dma;
790
791 struct idpf_q_vector *q_vector;
792
793 u16 rx_buffer_low_watermark;
794 u16 rx_hbuf_size;
795 u16 rx_buf_size;
796 __cacheline_group_end_aligned(cold);
797 };
798 libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
799
800 /**
801 * struct idpf_compl_queue - software structure representing a completion queue
802 * @comp: 8-byte completion descriptor array
803 * @comp_4b: 4-byte completion descriptor array
804 * @desc_ring: virtual descriptor ring address
805 * @txq_grp: See struct idpf_txq_group
806 * @flags: See enum idpf_queue_flags_t
807 * @desc_count: Number of descriptors
808 * @clean_budget: queue cleaning budget
809 * @netdev: &net_device corresponding to this queue
810 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
811 * and bufq.
812 * @next_to_clean: Next descriptor to clean
813 * @num_completions: Only relevant for TX completion queue. It tracks the
814 * number of completions received to compare against the
815 * number of completions pending, as accumulated by the
816 * TX queues.
817 * @q_id: Queue id
818 * @size: Length of descriptor ring in bytes
819 * @dma: Physical address of ring
820 * @q_vector: Backreference to associated vector
821 */
822 struct idpf_compl_queue {
823 __cacheline_group_begin_aligned(read_mostly);
824 union {
825 struct idpf_splitq_tx_compl_desc *comp;
826 struct idpf_splitq_4b_tx_compl_desc *comp_4b;
827
828 void *desc_ring;
829 };
830 struct idpf_txq_group *txq_grp;
831
832 DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
833 u32 desc_count;
834
835 u32 clean_budget;
836 struct net_device *netdev;
837 __cacheline_group_end_aligned(read_mostly);
838
839 __cacheline_group_begin_aligned(read_write);
840 u32 next_to_use;
841 u32 next_to_clean;
842
843 aligned_u64 num_completions;
844 __cacheline_group_end_aligned(read_write);
845
846 __cacheline_group_begin_aligned(cold);
847 u32 q_id;
848 u32 size;
849 dma_addr_t dma;
850
851 struct idpf_q_vector *q_vector;
852 __cacheline_group_end_aligned(cold);
853 };
854 libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);
855
856 /**
857 * struct idpf_sw_queue
858 * @ring: Pointer to the ring
859 * @flags: See enum idpf_queue_flags_t
860 * @desc_count: Descriptor count
861 * @next_to_use: Buffer to allocate at
862 * @next_to_clean: Next descriptor to clean
863 *
864 * Software queues are used in splitq mode to manage buffers between rxq
865 * producer and the bufq consumer. These are required in order to maintain a
866 * lockless buffer management system and are strictly software only constructs.
867 */
868 struct idpf_sw_queue {
869 __cacheline_group_begin_aligned(read_mostly);
870 u32 *ring;
871
872 DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
873 u32 desc_count;
874 __cacheline_group_end_aligned(read_mostly);
875
876 __cacheline_group_begin_aligned(read_write);
877 u32 next_to_use;
878 u32 next_to_clean;
879 __cacheline_group_end_aligned(read_write);
880 };
881 libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
882 libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
883 libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);
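/* A rough sketch of the refillq producer side, assuming a computed
 * next-to-use index "nta" and a reclaimed buffer ID "buf_id" (names are
 * illustrative):
 *
 *	refillq->ring[nta] =
 *		FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
 *		FIELD_PREP(IDPF_RFL_BI_GEN_M,
 *			   idpf_queue_has(RFL_GEN_CHK, refillq));
 *
 * The consumer on the other side compares the stored GEN bit against its own
 * SW generation state to know whether the entry is valid before reposting the
 * buffer to hardware.
 */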
884
885 /**
886 * struct idpf_rxq_set
887 * @rxq: RX queue
888 * @refillq: pointers to refill queues
889 *
890 * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
891 * Each rxq needs a refillq to return used buffers back to the respective bufq.
892 * Bufqs then clean these refillqs for buffers to give to hardware.
893 */
894 struct idpf_rxq_set {
895 struct idpf_rx_queue rxq;
896 struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
897 };
898
899 /**
900 * struct idpf_bufq_set
901 * @bufq: Buffer queue
902 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
903 * in idpf_rxq_group.
904 * @refillqs: Pointer to refill queues array.
905 *
906 * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
907 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
908 * Used buffers received by rxqs will be put on refillqs which bufqs will
909 * clean to return new buffers back to hardware.
910 *
911 * Buffers needed by some number of rxqs associated in this rxq_group are
912 * managed by at most two bufqs (depending on performance configuration).
913 */
914 struct idpf_bufq_set {
915 struct idpf_buf_queue bufq;
916 int num_refillqs;
917 struct idpf_sw_queue *refillqs;
918 };
919
920 /**
921 * struct idpf_rxq_group
922 * @vport: Vport back pointer
923 * @singleq: Struct with single queue related members
924 * @singleq.num_rxq: Number of RX queues associated
925 * @singleq.rxqs: Array of RX queue pointers
926 * @splitq: Struct with split queue related members
927 * @splitq.num_rxq_sets: Number of RX queue sets
928 * @splitq.rxq_sets: Array of RX queue sets
929 * @splitq.bufq_sets: Buffer queue set pointer
930 *
931 * In singleq mode, an rxq_group is simply an array of rxqs. In splitq, a
932 * rxq_group contains all the rxqs, bufqs and refillqs needed to
933 * manage buffers in splitq mode.
934 */
935 struct idpf_rxq_group {
936 struct idpf_vport *vport;
937
938 union {
939 struct {
940 u16 num_rxq;
941 struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
942 } singleq;
943 struct {
944 u16 num_rxq_sets;
945 struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
946 struct idpf_bufq_set *bufq_sets;
947 } splitq;
948 };
949 };
950
951 /**
952 * struct idpf_txq_group
953 * @vport: Vport back pointer
954 * @num_txq: Number of TX queues associated
955 * @txqs: Array of TX queue pointers
956 * @complq: Associated completion queue pointer, split queue only
957 * @num_completions_pending: Total number of completions pending for the
958 * completion queue, accumulated for all TX queues
959 * associated with that completion queue.
960 *
961 * Between singleq and splitq, a txq_group is largely the same except for the
962 * complq. In splitq a single complq is responsible for handling completions
963 * for some number of txqs associated in this txq_group.
964 */
965 struct idpf_txq_group {
966 struct idpf_vport *vport;
967
968 u16 num_txq;
969 struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
970
971 struct idpf_compl_queue *complq;
972
973 aligned_u64 num_completions_pending;
974 };
975
976 static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
977 {
978 u32 cpu;
979
980 if (!q_vector)
981 return NUMA_NO_NODE;
982
983 cpu = cpumask_first(&q_vector->napi.config->affinity_mask);
984
985 return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
986 }
987
988 /**
989 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
990 * @size: transmit request size in bytes
991 *
992 * In the case where a large frag (>= 16K) needs to be split across multiple
993 * descriptors, we need to assume that we can have no more than 12K of data
994 * per descriptor due to hardware alignment restrictions (4K alignment).
995 */
996 static inline u32 idpf_size_to_txd_count(unsigned int size)
997 {
998 return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
999 }
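/* Illustrative example: a 16K fragment needs
 * idpf_size_to_txd_count(SZ_16K) == DIV_ROUND_UP(16384, 12288) == 2
 * descriptors, since each data descriptor carries at most
 * IDPF_TX_MAX_DESC_DATA_ALIGNED (12K) bytes.
 */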
1000
1001 /**
1002 * idpf_tx_singleq_build_ctob - populate command tag offset and size
1003 * @td_cmd: Command to be filled in desc
1004 * @td_offset: Offset to be filled in desc
1005 * @size: Size of the buffer
1006 * @td_tag: td tag to be filled
1007 *
1008 * Returns the 64 bit value populated with the input parameters
1009 */
1010 static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
1011 unsigned int size, u64 td_tag)
1012 {
1013 return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
1014 (td_cmd << IDPF_TXD_QW1_CMD_S) |
1015 (td_offset << IDPF_TXD_QW1_OFFSET_S) |
1016 ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
1017 (td_tag << IDPF_TXD_QW1_L2TAG1_S));
1018 }
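/* A minimal usage sketch (the descriptor field name is for illustration only):
 * build the qword for the last data descriptor of a buffer of @size bytes with
 * no offsets or L2 tag:
 *
 *	txd->qw1 = idpf_tx_singleq_build_ctob(IDPF_TXD_LAST_DESC_CMD, 0,
 *					      size, 0);
 */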
1019
1020 void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
1021 struct idpf_tx_splitq_params *params,
1022 u16 td_cmd, u16 size);
1023 void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
1024 struct idpf_tx_splitq_params *params,
1025 u16 td_cmd, u16 size);
1026 /**
1027 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
1028 * @desc: descriptor to populate
1029 * @params: pointer to tx params struct
1030 * @td_cmd: command to be filled in desc
1031 * @size: size of buffer
1032 */
1033 static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
1034 struct idpf_tx_splitq_params *params,
1035 u16 td_cmd, u16 size)
1036 {
1037 if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
1038 idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
1039 else
1040 idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
1041 }
1042
1043 /**
1044 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
1045 * @q_vector: pointer to queue vector struct
1046 */
1047 static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
1048 {
1049 struct idpf_intr_reg *reg;
1050
1051 if (q_vector->wb_on_itr)
1052 return;
1053
1054 q_vector->wb_on_itr = true;
1055 reg = &q_vector->intr_reg;
1056
1057 writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
1058 (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
1059 reg->dyn_ctl);
1060 }
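/* A minimal sketch of the intended call site: when NAPI polling ends without
 * re-enabling the vector's interrupt, WB-on-ITR keeps descriptor writebacks
 * flowing while the interrupt stays masked:
 *
 *	if (napi_complete_done(&q_vector->napi, work_done))
 *		idpf_vport_intr_update_itr_ena_irq(q_vector);
 *	else
 *		idpf_vport_intr_set_wb_on_itr(q_vector);
 */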
1061
1062 /**
1063 * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
1064 * @refillq: pointer to refillq containing buf_ids
1065 */
1066 static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
1067 {
1068 return (refillq->next_to_use > refillq->next_to_clean ?
1069 0 : refillq->desc_count) +
1070 refillq->next_to_use - refillq->next_to_clean - 1;
1071 }
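/* Illustrative example: with desc_count == 512, next_to_use == 5 and
 * next_to_clean == 507, the refillq has 512 + 5 - 507 - 1 == 9 free buf_ids;
 * as with IDPF_DESC_UNUSED(), one slot is reserved to distinguish a full ring
 * from an empty one.
 */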
1072
1073 int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
1074 void idpf_vport_init_num_qs(struct idpf_vport *vport,
1075 struct virtchnl2_create_vport *vport_msg);
1076 void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
1077 int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
1078 struct virtchnl2_create_vport *vport_msg,
1079 struct idpf_vport_max_q *max_q);
1080 void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
1081 int idpf_vport_queues_alloc(struct idpf_vport *vport);
1082 void idpf_vport_queues_rel(struct idpf_vport *vport);
1083 void idpf_vport_intr_rel(struct idpf_vport *vport);
1084 int idpf_vport_intr_alloc(struct idpf_vport *vport);
1085 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
1086 void idpf_vport_intr_deinit(struct idpf_vport *vport);
1087 int idpf_vport_intr_init(struct idpf_vport *vport);
1088 void idpf_vport_intr_ena(struct idpf_vport *vport);
1089 void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
1090 int idpf_config_rss(struct idpf_vport *vport);
1091 int idpf_init_rss_lut(struct idpf_vport *vport);
1092 void idpf_deinit_rss_lut(struct idpf_vport *vport);
1093 int idpf_rx_bufs_init_all(struct idpf_vport *vport);
1094
1095 struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
1096 u32 q_num);
1097 struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
1098 u32 q_num);
1099 int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en);
1100
1101 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
1102 bool xmit_more);
1103 unsigned int idpf_size_to_txd_count(unsigned int size);
1104 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
1105 unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
1106 struct sk_buff *skb, u32 *buf_count);
1107 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
1108 netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
1109 struct idpf_tx_queue *tx_q);
1110 netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
1111 bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
1112 u16 cleaned_count);
1113 bool idpf_rx_process_skb_fields(struct sk_buff *skb,
1114 const struct libeth_xdp_buff *xdp,
1115 struct libeth_rq_napi_stats *rs);
1116 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
1117
1118 void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq);
1119
1120 #endif /* !_IDPF_TXRX_H_ */