/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_

#include <linux/dim.h>

#include <net/libeth/cache.h>
#include <net/libeth/types.h>
#include <net/netdev_queues.h>
#include <net/tcp.h>
#include <net/xdp.h>

#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"

#define IDPF_LARGE_MAX_Q 256
#define IDPF_MAX_Q 16
#define IDPF_MIN_Q 2
/* Mailbox Queue */
#define IDPF_MAX_MBXQ 1

#define IDPF_MIN_TXQ_DESC 64
#define IDPF_MIN_RXQ_DESC 64
#define IDPF_MIN_TXQ_COMPLQ_DESC 256
#define IDPF_MAX_QIDS 256
/* The number of descriptors in a queue must be a multiple of 32. RX queue
 * descriptors alone must additionally be a multiple of
 * IDPF_REQ_RXQ_DESC_MULTIPLE so that each buffer queue's share stays
 * aligned to 32.
 */
#define IDPF_REQ_DESC_MULTIPLE 32
#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)

#define IDPF_MAX_DESCS 8160
#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
#define MIN_SUPPORT_TXDID (\
	VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
	VIRTCHNL2_TXDID_FLEX_TSO_CTX)

#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS 1
#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS 1
#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP 4
#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP 4

#define IDPF_COMPLQ_PER_GROUP 1
#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP 1
#define IDPF_MAX_BUFQS_PER_RXQ_GRP 2
#define IDPF_BUFQ2_ENA 1
#define IDPF_NUMQ_PER_CHUNK 1

#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP 1
#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP 1

/* Default vector sharing */
#define IDPF_MBX_Q_VEC 1
#define IDPF_MIN_Q_VEC 1
#define IDPF_MIN_RDMA_VEC 2
/* Data vector for NOIRQ queues */
#define IDPF_RESERVED_VECS 1

#define IDPF_DFLT_TX_Q_DESC_COUNT 512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
#define IDPF_DFLT_RX_Q_DESC_COUNT 512
/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
 * given RX completion queue has descriptors. This includes _ALL_ buffer
 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
 * you have a total of 1024 buffers, so your RX queue _must_ have at least that
 * many descriptors. This macro divides a given number of RX descriptors by the
 * number of buffer queues to calculate how many descriptors each buffer queue
 * can have without overrunning the RX queue.
 *
 * If you give hardware more buffers than completion descriptors, and hardware
 * gets a chance to post more than a full ring's worth of descriptors before SW
 * gets an interrupt and overwrites the SW head, the gen bit in the descriptor
 * will be wrong. Any overwritten descriptors' buffers are gone forever and SW
 * has no reasonable way to tell that this has happened. From the SW
 * perspective, when we finally get an interrupt, it looks like we're still
 * waiting for a descriptor to be done, stalling forever.
 */
#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ) ((RXD) / (NUM_BUFQ))
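
/* Illustrative compile-time check (editor's example, not part of the original
 * header; assumes static_assert() is reachable via the includes above): two
 * buffer queues backing an RX queue of 1024 descriptors get 512 buffers each.
 */
static_assert(IDPF_RX_BUFQ_DESC_COUNT(1024, 2) == 512);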

#define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)

#define IDPF_RX_BUMP_NTC(rxq, ntc)				\
do {								\
	if (unlikely(++(ntc) == (rxq)->desc_count)) {		\
		ntc = 0;					\
		idpf_queue_change(GEN_CHK, rxq);		\
	}							\
} while (0)
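
/* Usage sketch (illustrative only; process_desc() is hypothetical): walk the
 * RX ring with the wrap handling and SW gen-bit flip done by the macro.
 *
 *	u16 ntc = rxq->next_to_clean;
 *
 *	while (budget--) {
 *		process_desc(&rxq->rx[ntc]);
 *		IDPF_RX_BUMP_NTC(rxq, ntc);
 *	}
 *	rxq->next_to_clean = ntc;
 */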

#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx)			\
do {								\
	if (unlikely(++(idx) == (q)->desc_count))		\
		idx = 0;					\
} while (0)

#define IDPF_RX_MAX_BUF_SZ (16384 - 128)
#define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64

#define IDPF_TX_TSO_MIN_MSS 88

/* Minimum number of descriptors between 2 descriptors with the RE bit set;
 * only relevant in flow scheduling mode
 */
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64

#define IDPF_RFL_BI_GEN_M BIT(16)
#define IDPF_RFL_BI_BUFID_M GENMASK(15, 0)

#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M

#define IDPF_DESC_UNUSED(txq) \
	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
	 (txq)->next_to_clean - (txq)->next_to_use - 1)
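
/* Worked example (illustrative): with desc_count = 512, next_to_clean = 10
 * and next_to_use = 500, ntc <= ntu, so desc_count is added in:
 * 512 + 10 - 500 - 1 = 21 descriptors free. One slot is always left unused
 * so a full ring can be distinguished from an empty one.
 */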

#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
 * completions that are expected to arrive on the TX completion queue.
 */
#define IDPF_TX_COMPLQ_PENDING(txq) \
	(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
	  0 : U32_MAX) + \
	 (txq)->num_completions_pending - (txq)->complq->num_completions)
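
/* Worked example (illustrative): with num_completions_pending = 5 and
 * num_completions = 3, the ternary contributes 0 and 5 - 3 = 2 completions
 * are still outstanding. The U32_MAX term compensates when the pending
 * counter has wrapped past the completion counter.
 */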

#define IDPF_TXBUF_NULL U32_MAX

#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)

#define IDPF_TX_FLAGS_TSO BIT(0)
#define IDPF_TX_FLAGS_IPV4 BIT(1)
#define IDPF_TX_FLAGS_IPV6 BIT(2)
#define IDPF_TX_FLAGS_TUNNEL BIT(3)
#define IDPF_TX_FLAGS_TSYN BIT(4)

struct libeth_rq_napi_stats;

union idpf_tx_flex_desc {
	struct idpf_flex_tx_desc q; /* queue based scheduling */
	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};

#define idpf_tx_buf libeth_sqe

/**
 * struct idpf_tx_offload_params - Offload parameters for a given packet
 * @tx_flags: Feature flags enabled for this packet
 * @hdr_offsets: Offset parameter for single queue model
 * @cd_tunneling: Type of tunneling enabled for single queue model
 * @tso_len: Total length of payload to segment
 * @mss: Segment size
 * @tso_segs: Number of segments to be sent
 * @tso_hdr_len: Length of headers to be duplicated
 * @td_cmd: Command field to be inserted into descriptor
 */
struct idpf_tx_offload_params {
	u32 tx_flags;

	u32 hdr_offsets;
	u32 cd_tunneling;

	u32 tso_len;
	u16 mss;
	u16 tso_segs;
	u16 tso_hdr_len;

	u16 td_cmd;
};

/**
 * struct idpf_tx_splitq_params
 * @dtype: General descriptor info
 * @eop_cmd: Type of EOP
 * @compl_tag: Associated tag for completion
 * @td_tag: Descriptor tunneling tag
 * @offload: Offload parameters
 * @prev_ntu: stored TxQ next_to_use in case of rollback
 * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
 * @prev_refill_gen: stored refillq generation bit in case of packet rollback
 */
struct idpf_tx_splitq_params {
	enum idpf_tx_desc_dtype_value dtype;
	u16 eop_cmd;
	union {
		u16 compl_tag;
		u16 td_tag;
	};

	struct idpf_tx_offload_params offload;

	u16 prev_ntu;
	u16 prev_refill_ntc;
	bool prev_refill_gen;
};

enum idpf_tx_ctx_desc_eipt_offload {
	IDPF_TX_CTX_EXT_IP_NONE = 0x0,
	IDPF_TX_CTX_EXT_IP_IPV6 = 0x1,
	IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
	IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
};

#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
#define IDPF_TX_DESCS_PER_CACHE_LINE (L1_CACHE_BYTES / \
				      sizeof(struct idpf_flex_tx_desc))
#define IDPF_TX_DESCS_FOR_CTX 1
/* TX descriptors needed, worst case */
#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
			     IDPF_TX_DESCS_PER_CACHE_LINE + \
			     IDPF_TX_DESCS_FOR_SKB_DATA_PTR)
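
/* Worked example (illustrative; assumes the default MAX_SKB_FRAGS of 17,
 * 64-byte cache lines and 16-byte flex descriptors):
 * 17 + 1 + (64 / 16) + 1 = 23 descriptors reserved for a worst-case packet.
 */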

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IDPF_TX_MAX_READ_REQ_SIZE SZ_4K
#define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
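
/* Illustrative compile-time check (editor's example, not part of the original
 * header): ALIGN_DOWN(16K - 1, 4K) leaves 12K of usable data per descriptor.
 */
static_assert(IDPF_TX_MAX_DESC_DATA_ALIGNED == 12288);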

#define idpf_rx_buf libeth_fqe

#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
			      (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
#define IDPF_RX_PTYPE_HDR_SZ sizeof(struct virtchnl2_get_ptype_info)
#define IDPF_RX_MAX_PTYPES_PER_BUF \
	DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
			   IDPF_RX_MAX_PTYPE_SZ)

#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)

#define IDPF_TUN_IP_GRE (\
	IDPF_PTYPE_TUNNEL_IP |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT)

#define IDPF_TUN_IP_GRE_MAC (\
	IDPF_TUN_IP_GRE |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)

#define IDPF_RX_MAX_PTYPE 1024
#define IDPF_RX_MAX_BASE_PTYPE 256
#define IDPF_INVALID_PTYPE_ID 0xFFFF

enum idpf_tunnel_state {
	IDPF_PTYPE_TUNNEL_IP = BIT(0),
	IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC = BIT(2),
};

struct idpf_ptype_state {
	bool outer_ip:1;
	bool outer_frag:1;
	u8 tunnel_state:6;
};

/**
 * enum idpf_queue_flags_t
 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
 *		      identify new descriptor writebacks on the ring. HW sets
 *		      the gen bit to 1 on the first writeback of any given
 *		      descriptor. After the ring wraps, HW sets the gen bit of
 *		      those descriptors to 0, and continues flipping
 *		      0->1 or 1->0 on each ring wrap. SW maintains its own
 *		      gen bit to know what value will indicate writebacks on
 *		      the next pass around the ring. E.g. it is initialized
 *		      to 1 and knows that reading a gen bit of 1 in any
 *		      descriptor on the initial pass of the ring indicates a
 *		      writeback. It also flips on every ring wrap.
 * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
 *			  bit and Q_RFL_GEN is the SW bit.
 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
 * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
 * @__IDPF_Q_RSC_EN: enable Receive Side Coalescing on Rx (splitq)
 * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
 * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
 *		  queue
 * @__IDPF_Q_NOIRQ: queue is polling-driven and has no interrupt
 * @__IDPF_Q_XDP: this is an XDP queue
 * @__IDPF_Q_XSK: the queue has an XSk pool installed
 * @__IDPF_Q_FLAGS_NBITS: Must be last
 */
enum idpf_queue_flags_t {
	__IDPF_Q_GEN_CHK,
	__IDPF_Q_RFL_GEN_CHK,
	__IDPF_Q_FLOW_SCH_EN,
	__IDPF_Q_SW_MARKER,
	__IDPF_Q_CRC_EN,
	__IDPF_Q_RSC_EN,
	__IDPF_Q_HSPLIT_EN,
	__IDPF_Q_PTP,
	__IDPF_Q_NOIRQ,
	__IDPF_Q_XDP,
	__IDPF_Q_XSK,

	__IDPF_Q_FLAGS_NBITS,
};

#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)

#define idpf_queue_has_clear(f, q) \
	__test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_assign(f, q, v) \
	__assign_bit(__IDPF_Q_##f, (q)->flags, v)
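
/* Usage sketch (illustrative only): the helpers paste the short flag name
 * onto the __IDPF_Q_ prefix, e.g.:
 *
 *	idpf_queue_set(GEN_CHK, rxq);
 *	if (idpf_queue_has(HSPLIT_EN, rxq))
 *		handle_hsplit(rxq);	// hypothetical helper
 *	idpf_queue_assign(NOIRQ, txq, no_irq);
 */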

/**
 * struct idpf_vec_regs
 * @dyn_ctl_reg: Dynamic control interrupt register offset
 * @itrn_reg: Interrupt Throttling Rate register offset
 * @itrn_index_spacing: Register spacing between ITR registers of the same
 *			vector
 */
struct idpf_vec_regs {
	u32 dyn_ctl_reg;
	u32 itrn_reg;
	u32 itrn_index_spacing;
};

/**
 * struct idpf_intr_reg
 * @dyn_ctl: Dynamic control interrupt register
 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
 * @dyn_ctl_itridx_s: Register bit offset for ITR index
 * @dyn_ctl_itridx_m: Mask for ITR index
 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
 * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
 * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
 * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
 * @rx_itr: RX ITR register
 * @tx_itr: TX ITR register
 * @icr_ena: Interrupt cause register offset
 * @icr_ena_ctlq_m: Mask for ICR
 */
struct idpf_intr_reg {
	void __iomem *dyn_ctl;
	u32 dyn_ctl_intena_m;
	u32 dyn_ctl_intena_msk_m;
	u32 dyn_ctl_itridx_s;
	u32 dyn_ctl_itridx_m;
	u32 dyn_ctl_intrvl_s;
	u32 dyn_ctl_wb_on_itr_m;
	u32 dyn_ctl_sw_itridx_ena_m;
	u32 dyn_ctl_swint_trig_m;
	void __iomem *rx_itr;
	void __iomem *tx_itr;
	void __iomem *icr_ena;
	u32 icr_ena_ctlq_m;
};

/**
 * struct idpf_q_vector
 * @vport: Vport back pointer
 * @num_rxq: Number of RX queues
 * @num_txq: Number of TX queues
 * @num_bufq: Number of buffer queues
 * @num_complq: number of completion queues
 * @num_xsksq: number of XSk send queues
 * @rx: Array of RX queues to service
 * @tx: Array of TX queues to service
 * @bufq: Array of buffer queues to service
 * @complq: array of completion queues
 * @xsksq: array of XSk send queues
 * @intr_reg: See struct idpf_intr_reg
 * @csd: XSk wakeup CSD
 * @total_events: Number of interrupts processed
 * @wb_on_itr: whether WB on ITR is enabled
 * @napi: napi handler
 * @tx_dim: Data for TX net_dim algorithm
 * @tx_itr_value: TX interrupt throttling rate
 * @tx_intr_mode: Dynamic ITR or not
 * @tx_itr_idx: TX ITR index
 * @rx_dim: Data for RX net_dim algorithm
 * @rx_itr_value: RX interrupt throttling rate
 * @rx_intr_mode: Dynamic ITR or not
 * @rx_itr_idx: RX ITR index
 * @v_idx: Vector index
 */
struct idpf_q_vector {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_vport *vport;

	u16 num_rxq;
	u16 num_txq;
	u16 num_bufq;
	u16 num_complq;
	u16 num_xsksq;
	struct idpf_rx_queue **rx;
	struct idpf_tx_queue **tx;
	struct idpf_buf_queue **bufq;
	struct idpf_compl_queue **complq;
	struct idpf_tx_queue **xsksq;

	struct idpf_intr_reg intr_reg;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	call_single_data_t csd;

	u16 total_events;
	bool wb_on_itr;

	struct napi_struct napi;

	struct dim tx_dim;
	u16 tx_itr_value;
	bool tx_intr_mode;
	u32 tx_itr_idx;

	struct dim rx_dim;
	u16 rx_itr_value;
	bool rx_intr_mode;
	u32 rx_itr_idx;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u16 v_idx;

	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 136,
			    56 + sizeof(struct napi_struct) +
			    2 * sizeof(struct dim),
			    8);

struct idpf_rx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t rsc_pkts;
	u64_stats_t hw_csum_err;
	u64_stats_t hsplit_pkts;
	u64_stats_t hsplit_buf_ovf;
	u64_stats_t bad_descs;
};

struct idpf_tx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t lso_pkts;
	u64_stats_t linearize;
	u64_stats_t q_busy;
	u64_stats_t skb_drops;
	u64_stats_t dma_map_errs;
	u64_stats_t tstamp_skipped;
};

#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032
#define IDPF_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
#define IDPF_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) ((setting) & IDPF_ITR_MASK)
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF IDPF_ITR_20K
#define IDPF_ITR_RX_DEF IDPF_ITR_20K
/* Index used for 'SW ITR' update in DYN_CTL register */
#define IDPF_SW_ITR_UPDATE_IDX 2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX 3
#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX 1

/**
 * struct idpf_rx_queue - software structure representing a receive queue
 * @rx: universal receive descriptor array
 * @single_buf: buffer descriptor array in singleq
 * @desc_ring: virtual descriptor ring address
 * @bufq_sets: Pointer to the array of buffer queues in splitq mode
 * @napi: NAPI instance corresponding to this queue (splitq)
 * @xdp_prog: attached XDP program
 * @rx_buf: See struct &libeth_fqe
 * @pp: Page pool pointer in singleq mode
 * @tail: Tail offset. Used for both queue models single and split.
 * @flags: See enum idpf_queue_flags_t
 * @idx: Index of this queue among the total RX queues across groups; used
 *	 for skb reporting.
 * @desc_count: Number of descriptors
 * @num_xdp_txq: total number of XDP Tx queues
 * @xdpsqs: shortcut for XDP Tx queues array
 * @rxdids: Supported RX descriptor ids
 * @truesize: data buffer truesize in singleq
 * @rx_ptype_lkup: LUT of Rx ptypes
 * @xdp_rxq: XDP queue info
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @xdp: XDP buffer with the current frame
 * @xsk: current XDP buffer in XSk mode
 * @pool: XSk pool if installed
 * @cached_phc_time: Cached PHC time for the Rx queue
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See struct idpf_rx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 * @rx_max_pkt_size: RX max packet size
 */
struct idpf_rx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		union virtchnl2_rx_desc *rx;
		struct virtchnl2_singleq_rx_buf_desc *single_buf;

		void *desc_ring;
	};
	union {
		struct {
			struct idpf_bufq_set *bufq_sets;
			struct napi_struct *napi;
			struct bpf_prog __rcu *xdp_prog;
		};
		struct {
			struct libeth_fqe *rx_buf;
			struct page_pool *pp;
			void __iomem *tail;
		};
	};

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 idx;
	u16 desc_count;

	u32 num_xdp_txq;
	union {
		struct idpf_tx_queue **xdpsqs;
		struct {
			u32 rxdids;
			u32 truesize;
		};
	};
	const struct libeth_rx_pt *rx_ptype_lkup;

	struct xdp_rxq_info xdp_rxq;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	u32 next_to_alloc;

	union {
		struct libeth_xdp_buff_stash xdp;
		struct {
			struct libeth_xdp_buff *xsk;
			struct xsk_buff_pool *pool;
		};
	};
	u64 cached_phc_time;

	struct u64_stats_sync stats_sync;
	struct idpf_rx_queue_stats q_stats;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u16 rx_buffer_low_watermark;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	u16 rx_max_pkt_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_rx_queue,
			    ALIGN(64, __alignof(struct xdp_rxq_info)) +
			    sizeof(struct xdp_rxq_info),
			    96 + offsetof(struct idpf_rx_queue, q_stats) -
			    offsetofend(struct idpf_rx_queue, cached_phc_time),
			    32);

/**
 * struct idpf_tx_queue - software structure representing a transmit queue
 * @base_tx: base Tx descriptor array
 * @base_ctx: base Tx context descriptor array
 * @flex_tx: flex Tx descriptor array
 * @flex_ctx: flex Tx context descriptor array
 * @desc_ring: virtual descriptor ring address
 * @tx_buf: See struct idpf_tx_buf
 * @txq_grp: See struct idpf_txq_group
 * @complq: corresponding completion queue in XDP mode
 * @dev: Device back pointer for DMA mapping
 * @pool: corresponding XSk pool if installed
 * @tail: Tail offset. Used for both queue models single and split
 * @flags: See enum idpf_queue_flags_t
 * @idx: Index used to map between the TX queue group and the hot-path TX
 *	 pointers stored in the vport. Used in both singleq and splitq.
 * @desc_count: Number of descriptors
 * @tx_min_pkt_len: Min supported packet length
 * @thresh: XDP queue cleaning threshold
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @last_re: last descriptor index where the RE bit was set
 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 *		   the TX completion queue, it can be for any TXQ associated
 *		   with that completion queue. This means we can clean up to
 *		   N TXQs during a single call to clean the completion queue.
 *		   cleaned_bytes|pkts tracks the clean stats per TXQ during
 *		   that single call to clean the completion queue. By doing so,
 *		   we can update BQL with aggregate cleaned stats for each TXQ
 *		   only once at the end of the cleaning routine.
 * @clean_budget: singleq only, queue cleaning budget
 * @cleaned_pkts: Number of packets cleaned for the above case
 * @refillq: Pointer to refill queue
 * @pending: number of pending descriptors to send in QB
 * @xdp_tx: number of pending &xdp_buff or &xdp_frame buffers
 * @timer: timer for XDP Tx queue cleanup
 * @xdp_lock: lock for XDP Tx queues sharing
 * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
 * @tstamp_task: Work that handles Tx timestamp read
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See struct idpf_tx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @buf_pool_size: Total number of idpf_tx_buf
 * @rel_q_id: relative virtchnl queue index
 */
struct idpf_tx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		struct idpf_base_tx_desc *base_tx;
		struct idpf_base_tx_ctx_desc *base_ctx;
		union idpf_tx_flex_desc *flex_tx;
		union idpf_flex_tx_ctx_desc *flex_ctx;

		void *desc_ring;
	};
	struct libeth_sqe *tx_buf;
	union {
		struct idpf_txq_group *txq_grp;
		struct idpf_compl_queue *complq;
	};
	union {
		struct device *dev;
		struct xsk_buff_pool *pool;
	};
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 idx;
	u16 desc_count;

	union {
		u16 tx_min_pkt_len;
		u32 thresh;
	};

	struct net_device *netdev;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;

	union {
		struct {
			u16 last_re;
			u16 tx_max_bufs;

			union {
				u32 cleaned_bytes;
				u32 clean_budget;
			};
			u16 cleaned_pkts;

			struct idpf_sw_queue *refillq;
		};
		struct {
			u32 pending;
			u32 xdp_tx;

			struct libeth_xdpsq_timer *timer;
			struct libeth_xdpsq_lock xdp_lock;
		};
	};

	struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
	struct work_struct *tstamp_task;

	struct u64_stats_sync stats_sync;
	struct idpf_tx_queue_stats q_stats;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u32 buf_pool_size;
	u32 rel_q_id;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
			    104 +
			    offsetof(struct idpf_tx_queue, cached_tstamp_caps) -
			    offsetofend(struct idpf_tx_queue, timer) +
			    offsetof(struct idpf_tx_queue, q_stats) -
			    offsetofend(struct idpf_tx_queue, tstamp_task),
			    32);

/**
 * struct idpf_buf_queue - software structure representing a buffer queue
 * @split_buf: buffer descriptor array
 * @buf: &libeth_fqe for data buffers
 * @pp: &page_pool for data buffers
 * @xsk_buf: &xdp_buff for XSk Rx buffers
 * @pool: &xsk_buff_pool on XSk queues
 * @hdr_buf: &libeth_fqe for header buffers
 * @hdr_pp: &page_pool for header buffers
 * @tail: Tail offset
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @thresh: refill threshold in XSk
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @pending: number of buffers to refill (XSk)
 * @hdr_truesize: truesize for buffer headers
 * @truesize: truesize for data buffers
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 */
struct idpf_buf_queue {
	__cacheline_group_begin_aligned(read_mostly);
	struct virtchnl2_splitq_rx_buf_desc *split_buf;
	union {
		struct {
			struct libeth_fqe *buf;
			struct page_pool *pp;
		};
		struct {
			struct libeth_xdp_buff **xsk_buf;
			struct xsk_buff_pool *pool;
		};
	};
	struct libeth_fqe *hdr_buf;
	struct page_pool *hdr_pp;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;

	u32 thresh;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	u32 next_to_alloc;

	u32 pending;
	u32 hdr_truesize;
	u32 truesize;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u16 rx_buffer_low_watermark;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);

/**
 * struct idpf_compl_queue - software structure representing a completion queue
 * @comp: 8-byte completion descriptor array
 * @comp_4b: 4-byte completion descriptor array
 * @desc_ring: virtual descriptor ring address
 * @txq_grp: See struct idpf_txq_group
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @clean_budget: queue cleaning budget
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
 *		 and bufq.
 * @next_to_clean: Next descriptor to clean
 * @num_completions: Only relevant for TX completion queue. It tracks the
 *		     number of completions received to compare against the
 *		     number of completions pending, as accumulated by the
 *		     TX queues.
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 */
struct idpf_compl_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		struct idpf_splitq_tx_compl_desc *comp;
		struct idpf_splitq_4b_tx_compl_desc *comp_4b;

		void *desc_ring;
	};
	struct idpf_txq_group *txq_grp;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;

	u32 clean_budget;
	struct net_device *netdev;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;

	aligned_u64 num_completions;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);

/**
 * struct idpf_sw_queue
 * @ring: Pointer to the ring
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Descriptor count
 * @next_to_use: Buffer to allocate at
 * @next_to_clean: Next descriptor to clean
 *
 * Software queues are used in splitq mode to manage buffers between rxq
 * producer and the bufq consumer. These are required in order to maintain a
 * lockless buffer management system and are strictly software only constructs.
 */
struct idpf_sw_queue {
	__cacheline_group_begin_aligned(read_mostly);
	u32 *ring;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	__cacheline_group_end_aligned(read_write);
};
libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);

/**
 * struct idpf_rxq_set
 * @rxq: RX queue
 * @refillq: pointers to refill queues
 *
 * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
 * Each rxq needs a refillq to return used buffers back to the respective bufq.
 * Bufqs then clean these refillqs for buffers to give to hardware.
 */
struct idpf_rxq_set {
	struct idpf_rx_queue rxq;
	struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};

/**
 * struct idpf_bufq_set
 * @bufq: Buffer queue
 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
 *		  in idpf_rxq_group.
 * @refillqs: Pointer to refill queues array.
 *
 * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
 * Used buffers received by rxqs will be put on refillqs which bufqs will
 * clean to return new buffers back to hardware.
 *
 * Buffers needed by some number of rxqs associated in this rxq_group are
 * managed by at most two bufqs (depending on performance configuration).
 */
struct idpf_bufq_set {
	struct idpf_buf_queue bufq;
	int num_refillqs;
	struct idpf_sw_queue *refillqs;
};

/**
 * struct idpf_rxq_group
 * @vport: Vport back pointer
 * @singleq: Struct with single queue related members
 * @singleq.num_rxq: Number of RX queues associated
 * @singleq.rxqs: Array of RX queue pointers
 * @splitq: Struct with split queue related members
 * @splitq.num_rxq_sets: Number of RX queue sets
 * @splitq.num_bufq_sets: Number of buffer queue sets
 * @splitq.rxq_sets: Array of RX queue sets
 * @splitq.bufq_sets: Buffer queue set pointer
 *
 * In singleq mode, an rxq_group is simply an array of rxqs. In splitq, an
 * rxq_group contains all the rxqs, bufqs and refillqs needed to manage
 * buffers in splitq mode.
 */
struct idpf_rxq_group {
	struct idpf_vport *vport;

	union {
		struct {
			u16 num_rxq;
			struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
		} singleq;
		struct {
			u16 num_rxq_sets;
			u16 num_bufq_sets;
			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
			struct idpf_bufq_set *bufq_sets;
		} splitq;
	};
};

/**
 * struct idpf_txq_group
 * @vport: Vport back pointer
 * @num_txq: Number of TX queues associated
 * @txqs: Array of TX queue pointers
 * @complq: Associated completion queue pointer, split queue only
 * @num_completions_pending: Total number of completions pending for the
 *			     completion queue, accumulated for all TX queues
 *			     associated with that completion queue.
 *
 * Between singleq and splitq, a txq_group is largely the same except for the
 * complq. In splitq a single complq is responsible for handling completions
 * for some number of txqs associated in this txq_group.
 */
struct idpf_txq_group {
	struct idpf_vport *vport;

	u16 num_txq;
	struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];

	struct idpf_compl_queue *complq;

	aligned_u64 num_completions_pending;
};

/**
 * idpf_q_vector_to_mem - get the preferred memory node for a queue vector
 * @q_vector: pointer to queue vector struct
 *
 * Return: NUMA memory node of the first CPU in the vector's affinity mask,
 *	   or NUMA_NO_NODE if it cannot be determined.
 */
static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
{
	u32 cpu;

	if (!q_vector)
		return NUMA_NO_NODE;

	cpu = cpumask_first(&q_vector->napi.config->affinity_mask);

	return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}

/**
 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 * @size: transmit request size in bytes
 *
 * In the case where a large frag (>= 16K) needs to be split across multiple
 * descriptors, we need to assume that we can have no more than 12K of data
 * per descriptor due to hardware alignment restrictions (4K alignment).
 */
static inline u32 idpf_size_to_txd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
}
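
/* Example (illustrative): a 16K frag needs DIV_ROUND_UP(16384, 12288) = 2
 * descriptors, while anything up to 12K fits in a single descriptor.
 */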

/**
 * idpf_tx_singleq_build_ctob - populate command tag offset and size
 * @td_cmd: Command to be filled in desc
 * @td_offset: Offset to be filled in desc
 * @size: Size of the buffer
 * @td_tag: td tag to be filled
 *
 * Return: the 64-bit value populated with the input parameters
 */
static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
						unsigned int size, u64 td_tag)
{
	return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
			   (td_cmd << IDPF_TXD_QW1_CMD_S) |
			   (td_offset << IDPF_TXD_QW1_OFFSET_S) |
			   ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << IDPF_TXD_QW1_L2TAG1_S));
}
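
/* Usage sketch (illustrative only; the surrounding values are hypothetical):
 * build the command/offset/size quadword for a final data descriptor.
 *
 *	tx_desc->qw1 = idpf_tx_singleq_build_ctob(IDPF_TXD_LAST_DESC_CMD,
 *						  td_offset, size, td_tag);
 */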

void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size);
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size);
/**
 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
					     struct idpf_tx_splitq_params *params,
					     u16 td_cmd, u16 size)
{
	if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
	else
		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}

/**
 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback with interrupts disabled
 * @q_vector: pointer to queue vector struct
 */
static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
{
	struct idpf_intr_reg *reg;

	if (q_vector->wb_on_itr)
		return;

	q_vector->wb_on_itr = true;
	reg = &q_vector->intr_reg;

	writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
	       (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
	       reg->dyn_ctl);
}

/**
 * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
 * @refillq: pointer to refillq containing buf_ids
 */
static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
{
	return (refillq->next_to_use > refillq->next_to_clean ?
		0 : refillq->desc_count) +
	       refillq->next_to_use - refillq->next_to_clean - 1;
}
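
/* Worked example (illustrative): with desc_count = 512, next_to_use = 10 and
 * next_to_clean = 5, ntu > ntc, so nothing is added: 10 - 5 - 1 = 4 free
 * buf_ids. As with IDPF_DESC_UNUSED(), one slot always stays unused.
 */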

int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_msg,
			    struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
				struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
			     struct virtchnl2_create_vport *vport_msg,
			     struct idpf_vport_max_q *max_q);
void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_queues_alloc(struct idpf_vport *vport,
			    struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_queues_rel(struct idpf_vport *vport,
			   struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_intr_alloc(struct idpf_vport *vport,
			  struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport,
			    struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_intr_init(struct idpf_vport *vport,
			 struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_ena(struct idpf_vport *vport,
			 struct idpf_q_vec_rsrc *rsrc);
void idpf_fill_dflt_rss_lut(struct idpf_vport *vport,
			    struct idpf_rss_data *rss_data);
int idpf_config_rss(struct idpf_vport *vport, struct idpf_rss_data *rss_data);
int idpf_init_rss_lut(struct idpf_vport *vport, struct idpf_rss_data *rss_data);
void idpf_deinit_rss_lut(struct idpf_rss_data *rss_data);
int idpf_rx_bufs_init_all(struct idpf_vport *vport,
			  struct idpf_q_vec_rsrc *rsrc);

struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
					u32 q_num);
struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
					u32 q_num);
int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en);

void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
			   bool xmit_more);
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
					struct sk_buff *skb, u32 *buf_count);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
				  struct idpf_tx_queue *tx_q);
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
				      u16 cleaned_count);
bool idpf_rx_process_skb_fields(struct sk_buff *skb,
				const struct libeth_xdp_buff *xdp,
				struct libeth_rq_napi_stats *rs);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);

void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq);

#endif /* !_IDPF_TXRX_H_ */