/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_

#include <linux/dim.h>

#include <net/libeth/cache.h>
#include <net/tcp.h>
#include <net/netdev_queues.h>

#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"

#define IDPF_LARGE_MAX_Q 256
#define IDPF_MAX_Q 16
#define IDPF_MIN_Q 2
/* Mailbox Queue */
#define IDPF_MAX_MBXQ 1

#define IDPF_MIN_TXQ_DESC 64
#define IDPF_MIN_RXQ_DESC 64
#define IDPF_MIN_TXQ_COMPLQ_DESC 256
#define IDPF_MAX_QIDS 256

/* The number of descriptors in a queue must be a multiple of 32. RX queue
 * descriptors must additionally be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
 * so that the share handed to each buffer queue stays 32-aligned.
 */
#define IDPF_REQ_DESC_MULTIPLE 32
#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)

#define IDPF_MAX_DESCS 8160
#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
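/* Worked example (illustrative, not driver logic): with
 * IDPF_MAX_BUFQS_PER_RXQ_GRP == 2, IDPF_REQ_RXQ_DESC_MULTIPLE is 64, so
 * IDPF_MAX_RXQ_DESC = ALIGN_DOWN(8160, 64) = 8128 while
 * IDPF_MAX_TXQ_DESC = ALIGN_DOWN(8160, 32) = 8160. A request for, say,
 * 1000 RX descriptors would round down to 960, which splits evenly into
 * two 480-descriptor buffer queues, each still a multiple of 32.
 */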
#define MIN_SUPPORT_TXDID (\
	VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
	VIRTCHNL2_TXDID_FLEX_TSO_CTX)

#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS 1
#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS 1
#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP 4
#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP 4

#define IDPF_COMPLQ_PER_GROUP 1
#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP 1
#define IDPF_MAX_BUFQS_PER_RXQ_GRP 2
#define IDPF_BUFQ2_ENA 1
#define IDPF_NUMQ_PER_CHUNK 1

#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP 1
#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP 1

/* Default vector sharing */
#define IDPF_MBX_Q_VEC 1
#define IDPF_MIN_Q_VEC 1

#define IDPF_DFLT_TX_Q_DESC_COUNT 512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
#define IDPF_DFLT_RX_Q_DESC_COUNT 512

/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
 * given RX completion queue has descriptors. This includes _ALL_ buffer
 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
 * you have a total of 1024 buffers, so your RX queue _must_ have at least that
 * many descriptors. This macro divides a given number of RX descriptors by
 * the number of buffer queues to calculate how many descriptors each buffer
 * queue can have without overrunning the RX queue.
 *
 * If hardware is given more buffers than completion descriptors, and it gets
 * a chance to post more than a ring's worth of descriptors before SW takes an
 * interrupt and updates SW head, the gen bit in those descriptors will be
 * wrong. Any overwritten descriptors' buffers will be gone forever and SW has
 * no reasonable way to tell that this has happened. From the SW perspective,
 * when we finally get an interrupt, it looks like we're still waiting for a
 * descriptor to be done, stalling forever.
 */
#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ) ((RXD) / (NUM_BUFQ))
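/* Illustrative numbers for the rule above: an RX queue with 1024
 * descriptors fed by two buffer queues gives
 * IDPF_RX_BUFQ_DESC_COUNT(1024, 2) == 512 descriptors per bufq, so at most
 * 2 * 512 = 1024 buffers can ever be outstanding, which the RX queue can
 * absorb without being overrun.
 */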

#define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)

#define IDPF_RX_BUMP_NTC(rxq, ntc) \
do { \
	if (unlikely(++(ntc) == (rxq)->desc_count)) { \
		ntc = 0; \
		idpf_queue_change(GEN_CHK, rxq); \
	} \
} while (0)

#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx) \
do { \
	if (unlikely(++(idx) == (q)->desc_count)) \
		idx = 0; \
} while (0)
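/* Typical use in a clean loop (sketch; "ntc" is a local copy of
 * next_to_clean that is written back once the loop finishes):
 *
 *	u16 ntc = rxq->next_to_clean;
 *
 *	while (budget--) {
 *		... process the descriptor at ntc ...
 *		IDPF_RX_BUMP_NTC(rxq, ntc);
 *	}
 *	rxq->next_to_clean = ntc;
 *
 * The GEN_CHK flip on wrap keeps the SW generation bit in step with the HW
 * writeback gen bit described under enum idpf_queue_flags_t below.
 */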

#define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64

#define IDPF_TX_TSO_MIN_MSS 88

/* Minimum number of descriptors between 2 descriptors with the RE bit set;
 * only relevant in flow scheduling mode
 */
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64

#define IDPF_RX_BI_GEN_M BIT(16)
#define IDPF_RX_BI_BUFID_M GENMASK(15, 0)

#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M

#define IDPF_DESC_UNUSED(txq) \
	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
	(txq)->next_to_clean - (txq)->next_to_use - 1)
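/* Worked example (illustrative): with desc_count = 512, next_to_use = 500
 * and next_to_clean = 10, clean is not ahead of use, so the macro yields
 * 512 + 10 - 500 - 1 = 21 unused descriptors; with next_to_clean = 505 it
 * yields 505 - 500 - 1 = 4. One slot always stays unused so that a full
 * ring can be told apart from an empty one.
 */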

#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
				  (txq)->desc_count >> 2)

#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
 * completions that are expected to arrive on the TX completion queue.
 */
#define IDPF_TX_COMPLQ_PENDING(txq) \
	(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
	0 : U32_MAX) + \
	(txq)->num_completions_pending - (txq)->complq->num_completions)
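/* Sketch of the intent (assuming both counters are free-running unsigned
 * values): num_completions_pending is incremented at send time and
 * num_completions as completions arrive, so their difference is the number
 * still in flight. When the pending counter has wrapped past the completion
 * counter, a plain subtraction would underflow, so U32_MAX is added first
 * to land back on the true outstanding count.
 */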

#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
/* Adjust the generation for the completion tag and wrap if necessary */
#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
	((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
	0 : (txq)->compl_tag_cur_gen)

#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)

#define IDPF_TX_FLAGS_TSO BIT(0)
#define IDPF_TX_FLAGS_IPV4 BIT(1)
#define IDPF_TX_FLAGS_IPV6 BIT(2)
#define IDPF_TX_FLAGS_TUNNEL BIT(3)

union idpf_tx_flex_desc {
	struct idpf_flex_tx_desc q; /* queue based scheduling */
	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};

#define idpf_tx_buf libeth_sqe

/**
 * struct idpf_buf_lifo - LIFO for managing OOO completions
 * @top: Used to know how many buffers are left
 * @size: Total size of LIFO
 * @bufs: Backing array
 */
struct idpf_buf_lifo {
	u16 top;
	u16 size;
	struct idpf_tx_stash **bufs;
};
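/* Minimal usage sketch (hypothetical helpers, not the driver's own): since
 * @bufs is pre-allocated to @size entries, push and pop reduce to moving
 * @top:
 *
 *	static inline bool idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
 *					      struct idpf_tx_stash *buf)
 *	{
 *		if (unlikely(stack->top == stack->size))
 *			return false;
 *
 *		stack->bufs[stack->top++] = buf;
 *		return true;
 *	}
 *
 *	static inline struct idpf_tx_stash *
 *	idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
 *	{
 *		return stack->top ? stack->bufs[--stack->top] : NULL;
 *	}
 */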

/**
 * struct idpf_tx_offload_params - Offload parameters for a given packet
 * @tx_flags: Feature flags enabled for this packet
 * @hdr_offsets: Offset parameter for single queue model
 * @cd_tunneling: Type of tunneling enabled for single queue model
 * @tso_len: Total length of payload to segment
 * @mss: Segment size
 * @tso_segs: Number of segments to be sent
 * @tso_hdr_len: Length of headers to be duplicated
 * @td_cmd: Command field to be inserted into descriptor
 */
struct idpf_tx_offload_params {
	u32 tx_flags;

	u32 hdr_offsets;
	u32 cd_tunneling;

	u32 tso_len;
	u16 mss;
	u16 tso_segs;
	u16 tso_hdr_len;

	u16 td_cmd;
};

/**
 * struct idpf_tx_splitq_params
 * @dtype: General descriptor info
 * @eop_cmd: Type of EOP
 * @compl_tag: Associated tag for completion
 * @td_tag: Descriptor tunneling tag
 * @offload: Offload parameters
 */
struct idpf_tx_splitq_params {
	enum idpf_tx_desc_dtype_value dtype;
	u16 eop_cmd;
	union {
		u16 compl_tag;
		u16 td_tag;
	};

	struct idpf_tx_offload_params offload;
};

enum idpf_tx_ctx_desc_eipt_offload {
	IDPF_TX_CTX_EXT_IP_NONE = 0x0,
	IDPF_TX_CTX_EXT_IP_IPV6 = 0x1,
	IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
	IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
};

/* Checksum offload bits decoded from the receive descriptor. */
struct idpf_rx_csum_decoded {
	u32 l3l4p : 1;
	u32 ipe : 1;
	u32 eipe : 1;
	u32 eudpe : 1;
	u32 ipv6exadd : 1;
	u32 l4e : 1;
	u32 pprs : 1;
	u32 nat : 1;
	u32 raw_csum_inv : 1;
	u32 raw_csum : 16;
};

struct idpf_rx_extracted {
	unsigned int size;
	u16 rx_ptype;
};

#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
#define IDPF_TX_DESCS_PER_CACHE_LINE (L1_CACHE_BYTES / \
				      sizeof(struct idpf_flex_tx_desc))
#define IDPF_TX_DESCS_FOR_CTX 1
/* TX descriptors needed, worst case */
#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
			     IDPF_TX_DESCS_PER_CACHE_LINE + \
			     IDPF_TX_DESCS_FOR_SKB_DATA_PTR)

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IDPF_TX_MAX_READ_REQ_SIZE SZ_4K
#define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)

#define idpf_rx_buf libeth_fqe

#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
			      (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
#define IDPF_RX_PTYPE_HDR_SZ sizeof(struct virtchnl2_get_ptype_info)
#define IDPF_RX_MAX_PTYPES_PER_BUF \
	DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
			   IDPF_RX_MAX_PTYPE_SZ)

#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)

#define IDPF_TUN_IP_GRE (\
	IDPF_PTYPE_TUNNEL_IP |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT)

#define IDPF_TUN_IP_GRE_MAC (\
	IDPF_TUN_IP_GRE |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)

#define IDPF_RX_MAX_PTYPE 1024
#define IDPF_RX_MAX_BASE_PTYPE 256
#define IDPF_INVALID_PTYPE_ID 0xFFFF

enum idpf_tunnel_state {
	IDPF_PTYPE_TUNNEL_IP = BIT(0),
	IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC = BIT(2),
};

struct idpf_ptype_state {
	bool outer_ip:1;
	bool outer_frag:1;
	u8 tunnel_state:6;
};

/**
 * enum idpf_queue_flags_t
 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
 *		      identify new descriptor writebacks on the ring. HW sets
 *		      the gen bit to 1 on the first writeback of any given
 *		      descriptor. After the ring wraps, HW sets the gen bit of
 *		      those descriptors to 0, and continues flipping
 *		      0->1 or 1->0 on each ring wrap. SW maintains its own
 *		      gen bit to know what value will indicate writebacks on
 *		      the next pass around the ring. E.g. it is initialized
 *		      to 1 and knows that reading a gen bit of 1 in any
 *		      descriptor on the initial pass of the ring indicates a
 *		      writeback. It also flips on every ring wrap.
 * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
 *			  bit and Q_RFL_GEN is the SW bit.
 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
 * @__IDPF_Q_POLL_MODE: Enable poll mode
 * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
 * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
 * @__IDPF_Q_FLAGS_NBITS: Must be last
 */
enum idpf_queue_flags_t {
	__IDPF_Q_GEN_CHK,
	__IDPF_Q_RFL_GEN_CHK,
	__IDPF_Q_FLOW_SCH_EN,
	__IDPF_Q_SW_MARKER,
	__IDPF_Q_POLL_MODE,
	__IDPF_Q_CRC_EN,
	__IDPF_Q_HSPLIT_EN,

	__IDPF_Q_FLAGS_NBITS,
};

#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)

#define idpf_queue_has_clear(f, q) \
	__test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_assign(f, q, v) \
	__assign_bit(__IDPF_Q_##f, (q)->flags, v)
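/* Example (sketch): the wrappers take the short flag name and paste on the
 * __IDPF_Q_ prefix, so for a struct idpf_rx_queue *rxq:
 *
 *	idpf_queue_set(GEN_CHK, rxq);	expands to
 *					__set_bit(__IDPF_Q_GEN_CHK, rxq->flags)
 *	if (idpf_queue_has(HSPLIT_EN, rxq))
 *		... header split handling ...
 *
 * The non-atomic bitops are presumably sufficient because a queue's flags
 * are only manipulated from its own NAPI/configuration context.
 */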

/**
 * struct idpf_vec_regs
 * @dyn_ctl_reg: Dynamic control interrupt register offset
 * @itrn_reg: Interrupt Throttling Rate register offset
 * @itrn_index_spacing: Register spacing between ITR registers of the same
 *			vector
 */
struct idpf_vec_regs {
	u32 dyn_ctl_reg;
	u32 itrn_reg;
	u32 itrn_index_spacing;
};

/**
 * struct idpf_intr_reg
 * @dyn_ctl: Dynamic control interrupt register
 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
 * @dyn_ctl_itridx_s: Register bit offset for ITR index
 * @dyn_ctl_itridx_m: Mask for ITR index
 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
 * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
 * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
 * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
 * @rx_itr: RX ITR register
 * @tx_itr: TX ITR register
 * @icr_ena: Interrupt cause register offset
 * @icr_ena_ctlq_m: Mask for ICR
 */
struct idpf_intr_reg {
	void __iomem *dyn_ctl;
	u32 dyn_ctl_intena_m;
	u32 dyn_ctl_intena_msk_m;
	u32 dyn_ctl_itridx_s;
	u32 dyn_ctl_itridx_m;
	u32 dyn_ctl_intrvl_s;
	u32 dyn_ctl_wb_on_itr_m;
	u32 dyn_ctl_sw_itridx_ena_m;
	u32 dyn_ctl_swint_trig_m;
	void __iomem *rx_itr;
	void __iomem *tx_itr;
	void __iomem *icr_ena;
	u32 icr_ena_ctlq_m;
};

/**
 * struct idpf_q_vector
 * @vport: Vport back pointer
 * @num_rxq: Number of RX queues
 * @num_txq: Number of TX queues
 * @num_bufq: Number of buffer queues
 * @num_complq: number of completion queues
 * @rx: Array of RX queues to service
 * @tx: Array of TX queues to service
 * @bufq: Array of buffer queues to service
 * @complq: array of completion queues
 * @intr_reg: See struct idpf_intr_reg
 * @napi: napi handler
 * @total_events: Number of interrupts processed
 * @wb_on_itr: whether WB on ITR is enabled
 * @tx_dim: Data for TX net_dim algorithm
 * @tx_itr_value: TX interrupt throttling rate
 * @tx_intr_mode: Dynamic ITR or not
 * @tx_itr_idx: TX ITR index
 * @rx_dim: Data for RX net_dim algorithm
 * @rx_itr_value: RX interrupt throttling rate
 * @rx_intr_mode: Dynamic ITR or not
 * @rx_itr_idx: RX ITR index
 * @v_idx: Vector index
 * @affinity_mask: CPU affinity mask
 */
struct idpf_q_vector {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_vport *vport;

	u16 num_rxq;
	u16 num_txq;
	u16 num_bufq;
	u16 num_complq;
	struct idpf_rx_queue **rx;
	struct idpf_tx_queue **tx;
	struct idpf_buf_queue **bufq;
	struct idpf_compl_queue **complq;

	struct idpf_intr_reg intr_reg;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	struct napi_struct napi;
	u16 total_events;
	bool wb_on_itr;

	struct dim tx_dim;
	u16 tx_itr_value;
	bool tx_intr_mode;
	u32 tx_itr_idx;

	struct dim rx_dim;
	u16 rx_itr_value;
	bool rx_intr_mode;
	u32 rx_itr_idx;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u16 v_idx;

	cpumask_var_t affinity_mask;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 120,
			    24 + sizeof(struct napi_struct) +
			    2 * sizeof(struct dim),
			    8 + sizeof(cpumask_var_t));

struct idpf_rx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t rsc_pkts;
	u64_stats_t hw_csum_err;
	u64_stats_t hsplit_pkts;
	u64_stats_t hsplit_buf_ovf;
	u64_stats_t bad_descs;
};

struct idpf_tx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t lso_pkts;
	u64_stats_t linearize;
	u64_stats_t q_busy;
	u64_stats_t skb_drops;
	u64_stats_t dma_map_errs;
};

#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032
#define IDPF_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
#define IDPF_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) ((setting) & IDPF_ITR_MASK)
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF IDPF_ITR_20K
#define IDPF_ITR_RX_DEF IDPF_ITR_20K
/* Index used for 'SW ITR' update in DYN_CTL register */
#define IDPF_SW_ITR_UPDATE_IDX 2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX 3
#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX 1

/**
 * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
 * @buf_stack: Stack of empty buffers to store buffer info for out of order
 *	       buffer completions. See struct idpf_buf_lifo
 * @sched_buf_hash: Hash table to store buffers
 */
struct idpf_txq_stash {
	struct idpf_buf_lifo buf_stack;
	DECLARE_HASHTABLE(sched_buf_hash, 12);
} ____cacheline_aligned;
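/* Flow-based scheduling lets completions return out of order, so buffer
 * info that must outlive its ring slot is parked here: an entry is popped
 * from @buf_stack and inserted into @sched_buf_hash keyed by completion
 * tag. A sketch of the lookup side (hypothetical names for the entry's
 * list node and tag variable):
 *
 *	struct idpf_txq_stash *qs = txq->stash;
 *	struct idpf_tx_stash *stash;
 *
 *	hash_for_each_possible(qs->sched_buf_hash, stash, hlist, compl_tag) {
 *		... complete stash->buf, push stash back onto qs->buf_stack ...
 *	}
 */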

/**
 * struct idpf_rx_queue - software structure representing a receive queue
 * @rx: universal receive descriptor array
 * @single_buf: buffer descriptor array in singleq
 * @desc_ring: virtual descriptor ring address
 * @bufq_sets: Pointer to the array of buffer queues in splitq mode
 * @napi: NAPI instance corresponding to this queue (splitq)
 * @rx_buf: See struct &libeth_fqe
 * @pp: Page pool pointer in singleq mode
 * @netdev: &net_device corresponding to this queue
 * @tail: Tail offset. Used for both queue models single and split.
 * @flags: See enum idpf_queue_flags_t
 * @idx: For RX queue, it is used to index to total RX queue across groups and
 *	 used for skb reporting.
 * @desc_count: Number of descriptors
 * @rxdids: Supported RX descriptor ids
 * @rx_ptype_lkup: LUT of Rx ptypes
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @skb: Pointer to the skb
 * @truesize: data buffer truesize in singleq
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See struct idpf_rx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 * @rx_max_pkt_size: RX max packet size
 */
struct idpf_rx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		union virtchnl2_rx_desc *rx;
		struct virtchnl2_singleq_rx_buf_desc *single_buf;

		void *desc_ring;
	};
	union {
		struct {
			struct idpf_bufq_set *bufq_sets;
			struct napi_struct *napi;
		};
		struct {
			struct libeth_fqe *rx_buf;
			struct page_pool *pp;
		};
	};
	struct net_device *netdev;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 idx;
	u16 desc_count;

	u32 rxdids;
	const struct libeth_rx_pt *rx_ptype_lkup;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	struct sk_buff *skb;
	u32 truesize;

	struct u64_stats_sync stats_sync;
	struct idpf_rx_queue_stats q_stats;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u16 rx_buffer_low_watermark;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	u16 rx_max_pkt_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
			    80 + sizeof(struct u64_stats_sync),
			    32);

/**
 * struct idpf_tx_queue - software structure representing a transmit queue
 * @base_tx: base Tx descriptor array
 * @base_ctx: base Tx context descriptor array
 * @flex_tx: flex Tx descriptor array
 * @flex_ctx: flex Tx context descriptor array
 * @desc_ring: virtual descriptor ring address
 * @tx_buf: See struct idpf_tx_buf
 * @txq_grp: See struct idpf_txq_group
 * @dev: Device back pointer for DMA mapping
 * @tail: Tail offset. Used for both queue models single and split
 * @flags: See enum idpf_queue_flags_t
 * @idx: For TX queue, it is used as index to map between TX queue group and
 *	 hot path TX pointers stored in vport. Used in both singleq/splitq.
 * @desc_count: Number of descriptors
 * @tx_min_pkt_len: Min supported packet length
 * @compl_tag_gen_s: Completion tag generation bit
 *	The format of the completion tag will change based on the TXQ
 *	descriptor ring size so that we can maintain roughly the same level
 *	of "uniqueness" across all descriptor sizes. For example, if the
 *	TXQ descriptor ring size is 64 (the minimum size supported), the
 *	completion tag will be formatted as below:
 *	15                 6 5         0
 *	--------------------------------
 *	|    GEN=0-1023     |IDX = 0-63|
 *	--------------------------------
 *
 *	This gives us 64*1024 = 65536 possible unique values. Similarly, if
 *	the TXQ descriptor ring size is 8160 (the maximum size supported),
 *	the completion tag will be formatted as below:
 *	15 13 12                      0
 *	--------------------------------
 *	|GEN |       IDX = 0-8159      |
 *	--------------------------------
 *
 *	This gives us 8*8160 = 65280 possible unique values.
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 *		   the TX completion queue, it can be for any TXQ associated
 *		   with that completion queue. This means we can clean up to
 *		   N TXQs during a single call to clean the completion queue.
 *		   cleaned_bytes|pkts tracks the clean stats per TXQ during
 *		   that single call to clean the completion queue. By doing so,
 *		   we can update BQL with aggregate cleaned stats for each TXQ
 *		   only once at the end of the cleaning routine.
 * @clean_budget: singleq only, queue cleaning budget
 * @cleaned_pkts: Number of packets cleaned for the above said case
 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @stash: Tx buffer stash for Flow-based scheduling mode
 * @compl_tag_bufid_m: Completion tag buffer id mask
 * @compl_tag_cur_gen: Used to keep track of current completion tag generation
 * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See struct idpf_tx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 */
struct idpf_tx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		struct idpf_base_tx_desc *base_tx;
		struct idpf_base_tx_ctx_desc *base_ctx;
		union idpf_tx_flex_desc *flex_tx;
		struct idpf_flex_tx_ctx_desc *flex_ctx;

		void *desc_ring;
	};
	struct libeth_sqe *tx_buf;
	struct idpf_txq_group *txq_grp;
	struct device *dev;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 idx;
	u16 desc_count;

	u16 tx_min_pkt_len;
	u16 compl_tag_gen_s;

	struct net_device *netdev;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u16 next_to_use;
	u16 next_to_clean;

	union {
		u32 cleaned_bytes;
		u32 clean_budget;
	};
	u16 cleaned_pkts;

	u16 tx_max_bufs;
	struct idpf_txq_stash *stash;

	u16 compl_tag_bufid_m;
	u16 compl_tag_cur_gen;
	u16 compl_tag_gen_max;

	struct u64_stats_sync stats_sync;
	struct idpf_tx_queue_stats q_stats;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
			    88 + sizeof(struct u64_stats_sync),
			    24);
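/* Illustrative tag arithmetic for the layout described above: a 64-entry
 * ring needs 6 buffer-id bits, leaving 10 generation bits, so the tag is
 * built roughly as
 *
 *	tag = (compl_tag_cur_gen << compl_tag_gen_s) |
 *	      (idx & compl_tag_bufid_m);
 *
 * e.g. gen = 3 and idx = 17 give (3 << 6) | 17 = 0xD1. The mask and shift
 * are derived from the ring size at queue setup.
 */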

/**
 * struct idpf_buf_queue - software structure representing a buffer queue
 * @split_buf: buffer descriptor array
 * @hdr_buf: &libeth_fqe for header buffers
 * @hdr_pp: &page_pool for header buffers
 * @buf: &libeth_fqe for data buffers
 * @pp: &page_pool for data buffers
 * @tail: Tail offset
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @hdr_truesize: truesize for buffer headers
 * @truesize: truesize for data buffers
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 */
struct idpf_buf_queue {
	__cacheline_group_begin_aligned(read_mostly);
	struct virtchnl2_splitq_rx_buf_desc *split_buf;
	struct libeth_fqe *hdr_buf;
	struct page_pool *hdr_pp;
	struct libeth_fqe *buf;
	struct page_pool *pp;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	u32 next_to_alloc;

	u32 hdr_truesize;
	u32 truesize;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u16 rx_buffer_low_watermark;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);

/**
 * struct idpf_compl_queue - software structure representing a completion queue
 * @comp: completion descriptor array
 * @txq_grp: See struct idpf_txq_group
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @clean_budget: queue cleaning budget
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
 *		 and bufq.
 * @next_to_clean: Next descriptor to clean
 * @num_completions: Only relevant for TX completion queue. It tracks the
 *		     number of completions received to compare against the
 *		     number of completions pending, as accumulated by the
 *		     TX queues.
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 */
struct idpf_compl_queue {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_splitq_tx_compl_desc *comp;
	struct idpf_txq_group *txq_grp;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;

	u32 clean_budget;
	struct net_device *netdev;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;

	aligned_u64 num_completions;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);

/**
 * struct idpf_sw_queue
 * @ring: Pointer to the ring
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Descriptor count
 * @next_to_use: Buffer to allocate at
 * @next_to_clean: Next descriptor to clean
 *
 * Software queues are used in splitq mode to manage buffers between rxq
 * producer and the bufq consumer. These are required in order to maintain a
 * lockless buffer management system and are strictly software only constructs.
 */
struct idpf_sw_queue {
	__cacheline_group_begin_aligned(read_mostly);
	u32 *ring;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	__cacheline_group_end_aligned(read_write);
};
libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);

/**
 * struct idpf_rxq_set
 * @rxq: RX queue
 * @refillq: pointers to refill queues
 *
 * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
 * Each rxq needs a refillq to return used buffers back to the respective bufq.
 * Bufqs then clean these refillqs for buffers to give to hardware.
 */
struct idpf_rxq_set {
	struct idpf_rx_queue rxq;
	struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};

/**
 * struct idpf_bufq_set
 * @bufq: Buffer queue
 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
 *		  in idpf_rxq_group.
 * @refillqs: Pointer to refill queues array.
 *
 * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
 * Used buffers received by rxqs will be put on refillqs which bufqs will
 * clean to return new buffers back to hardware.
 *
 * Buffers needed by some number of rxqs associated in this rxq_group are
 * managed by at most two bufqs (depending on performance configuration).
 */
struct idpf_bufq_set {
	struct idpf_buf_queue bufq;
	int num_refillqs;
	struct idpf_sw_queue *refillqs;
};

/**
 * struct idpf_rxq_group
 * @vport: Vport back pointer
 * @singleq: Struct with single queue related members
 * @singleq.num_rxq: Number of RX queues associated
 * @singleq.rxqs: Array of RX queue pointers
 * @splitq: Struct with split queue related members
 * @splitq.num_rxq_sets: Number of RX queue sets
 * @splitq.rxq_sets: Array of RX queue sets
 * @splitq.bufq_sets: Buffer queue set pointer
 *
 * In singleq mode, an rxq_group is simply an array of rxqs. In splitq, a
 * rxq_group contains all the rxqs, bufqs and refillqs needed to
 * manage buffers in splitq mode.
 */
struct idpf_rxq_group {
	struct idpf_vport *vport;

	union {
		struct {
			u16 num_rxq;
			struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
		} singleq;
		struct {
			u16 num_rxq_sets;
			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
			struct idpf_bufq_set *bufq_sets;
		} splitq;
	};
};

/**
 * struct idpf_txq_group
 * @vport: Vport back pointer
 * @num_txq: Number of TX queues associated
 * @txqs: Array of TX queue pointers
 * @stashes: array of OOO stashes for the queues
 * @complq: Associated completion queue pointer, split queue only
 * @num_completions_pending: Total number of completions pending for the
 *			     completion queue, accumulated for all TX queues
 *			     associated with that completion queue.
 *
 * Between singleq and splitq, a txq_group is largely the same except for the
 * complq. In splitq a single complq is responsible for handling completions
 * for some number of txqs associated in this txq_group.
 */
struct idpf_txq_group {
	struct idpf_vport *vport;

	u16 num_txq;
	struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
	struct idpf_txq_stash *stashes;

	struct idpf_compl_queue *complq;

	aligned_u64 num_completions_pending;
};

static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
{
	u32 cpu;

	if (!q_vector)
		return NUMA_NO_NODE;

	cpu = cpumask_first(q_vector->affinity_mask);

	return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}
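/* Typical use (sketch): pick the NUMA node for queue-related allocations so
 * memory lands near the CPU servicing the vector, e.g.
 *
 *	void *ring = kzalloc_node(size, GFP_KERNEL,
 *				  idpf_q_vector_to_mem(q_vector));
 *
 * falling back to NUMA_NO_NODE (any node) while the vector or its affinity
 * is not yet known.
 */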

/**
 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 * @size: transmit request size in bytes
 *
 * In the case where a large frag (>= 16K) needs to be split across multiple
 * descriptors, we need to assume that we can have no more than 12K of data
 * per descriptor due to hardware alignment restrictions (4K alignment).
 */
static inline u32 idpf_size_to_txd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
}
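/* Worked example (illustrative): IDPF_TX_MAX_DESC_DATA_ALIGNED is
 * ALIGN_DOWN(16K - 1, 4K) = 12K, so a 64KB frag needs
 * DIV_ROUND_UP(65536, 12288) = 6 descriptors.
 */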

/**
 * idpf_tx_singleq_build_ctob - populate command tag offset and size
 * @td_cmd: Command to be filled in desc
 * @td_offset: Offset to be filled in desc
 * @size: Size of the buffer
 * @td_tag: td tag to be filled
 *
 * Returns the 64 bit value populated with the input parameters
 */
static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
						unsigned int size, u64 td_tag)
{
	return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
			   (td_cmd << IDPF_TXD_QW1_CMD_S) |
			   (td_offset << IDPF_TXD_QW1_OFFSET_S) |
			   ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << IDPF_TXD_QW1_L2TAG1_S));
}

void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size);
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size);
/**
 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
					     struct idpf_tx_splitq_params *params,
					     u16 td_cmd, u16 size)
{
	if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
	else
		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}

/**
 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
 * @q_vector: pointer to queue vector struct
 */
static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
{
	struct idpf_intr_reg *reg;

	if (q_vector->wb_on_itr)
		return;

	q_vector->wb_on_itr = true;
	reg = &q_vector->intr_reg;

	writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
	       (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
	       reg->dyn_ctl);
}

int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_msg);
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
			     struct virtchnl2_create_vport *vport_msg,
			     struct idpf_vport_max_q *max_q);
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
int idpf_vport_queues_alloc(struct idpf_vport *vport);
void idpf_vport_queues_rel(struct idpf_vport *vport);
void idpf_vport_intr_rel(struct idpf_vport *vport);
int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
void idpf_vport_intr_ena(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
		      unsigned int size);
struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
			   bool xmit_more);
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
			   struct idpf_tx_buf *first, u16 ring_idx);
unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
					 struct sk_buff *skb);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
				  struct idpf_tx_queue *tx_q);
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
				      u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);

static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
					     u32 needed)
{
	return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
					  IDPF_DESC_UNUSED(tx_q),
					  needed, needed);
}
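/* Typical call site (sketch): checked on the xmit path before descriptors
 * are written, so the stack stops queuing when the ring is nearly full:
 *
 *	if (unlikely(idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)))
 *		return NETDEV_TX_BUSY;	hypothetical handling
 *
 * The completion path is then expected to wake the queue (e.g. via
 * netif_txq_completed_wake()) once enough descriptors are freed again.
 */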

#endif /* !_IDPF_TXRX_H_ */