/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_

#include <linux/dim.h>

#include <net/libeth/cache.h>
#include <net/tcp.h>
#include <net/netdev_queues.h>

#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"

#define IDPF_LARGE_MAX_Q			256
#define IDPF_MAX_Q				16
#define IDPF_MIN_Q				2
/* Mailbox Queue */
#define IDPF_MAX_MBXQ				1

#define IDPF_MIN_TXQ_DESC			64
#define IDPF_MIN_RXQ_DESC			64
#define IDPF_MIN_TXQ_COMPLQ_DESC		256
#define IDPF_MAX_QIDS				256

/* Number of descriptors in a queue should be a multiple of 32. RX queue
 * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
 * to achieve BufQ descriptors aligned to 32
 */
#define IDPF_REQ_DESC_MULTIPLE			32
#define IDPF_REQ_RXQ_DESC_MULTIPLE	(IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
#define IDPF_MIN_TX_DESC_NEEDED		(MAX_SKB_FRAGS + 6)
#define IDPF_TX_WAKE_THRESH		((u16)IDPF_MIN_TX_DESC_NEEDED * 2)

#define IDPF_MAX_DESCS				8160
#define IDPF_MAX_TXQ_DESC	ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
#define IDPF_MAX_RXQ_DESC	ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
#define MIN_SUPPORT_TXDID (\
	VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
	VIRTCHNL2_TXDID_FLEX_TSO_CTX)

#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS		1
#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS		1
#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP		4
#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP		4

#define IDPF_COMPLQ_PER_GROUP			1
#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP		1
#define IDPF_MAX_BUFQS_PER_RXQ_GRP		2
#define IDPF_BUFQ2_ENA				1
#define IDPF_NUMQ_PER_CHUNK			1

#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP		1
#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP		1

/* Default vector sharing */
#define IDPF_MBX_Q_VEC		1
#define IDPF_MIN_Q_VEC		1
#define IDPF_MIN_RDMA_VEC	2

#define IDPF_DFLT_TX_Q_DESC_COUNT		512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512
#define IDPF_DFLT_RX_Q_DESC_COUNT		512

/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
 * given RX completion queue has descriptors. This includes _ALL_ buffer
 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
 * you have a total of 1024 buffers so your RX queue _must_ have at least that
 * many descriptors. This macro divides a given number of RX descriptors by
 * the number of buffer queues to calculate how many descriptors each buffer
 * queue can have without overrunning the RX queue.
 *
 * If you give hardware more buffers than completion descriptors, then if
 * hardware gets a chance to post more than a ring wrap of descriptors before
 * SW gets an interrupt and overwrites SW head, the gen bit in the descriptor
 * will be wrong. Any overwritten descriptors' buffers will be gone forever and
 * SW has no reasonable way to tell that this has happened. From the SW
 * perspective, when we finally get an interrupt, it looks like we're still
 * waiting for a descriptor to be done, stalling forever.
 */
#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))
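
/* Sanity-check example (not part of the original header; assumes
 * static_assert from <linux/build_bug.h> is reachable through the existing
 * includes): with the default 512 RX descriptors and the maximum of two
 * buffer queues per group, each buffer queue gets 256 descriptors, so the
 * total buffer count can never exceed the RX queue size.
 */
static_assert(IDPF_RX_BUFQ_DESC_COUNT(IDPF_DFLT_RX_Q_DESC_COUNT,
				      IDPF_MAX_BUFQS_PER_RXQ_GRP) == 256);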

#define IDPF_RX_BUFQ_WORKING_SET(rxq)	((rxq)->desc_count - 1)

#define IDPF_RX_BUMP_NTC(rxq, ntc)				\
do {								\
	if (unlikely(++(ntc) == (rxq)->desc_count)) {		\
		ntc = 0;					\
		idpf_queue_change(GEN_CHK, rxq);		\
	}							\
} while (0)

#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx)			\
do {								\
	if (unlikely(++(idx) == (q)->desc_count))		\
		idx = 0;					\
} while (0)
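
/* Usage sketch (illustrative only, not the driver's actual clean loop): bump
 * a local ring index and let the macro handle the wrap back to zero, flipping
 * the SW gen bit in the splitq case:
 *
 *	ntc = rxq->next_to_clean;
 *	... process the descriptor at ntc ...
 *	IDPF_RX_BUMP_NTC(rxq, ntc);
 *	rxq->next_to_clean = ntc;
 */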

#define IDPF_RX_BUF_STRIDE		32
#define IDPF_RX_BUF_POST_STRIDE		16
#define IDPF_LOW_WATERMARK		64

#define IDPF_TX_TSO_MIN_MSS		88

/* Minimum number of descriptors between 2 descriptors with the RE bit set;
 * only relevant in flow scheduling mode
 */
#define IDPF_TX_SPLITQ_RE_MIN_GAP	64

#define IDPF_RX_BI_GEN_M		BIT(16)
#define IDPF_RX_BI_BUFID_M		GENMASK(15, 0)
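
/* Each refill queue (see struct idpf_sw_queue below) ring entry is a u32
 * carrying the buffer ID in the low 16 bits and the SW-maintained generation
 * bit above it. A rough sketch of composing one entry (illustrative only,
 * assuming FIELD_PREP from <linux/bitfield.h>):
 *
 *	refillq->ring[nta] = FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
 *			     FIELD_PREP(IDPF_RX_BI_GEN_M,
 *					idpf_queue_has(RFL_GEN_CHK, refillq));
 */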

#define IDPF_RXD_EOF_SPLITQ		VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ		VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M

#define IDPF_DESC_UNUSED(txq) \
	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
	 (txq)->next_to_clean - (txq)->next_to_use - 1)

#define IDPF_TX_BUF_RSV_UNUSED(txq)	((txq)->stash->buf_stack.top)
#define IDPF_TX_BUF_RSV_LOW(txq)	(IDPF_TX_BUF_RSV_UNUSED(txq) < \
					 (txq)->desc_count >> 2)
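
/* Worked example (illustrative only): with a 512 entry ring,
 * next_to_clean = 10 and next_to_use = 500, IDPF_DESC_UNUSED() yields
 * 512 + 10 - 500 - 1 = 21 free descriptors. One slot is always kept unused so
 * that a completely full ring can be distinguished from an empty one.
 */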

#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
 * completions that are expected to arrive on the TX completion queue.
 */
#define IDPF_TX_COMPLQ_PENDING(txq)	\
	(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
	0 : U32_MAX) + \
	(txq)->num_completions_pending - (txq)->complq->num_completions)
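
/* The pending and received completion counts are free-running counters; the
 * conditional U32_MAX term guards against the pending count having wrapped
 * relative to the received count, so the macro still yields a sensible
 * outstanding-completion count.
 */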

#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH	16
/* Adjust the generation for the completion tag and wrap if necessary */
#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
	((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
	0 : (txq)->compl_tag_cur_gen)
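
/* The 16-bit completion tag is split into a ring index in the low bits and a
 * generation count in the high bits; the exact split depends on the TXQ
 * descriptor count and is described with struct idpf_tx_queue below.
 */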

#define IDPF_TXD_LAST_DESC_CMD	(IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)

#define IDPF_TX_FLAGS_TSO	BIT(0)
#define IDPF_TX_FLAGS_IPV4	BIT(1)
#define IDPF_TX_FLAGS_IPV6	BIT(2)
#define IDPF_TX_FLAGS_TUNNEL	BIT(3)
#define IDPF_TX_FLAGS_TSYN	BIT(4)

union idpf_tx_flex_desc {
	struct idpf_flex_tx_desc q; /* queue based scheduling */
	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};

#define idpf_tx_buf libeth_sqe

/**
 * struct idpf_buf_lifo - LIFO for managing OOO completions
 * @top: Used to know how many buffers are left
 * @size: Total size of LIFO
 * @bufs: Backing array
 */
struct idpf_buf_lifo {
	u16 top;
	u16 size;
	struct idpf_tx_stash **bufs;
};

/**
 * struct idpf_tx_offload_params - Offload parameters for a given packet
 * @tx_flags: Feature flags enabled for this packet
 * @hdr_offsets: Offset parameter for single queue model
 * @cd_tunneling: Type of tunneling enabled for single queue model
 * @tso_len: Total length of payload to segment
 * @mss: Segment size
 * @tso_segs: Number of segments to be sent
 * @tso_hdr_len: Length of headers to be duplicated
 * @td_cmd: Command field to be inserted into descriptor
 */
struct idpf_tx_offload_params {
	u32 tx_flags;

	u32 hdr_offsets;
	u32 cd_tunneling;

	u32 tso_len;
	u16 mss;
	u16 tso_segs;
	u16 tso_hdr_len;

	u16 td_cmd;
};

/**
 * struct idpf_tx_splitq_params
 * @dtype: General descriptor info
 * @eop_cmd: Type of EOP
 * @compl_tag: Associated tag for completion
 * @td_tag: Descriptor tunneling tag
 * @offload: Offload parameters
 */
struct idpf_tx_splitq_params {
	enum idpf_tx_desc_dtype_value dtype;
	u16 eop_cmd;
	union {
		u16 compl_tag;
		u16 td_tag;
	};

	struct idpf_tx_offload_params offload;
};

enum idpf_tx_ctx_desc_eipt_offload {
	IDPF_TX_CTX_EXT_IP_NONE		= 0x0,
	IDPF_TX_CTX_EXT_IP_IPV6		= 0x1,
	IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM	= 0x2,
	IDPF_TX_CTX_EXT_IP_IPV4		= 0x3
};

#define IDPF_TX_COMPLQ_CLEAN_BUDGET	256
#define IDPF_TX_MIN_PKT_LEN		17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR	1
#define IDPF_TX_DESCS_PER_CACHE_LINE	(L1_CACHE_BYTES / \
					 sizeof(struct idpf_flex_tx_desc))
#define IDPF_TX_DESCS_FOR_CTX		1
/* TX descriptors needed, worst case */
#define IDPF_TX_DESC_NEEDED	(MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
				 IDPF_TX_DESCS_PER_CACHE_LINE + \
				 IDPF_TX_DESCS_FOR_SKB_DATA_PTR)

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IDPF_TX_MAX_READ_REQ_SIZE	SZ_4K
#define IDPF_TX_MAX_DESC_DATA		(SZ_16K - 1)
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
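
/* Illustrative compile-time check (not part of the original header; assumes
 * static_assert is available via the existing includes): rounding the
 * 16K - 1 per-descriptor limit down to the 4K read request size leaves 12K of
 * usable data per descriptor.
 */
static_assert(IDPF_TX_MAX_DESC_DATA_ALIGNED == 3 * SZ_4K);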

#define idpf_rx_buf libeth_fqe

#define IDPF_RX_MAX_PTYPE_PROTO_IDS	32
#define IDPF_RX_MAX_PTYPE_SZ	(sizeof(struct virtchnl2_ptype) + \
				 (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
#define IDPF_RX_PTYPE_HDR_SZ	sizeof(struct virtchnl2_get_ptype_info)
#define IDPF_RX_MAX_PTYPES_PER_BUF	\
	DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
			   IDPF_RX_MAX_PTYPE_SZ)

#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)

#define IDPF_TUN_IP_GRE (\
	IDPF_PTYPE_TUNNEL_IP |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT)

#define IDPF_TUN_IP_GRE_MAC (\
	IDPF_TUN_IP_GRE |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)

#define IDPF_RX_MAX_PTYPE	1024
#define IDPF_RX_MAX_BASE_PTYPE	256
#define IDPF_INVALID_PTYPE_ID	0xFFFF

enum idpf_tunnel_state {
	IDPF_PTYPE_TUNNEL_IP		= BIT(0),
	IDPF_PTYPE_TUNNEL_IP_GRENAT	= BIT(1),
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC	= BIT(2),
};

struct idpf_ptype_state {
	bool outer_ip:1;
	bool outer_frag:1;
	u8 tunnel_state:6;
};

/**
 * enum idpf_queue_flags_t
 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
 *		      identify new descriptor writebacks on the ring. HW sets
 *		      the gen bit to 1 on the first writeback of any given
 *		      descriptor. After the ring wraps, HW sets the gen bit of
 *		      those descriptors to 0, and continues flipping
 *		      0->1 or 1->0 on each ring wrap. SW maintains its own
 *		      gen bit to know what value will indicate writebacks on
 *		      the next pass around the ring. E.g. it is initialized
 *		      to 1 and knows that reading a gen bit of 1 in any
 *		      descriptor on the initial pass of the ring indicates a
 *		      writeback. It also flips on every ring wrap.
 * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
 *			  bit and Q_RFL_GEN is the SW bit.
 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
 * @__IDPF_Q_POLL_MODE: Enable poll mode
 * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
 * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
 * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
 *		  queue
 * @__IDPF_Q_FLAGS_NBITS: Must be last
 */
enum idpf_queue_flags_t {
	__IDPF_Q_GEN_CHK,
	__IDPF_Q_RFL_GEN_CHK,
	__IDPF_Q_FLOW_SCH_EN,
	__IDPF_Q_SW_MARKER,
	__IDPF_Q_POLL_MODE,
	__IDPF_Q_CRC_EN,
	__IDPF_Q_HSPLIT_EN,
	__IDPF_Q_PTP,

	__IDPF_Q_FLAGS_NBITS,
};

#define idpf_queue_set(f, q)		__set_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_clear(f, q)		__clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_change(f, q)		__change_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has(f, q)		test_bit(__IDPF_Q_##f, (q)->flags)

#define idpf_queue_has_clear(f, q) \
	__test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_assign(f, q, v) \
	__assign_bit(__IDPF_Q_##f, (q)->flags, v)
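
/* Usage sketch (illustrative only): the helpers take the short flag name and
 * paste it onto the __IDPF_Q_ prefix, so the splitq RX clean path can check
 * and maintain the SW gen bit roughly as follows:
 *
 *	if (idpf_queue_has(GEN_CHK, rxq) != gen_bit_in_descriptor)
 *		break;				// not written back yet
 *	...
 *	idpf_queue_change(GEN_CHK, rxq);	// flip on ring wrap
 */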

/**
 * struct idpf_vec_regs
 * @dyn_ctl_reg: Dynamic control interrupt register offset
 * @itrn_reg: Interrupt Throttling Rate register offset
 * @itrn_index_spacing: Register spacing between ITR registers of the same
 *			vector
 */
struct idpf_vec_regs {
	u32 dyn_ctl_reg;
	u32 itrn_reg;
	u32 itrn_index_spacing;
};

/**
 * struct idpf_intr_reg
 * @dyn_ctl: Dynamic control interrupt register
 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
 * @dyn_ctl_itridx_s: Register bit offset for ITR index
 * @dyn_ctl_itridx_m: Mask for ITR index
 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
 * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
 * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
 * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
 * @rx_itr: RX ITR register
 * @tx_itr: TX ITR register
 * @icr_ena: Interrupt cause register offset
 * @icr_ena_ctlq_m: Mask for ICR
 */
struct idpf_intr_reg {
	void __iomem *dyn_ctl;
	u32 dyn_ctl_intena_m;
	u32 dyn_ctl_intena_msk_m;
	u32 dyn_ctl_itridx_s;
	u32 dyn_ctl_itridx_m;
	u32 dyn_ctl_intrvl_s;
	u32 dyn_ctl_wb_on_itr_m;
	u32 dyn_ctl_sw_itridx_ena_m;
	u32 dyn_ctl_swint_trig_m;
	void __iomem *rx_itr;
	void __iomem *tx_itr;
	void __iomem *icr_ena;
	u32 icr_ena_ctlq_m;
};

/**
 * struct idpf_q_vector
 * @vport: Vport back pointer
 * @num_rxq: Number of RX queues
 * @num_txq: Number of TX queues
 * @num_bufq: Number of buffer queues
 * @num_complq: number of completion queues
 * @rx: Array of RX queues to service
 * @tx: Array of TX queues to service
 * @bufq: Array of buffer queues to service
 * @complq: array of completion queues
 * @intr_reg: See struct idpf_intr_reg
 * @napi: napi handler
 * @total_events: Number of interrupts processed
 * @wb_on_itr: whether WB on ITR is enabled
 * @tx_dim: Data for TX net_dim algorithm
 * @tx_itr_value: TX interrupt throttling rate
 * @tx_intr_mode: Dynamic ITR or not
 * @tx_itr_idx: TX ITR index
 * @rx_dim: Data for RX net_dim algorithm
 * @rx_itr_value: RX interrupt throttling rate
 * @rx_intr_mode: Dynamic ITR or not
 * @rx_itr_idx: RX ITR index
 * @v_idx: Vector index
 */
struct idpf_q_vector {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_vport *vport;

	u16 num_rxq;
	u16 num_txq;
	u16 num_bufq;
	u16 num_complq;
	struct idpf_rx_queue **rx;
	struct idpf_tx_queue **tx;
	struct idpf_buf_queue **bufq;
	struct idpf_compl_queue **complq;

	struct idpf_intr_reg intr_reg;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	struct napi_struct napi;
	u16 total_events;
	bool wb_on_itr;

	struct dim tx_dim;
	u16 tx_itr_value;
	bool tx_intr_mode;
	u32 tx_itr_idx;

	struct dim rx_dim;
	u16 rx_itr_value;
	bool rx_intr_mode;
	u32 rx_itr_idx;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u16 v_idx;

	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 120,
			    24 + sizeof(struct napi_struct) +
			    2 * sizeof(struct dim),
			    8);

struct idpf_rx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t rsc_pkts;
	u64_stats_t hw_csum_err;
	u64_stats_t hsplit_pkts;
	u64_stats_t hsplit_buf_ovf;
	u64_stats_t bad_descs;
};

struct idpf_tx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t lso_pkts;
	u64_stats_t linearize;
	u64_stats_t q_busy;
	u64_stats_t skb_drops;
	u64_stats_t dma_map_errs;
	u64_stats_t tstamp_skipped;
};

#define IDPF_ITR_DYNAMIC	1
#define IDPF_ITR_MAX		0x1FE0
#define IDPF_ITR_20K		0x0032
#define IDPF_ITR_GRAN_S		1	/* Assume ITR granularity is 2us */
#define IDPF_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & IDPF_ITR_MASK)
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF		IDPF_ITR_20K
#define IDPF_ITR_RX_DEF		IDPF_ITR_20K
/* Index used for 'SW ITR' update in DYN_CTL register */
#define IDPF_SW_ITR_UPDATE_IDX	2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX	3
#define IDPF_ITR_IDX_SPACING(spacing, dflt)	(spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX		1

/**
 * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
 * @buf_stack: Stack of empty buffers to store buffer info for out of order
 *	       buffer completions. See struct idpf_buf_lifo
 * @sched_buf_hash: Hash table to store buffers
 */
struct idpf_txq_stash {
	struct idpf_buf_lifo buf_stack;
	DECLARE_HASHTABLE(sched_buf_hash, 12);
} ____cacheline_aligned;

/**
 * struct idpf_rx_queue - software structure representing a receive queue
 * @rx: universal receive descriptor array
 * @single_buf: buffer descriptor array in singleq
 * @desc_ring: virtual descriptor ring address
 * @bufq_sets: Pointer to the array of buffer queues in splitq mode
 * @napi: NAPI instance corresponding to this queue (splitq)
 * @rx_buf: See struct &libeth_fqe
 * @pp: Page pool pointer in singleq mode
 * @netdev: &net_device corresponding to this queue
 * @tail: Tail offset. Used for both queue models single and split.
 * @flags: See enum idpf_queue_flags_t
 * @idx: For RX queue, it is used to index to total RX queue across groups and
 *	 used for skb reporting.
 * @desc_count: Number of descriptors
 * @rxdids: Supported RX descriptor ids
 * @rx_ptype_lkup: LUT of Rx ptypes
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @skb: Pointer to the skb
 * @truesize: data buffer truesize in singleq
 * @cached_phc_time: Cached PHC time for the Rx queue
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See struct idpf_rx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 * @rx_max_pkt_size: RX max packet size
 */
struct idpf_rx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		union virtchnl2_rx_desc *rx;
		struct virtchnl2_singleq_rx_buf_desc *single_buf;

		void *desc_ring;
	};
	union {
		struct {
			struct idpf_bufq_set *bufq_sets;
			struct napi_struct *napi;
		};
		struct {
			struct libeth_fqe *rx_buf;
			struct page_pool *pp;
		};
	};
	struct net_device *netdev;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 idx;
	u16 desc_count;

	u32 rxdids;
	const struct libeth_rx_pt *rx_ptype_lkup;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	struct sk_buff *skb;
	u32 truesize;
	u64 cached_phc_time;

	struct u64_stats_sync stats_sync;
	struct idpf_rx_queue_stats q_stats;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u16 rx_buffer_low_watermark;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	u16 rx_max_pkt_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
			    88 + sizeof(struct u64_stats_sync),
			    32);

/**
 * struct idpf_tx_queue - software structure representing a transmit queue
 * @base_tx: base Tx descriptor array
 * @base_ctx: base Tx context descriptor array
 * @flex_tx: flex Tx descriptor array
 * @flex_ctx: flex Tx context descriptor array
 * @desc_ring: virtual descriptor ring address
 * @tx_buf: See struct idpf_tx_buf
 * @txq_grp: See struct idpf_txq_group
 * @dev: Device back pointer for DMA mapping
 * @tail: Tail offset. Used for both queue models single and split
 * @flags: See enum idpf_queue_flags_t
 * @idx: For TX queue, it is used as index to map between TX queue group and
 *	 hot path TX pointers stored in vport. Used in both singleq/splitq.
 * @desc_count: Number of descriptors
 * @tx_min_pkt_len: Min supported packet length
 * @compl_tag_gen_s: Completion tag generation bit
 *	The format of the completion tag will change based on the TXQ
 *	descriptor ring size so that we can maintain roughly the same level
 *	of "uniqueness" across all descriptor sizes. For example, if the
 *	TXQ descriptor ring size is 64 (the minimum size supported), the
 *	completion tag will be formatted as below:
 *	15                 6 5         0
 *	--------------------------------
 *	|    GEN=0-1023     |IDX = 0-63|
 *	--------------------------------
 *
 *	This gives us 64*1024 = 65536 possible unique values. Similarly, if
 *	the TXQ descriptor ring size is 8160 (the maximum size supported),
 *	the completion tag will be formatted as below:
 *	15 13 12                      0
 *	--------------------------------
 *	|GEN |       IDX = 0-8159      |
 *	--------------------------------
 *
 *	This gives us 8*8160 = 65280 possible unique values.
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 *		   the TX completion queue, it can be for any TXQ associated
 *		   with that completion queue. This means we can clean up to
 *		   N TXQs during a single call to clean the completion queue.
 *		   cleaned_bytes|pkts tracks the clean stats per TXQ during
 *		   that single call to clean the completion queue. By doing so,
 *		   we can update BQL with aggregate cleaned stats for each TXQ
 *		   only once at the end of the cleaning routine.
 * @clean_budget: singleq only, queue cleaning budget
 * @cleaned_pkts: Number of packets cleaned for the above said case
 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @stash: Tx buffer stash for Flow-based scheduling mode
 * @compl_tag_bufid_m: Completion tag buffer id mask
 * @compl_tag_cur_gen: Used to keep track of current completion tag generation
 * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
 * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
 * @tstamp_task: Work that handles Tx timestamp read
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See struct idpf_tx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 */
struct idpf_tx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		struct idpf_base_tx_desc *base_tx;
		struct idpf_base_tx_ctx_desc *base_ctx;
		union idpf_tx_flex_desc *flex_tx;
		union idpf_flex_tx_ctx_desc *flex_ctx;

		void *desc_ring;
	};
	struct libeth_sqe *tx_buf;
	struct idpf_txq_group *txq_grp;
	struct device *dev;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 idx;
	u16 desc_count;

	u16 tx_min_pkt_len;
	u16 compl_tag_gen_s;

	struct net_device *netdev;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u16 next_to_use;
	u16 next_to_clean;

	union {
		u32 cleaned_bytes;
		u32 clean_budget;
	};
	u16 cleaned_pkts;

	u16 tx_max_bufs;
	struct idpf_txq_stash *stash;

	u16 compl_tag_bufid_m;
	u16 compl_tag_cur_gen;
	u16 compl_tag_gen_max;

	struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
	struct work_struct *tstamp_task;

	struct u64_stats_sync stats_sync;
	struct idpf_tx_queue_stats q_stats;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
			    112 + sizeof(struct u64_stats_sync),
			    24);

/**
 * struct idpf_buf_queue - software structure representing a buffer queue
 * @split_buf: buffer descriptor array
 * @hdr_buf: &libeth_fqe for header buffers
 * @hdr_pp: &page_pool for header buffers
 * @buf: &libeth_fqe for data buffers
 * @pp: &page_pool for data buffers
 * @tail: Tail offset
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @hdr_truesize: truesize for buffer headers
 * @truesize: truesize for data buffers
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 */
struct idpf_buf_queue {
	__cacheline_group_begin_aligned(read_mostly);
	struct virtchnl2_splitq_rx_buf_desc *split_buf;
	struct libeth_fqe *hdr_buf;
	struct page_pool *hdr_pp;
	struct libeth_fqe *buf;
	struct page_pool *pp;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	u32 next_to_alloc;

	u32 hdr_truesize;
	u32 truesize;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u16 rx_buffer_low_watermark;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);

/**
 * struct idpf_compl_queue - software structure representing a completion queue
 * @comp: completion descriptor array
 * @txq_grp: See struct idpf_txq_group
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @clean_budget: queue cleaning budget
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
 *		 and bufq.
 * @next_to_clean: Next descriptor to clean
 * @num_completions: Only relevant for TX completion queue. It tracks the
 *		     number of completions received to compare against the
 *		     number of completions pending, as accumulated by the
 *		     TX queues.
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 */
struct idpf_compl_queue {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_splitq_tx_compl_desc *comp;
	struct idpf_txq_group *txq_grp;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;

	u32 clean_budget;
	struct net_device *netdev;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;

	aligned_u64 num_completions;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);

/**
 * struct idpf_sw_queue
 * @ring: Pointer to the ring
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Descriptor count
 * @next_to_use: Buffer to allocate at
 * @next_to_clean: Next descriptor to clean
 *
 * Software queues are used in splitq mode to manage buffers between rxq
 * producer and the bufq consumer. These are required in order to maintain a
 * lockless buffer management system and are strictly software only constructs.
 */
struct idpf_sw_queue {
	__cacheline_group_begin_aligned(read_mostly);
	u32 *ring;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	__cacheline_group_end_aligned(read_write);
};
libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);

/**
 * struct idpf_rxq_set
 * @rxq: RX queue
 * @refillq: pointers to refill queues
 *
 * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
 * Each rxq needs a refillq to return used buffers back to the respective bufq.
 * Bufqs then clean these refillqs for buffers to give to hardware.
 */
struct idpf_rxq_set {
	struct idpf_rx_queue rxq;
	struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};

/**
 * struct idpf_bufq_set
 * @bufq: Buffer queue
 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
 *		  in idpf_rxq_group.
 * @refillqs: Pointer to refill queues array.
 *
 * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
 * Used buffers received by rxqs will be put on refillqs, which bufqs will
 * clean to return new buffers back to hardware.
 *
 * Buffers needed by some number of rxqs associated in this rxq_group are
 * managed by at most two bufqs (depending on performance configuration).
 */
struct idpf_bufq_set {
	struct idpf_buf_queue bufq;
	int num_refillqs;
	struct idpf_sw_queue *refillqs;
};

/**
 * struct idpf_rxq_group
 * @vport: Vport back pointer
 * @singleq: Struct with single queue related members
 * @singleq.num_rxq: Number of RX queues associated
 * @singleq.rxqs: Array of RX queue pointers
 * @splitq: Struct with split queue related members
 * @splitq.num_rxq_sets: Number of RX queue sets
 * @splitq.rxq_sets: Array of RX queue sets
 * @splitq.bufq_sets: Buffer queue set pointer
 *
 * In singleq mode, an rxq_group is simply an array of rxqs. In splitq, a
 * rxq_group contains all the rxqs, bufqs and refillqs needed to
 * manage buffers in splitq mode.
 */
struct idpf_rxq_group {
	struct idpf_vport *vport;

	union {
		struct {
			u16 num_rxq;
			struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
		} singleq;
		struct {
			u16 num_rxq_sets;
			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
			struct idpf_bufq_set *bufq_sets;
		} splitq;
	};
};

/**
 * struct idpf_txq_group
 * @vport: Vport back pointer
 * @num_txq: Number of TX queues associated
 * @txqs: Array of TX queue pointers
 * @stashes: array of OOO stashes for the queues
 * @complq: Associated completion queue pointer, split queue only
 * @num_completions_pending: Total number of completions pending for the
 *			     completion queue, accumulated for all TX queues
 *			     associated with that completion queue.
 *
 * Between singleq and splitq, a txq_group is largely the same except for the
 * complq. In splitq a single complq is responsible for handling completions
 * for some number of txqs associated in this txq_group.
 */
struct idpf_txq_group {
	struct idpf_vport *vport;

	u16 num_txq;
	struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
	struct idpf_txq_stash *stashes;

	struct idpf_compl_queue *complq;

	aligned_u64 num_completions_pending;
};

static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
{
	u32 cpu;

	if (!q_vector)
		return NUMA_NO_NODE;

	cpu = cpumask_first(&q_vector->napi.config->affinity_mask);

	return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}

/**
 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 * @size: transmit request size in bytes
 *
 * In the case where a large frag (>= 16K) needs to be split across multiple
 * descriptors, we need to assume that we can have no more than 12K of data
 * per descriptor due to hardware alignment restrictions (4K alignment).
 */
static inline u32 idpf_size_to_txd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
}
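
/* Worked example (illustrative only): a 64K frag mapped with at most 12K of
 * data per descriptor needs DIV_ROUND_UP(65536, 12288) = 6 data descriptors.
 */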

/**
 * idpf_tx_singleq_build_ctob - populate command tag offset and size
 * @td_cmd: Command to be filled in desc
 * @td_offset: Offset to be filled in desc
 * @size: Size of the buffer
 * @td_tag: td tag to be filled
 *
 * Returns the 64 bit value populated with the input parameters
 */
static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
						unsigned int size, u64 td_tag)
{
	return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
			   (td_cmd << IDPF_TXD_QW1_CMD_S) |
			   (td_offset << IDPF_TXD_QW1_OFFSET_S) |
			   ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << IDPF_TXD_QW1_L2TAG1_S));
}
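
/* Usage sketch (illustrative only): the last data descriptor of a frame is
 * typically built with the EOP and RS bits included in the command, e.g.
 * (assuming the base descriptor's qw1 field):
 *
 *	tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd | IDPF_TXD_LAST_DESC_CMD,
 *						  td_offset, size, td_tag);
 */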

void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size);
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size);
/**
 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
					     struct idpf_tx_splitq_params *params,
					     u16 td_cmd, u16 size)
{
	if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
	else
		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}

/**
 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
 * @q_vector: pointer to queue vector struct
 */
static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
{
	struct idpf_intr_reg *reg;

	if (q_vector->wb_on_itr)
		return;

	q_vector->wb_on_itr = true;
	reg = &q_vector->intr_reg;

	writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
	       (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
	       reg->dyn_ctl);
}

int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_msg);
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
			     struct virtchnl2_create_vport *vport_msg,
			     struct idpf_vport_max_q *max_q);
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
int idpf_vport_queues_alloc(struct idpf_vport *vport);
void idpf_vport_queues_rel(struct idpf_vport *vport);
void idpf_vport_intr_rel(struct idpf_vport *vport);
int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
void idpf_vport_intr_ena(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
		      unsigned int size);
struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
			   bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
			   struct idpf_tx_buf *first, u16 ring_idx);
unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
					 struct sk_buff *skb);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
				  struct idpf_tx_queue *tx_q);
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
				      u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);

#endif /* !_IDPF_TXRX_H_ */