/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_

#include <linux/dim.h>

#include <net/libeth/cache.h>
#include <net/tcp.h>
#include <net/netdev_queues.h>

#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"

#define IDPF_LARGE_MAX_Q			256
#define IDPF_MAX_Q				16
#define IDPF_MIN_Q				2
/* Mailbox Queue */
#define IDPF_MAX_MBXQ				1

#define IDPF_MIN_TXQ_DESC			64
#define IDPF_MIN_RXQ_DESC			64
#define IDPF_MIN_TXQ_COMPLQ_DESC		256
#define IDPF_MAX_QIDS				256

/* The number of descriptors in a queue must be a multiple of 32. RX queue
 * descriptors must additionally be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
 * so that each BufQ's share of them remains aligned to 32.
 */
#define IDPF_REQ_DESC_MULTIPLE			32
#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)

#define IDPF_MAX_DESCS				8160
#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
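
/* Worked example (illustrative arithmetic only): with two buffer queues,
 * IDPF_REQ_RXQ_DESC_MULTIPLE is 2 * 32 == 64, so IDPF_MAX_RXQ_DESC is
 * ALIGN_DOWN(8160, 64) == 8128, while IDPF_MAX_TXQ_DESC stays
 * ALIGN_DOWN(8160, 32) == 8160.
 */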
#define MIN_SUPPORT_TXDID (\
	VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
	VIRTCHNL2_TXDID_FLEX_TSO_CTX)

#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS		1
#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS		1
#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP		4
#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP		4

#define IDPF_COMPLQ_PER_GROUP			1
#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP		1
#define IDPF_MAX_BUFQS_PER_RXQ_GRP		2
#define IDPF_BUFQ2_ENA				1
#define IDPF_NUMQ_PER_CHUNK			1

#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP		1
#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP		1

/* Default vector sharing */
#define IDPF_MBX_Q_VEC		1
#define IDPF_MIN_Q_VEC		1
#define IDPF_MIN_RDMA_VEC	2

#define IDPF_DFLT_TX_Q_DESC_COUNT		512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512
#define IDPF_DFLT_RX_Q_DESC_COUNT		512
/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
 * given RX completion queue has descriptors. This includes _ALL_ buffer
 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
 * you have a total of 1024 buffers so your RX queue _must_ have at least that
 * many descriptors. This macro divides a given number of RX descriptors by
 * the number of buffer queues to calculate how many descriptors each buffer
 * queue can have without overrunning the RX queue.
 *
 * If hardware is given more buffers than completion descriptors, and it gets
 * the chance to post more than a ring's worth of descriptors before SW takes
 * an interrupt, it overwrites the SW head and the gen bits of the overwritten
 * descriptors are wrong. The buffers of any overwritten descriptors are lost
 * forever, and SW has no reasonable way to tell that this has happened. From
 * the SW perspective, when the interrupt finally arrives, it looks like we
 * are still waiting for a descriptor to be done, stalling forever.
 */
#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))
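
/* Worked example (illustrative values only): an RX queue with 1024
 * descriptors served by IDPF_MAX_BUFQS_PER_RXQ_GRP (2) buffer queues gives
 * IDPF_RX_BUFQ_DESC_COUNT(1024, 2) == 512 descriptors per buffer queue, so
 * the combined buffer count never exceeds what the RX queue can complete.
 */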

#define IDPF_RX_BUFQ_WORKING_SET(rxq)		((rxq)->desc_count - 1)

#define IDPF_RX_BUMP_NTC(rxq, ntc)				\
do {								\
	if (unlikely(++(ntc) == (rxq)->desc_count)) {		\
		ntc = 0;					\
		idpf_queue_change(GEN_CHK, rxq);		\
	}							\
} while (0)

#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx)			\
do {								\
	if (unlikely(++(idx) == (q)->desc_count))		\
		idx = 0;					\
} while (0)
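
/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * plain-integer form of the bump macros above. Returns true when the index
 * wraps, which is exactly the point where a splitq caller must flip its SW
 * gen bit.
 */
static inline bool idpf_example_bump_idx(u16 *idx, u16 desc_count)
{
	if (unlikely(++(*idx) == desc_count)) {
		*idx = 0;
		return true;	/* ring wrapped */
	}

	return false;
}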

#define IDPF_RX_BUF_STRIDE			32
#define IDPF_RX_BUF_POST_STRIDE			16
#define IDPF_LOW_WATERMARK			64

#define IDPF_TX_TSO_MIN_MSS			88

/* Minimum number of descriptors between 2 descriptors with the RE bit set;
 * only relevant in flow scheduling mode
 */
#define IDPF_TX_SPLITQ_RE_MIN_GAP	64

#define IDPF_RFL_BI_GEN_M		BIT(16)
#define IDPF_RFL_BI_BUFID_M		GENMASK(15, 0)

#define IDPF_RXD_EOF_SPLITQ		VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ		VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M

#define IDPF_DESC_UNUSED(txq)     \
	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
	(txq)->next_to_clean - (txq)->next_to_use - 1)

#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
 * completions that are expected to arrive on the TX completion queue.
 */
#define IDPF_TX_COMPLQ_PENDING(txq)	\
	(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
	0 : U64_MAX) + \
	(txq)->num_completions_pending - (txq)->complq->num_completions)
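
/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * macro above is wrap-safe arithmetic on two free-running u64 counters,
 * compensating for a pending counter that has wrapped past the completed
 * counter. The same logic on plain values:
 */
static inline u64 idpf_example_completions_pending(u64 pending, u64 done)
{
	return (pending >= done ? 0 : U64_MAX) + pending - done;
}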

#define IDPF_TXBUF_NULL			U32_MAX

#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)

#define IDPF_TX_FLAGS_TSO		BIT(0)
#define IDPF_TX_FLAGS_IPV4		BIT(1)
#define IDPF_TX_FLAGS_IPV6		BIT(2)
#define IDPF_TX_FLAGS_TUNNEL		BIT(3)
#define IDPF_TX_FLAGS_TSYN		BIT(4)

union idpf_tx_flex_desc {
	struct idpf_flex_tx_desc q; /* queue based scheduling */
	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};

#define idpf_tx_buf libeth_sqe

/**
 * struct idpf_tx_offload_params - Offload parameters for a given packet
 * @tx_flags: Feature flags enabled for this packet
 * @hdr_offsets: Offset parameter for single queue model
 * @cd_tunneling: Type of tunneling enabled for single queue model
 * @tso_len: Total length of payload to segment
 * @mss: Segment size
 * @tso_segs: Number of segments to be sent
 * @tso_hdr_len: Length of headers to be duplicated
 * @td_cmd: Command field to be inserted into descriptor
 */
struct idpf_tx_offload_params {
	u32 tx_flags;

	u32 hdr_offsets;
	u32 cd_tunneling;

	u32 tso_len;
	u16 mss;
	u16 tso_segs;
	u16 tso_hdr_len;

	u16 td_cmd;
};

/**
 * struct idpf_tx_splitq_params
 * @dtype: General descriptor info
 * @eop_cmd: Type of EOP
 * @compl_tag: Associated tag for completion
 * @td_tag: Descriptor tunneling tag
 * @offload: Offload parameters
 * @prev_ntu: stored TxQ next_to_use in case of rollback
 * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
 * @prev_refill_gen: stored refillq generation bit in case of packet rollback
 */
struct idpf_tx_splitq_params {
	enum idpf_tx_desc_dtype_value dtype;
	u16 eop_cmd;
	union {
		u16 compl_tag;
		u16 td_tag;
	};

	struct idpf_tx_offload_params offload;

	u16 prev_ntu;
	u16 prev_refill_ntc;
	bool prev_refill_gen;
};

enum idpf_tx_ctx_desc_eipt_offload {
	IDPF_TX_CTX_EXT_IP_NONE         = 0x0,
	IDPF_TX_CTX_EXT_IP_IPV6         = 0x1,
	IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
	IDPF_TX_CTX_EXT_IP_IPV4         = 0x3
};

#define IDPF_TX_COMPLQ_CLEAN_BUDGET	256
#define IDPF_TX_MIN_PKT_LEN		17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR	1
#define IDPF_TX_DESCS_PER_CACHE_LINE	(L1_CACHE_BYTES / \
					 sizeof(struct idpf_flex_tx_desc))
#define IDPF_TX_DESCS_FOR_CTX		1
/* TX descriptors needed, worst case */
#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
			     IDPF_TX_DESCS_PER_CACHE_LINE + \
			     IDPF_TX_DESCS_FOR_SKB_DATA_PTR)

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we align the value down to the
 * nearest 4K boundary, which represents our maximum read request size.
 */
#define IDPF_TX_MAX_READ_REQ_SIZE	SZ_4K
#define IDPF_TX_MAX_DESC_DATA		(SZ_16K - 1)
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
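
/* e.g. ALIGN_DOWN(SZ_16K - 1, SZ_4K) == 12288, so each data descriptor
 * carries at most 12K of a large fragment (illustrative arithmetic).
 */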

#define idpf_rx_buf libeth_fqe

#define IDPF_RX_MAX_PTYPE_PROTO_IDS    32
#define IDPF_RX_MAX_PTYPE_SZ	(sizeof(struct virtchnl2_ptype) + \
				 (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
#define IDPF_RX_PTYPE_HDR_SZ	sizeof(struct virtchnl2_get_ptype_info)
#define IDPF_RX_MAX_PTYPES_PER_BUF	\
	DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
			   IDPF_RX_MAX_PTYPE_SZ)

#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)

#define IDPF_TUN_IP_GRE (\
	IDPF_PTYPE_TUNNEL_IP |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT)

#define IDPF_TUN_IP_GRE_MAC (\
	IDPF_TUN_IP_GRE |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)

#define IDPF_RX_MAX_PTYPE	1024
#define IDPF_RX_MAX_BASE_PTYPE	256
#define IDPF_INVALID_PTYPE_ID	0xFFFF

enum idpf_tunnel_state {
	IDPF_PTYPE_TUNNEL_IP                    = BIT(0),
	IDPF_PTYPE_TUNNEL_IP_GRENAT             = BIT(1),
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC         = BIT(2),
};

struct idpf_ptype_state {
	bool outer_ip:1;
	bool outer_frag:1;
	u8 tunnel_state:6;
};

/**
 * enum idpf_queue_flags_t
 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
 *		      identify new descriptor writebacks on the ring. HW sets
 *		      the gen bit to 1 on the first writeback of any given
 *		      descriptor. After the ring wraps, HW sets the gen bit of
 *		      those descriptors to 0, and continues flipping
 *		      0->1 or 1->0 on each ring wrap. SW maintains its own
 *		      gen bit to know what value will indicate writebacks on
 *		      the next pass around the ring. E.g. it is initialized
 *		      to 1 and knows that reading a gen bit of 1 in any
 *		      descriptor on the initial pass of the ring indicates a
 *		      writeback. It also flips on every ring wrap.
 * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
 *			  bit and Q_RFL_GEN is the SW bit.
 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
 * @__IDPF_Q_POLL_MODE: Enable poll mode
 * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
 * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
 * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
 *		  queue
 * @__IDPF_Q_FLAGS_NBITS: Must be last
 */
enum idpf_queue_flags_t {
	__IDPF_Q_GEN_CHK,
	__IDPF_Q_RFL_GEN_CHK,
	__IDPF_Q_FLOW_SCH_EN,
	__IDPF_Q_SW_MARKER,
	__IDPF_Q_POLL_MODE,
	__IDPF_Q_CRC_EN,
	__IDPF_Q_HSPLIT_EN,
	__IDPF_Q_PTP,

	__IDPF_Q_FLAGS_NBITS,
};

#define idpf_queue_set(f, q)		__set_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_clear(f, q)		__clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_change(f, q)		__change_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has(f, q)		test_bit(__IDPF_Q_##f, (q)->flags)

#define idpf_queue_has_clear(f, q)			\
	__test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_assign(f, q, v)			\
	__assign_bit(__IDPF_Q_##f, (q)->flags, v)
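
/* Illustrative sketch (hypothetical helper, not part of the driver): a
 * splitq clean loop stops once a descriptor's gen bit no longer matches the
 * SW gen bit tracked via the GEN_CHK flag above. @qw is a descriptor qword,
 * @gen_m the gen-bit mask, @sw_gen the current SW gen bit.
 */
static inline bool idpf_example_desc_done(u64 qw, u64 gen_m, bool sw_gen)
{
	return !!(qw & gen_m) == sw_gen;
}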

/**
 * struct idpf_vec_regs
 * @dyn_ctl_reg: Dynamic control interrupt register offset
 * @itrn_reg: Interrupt Throttling Rate register offset
 * @itrn_index_spacing: Register spacing between ITR registers of the same
 *			vector
 */
struct idpf_vec_regs {
	u32 dyn_ctl_reg;
	u32 itrn_reg;
	u32 itrn_index_spacing;
};

/**
 * struct idpf_intr_reg
 * @dyn_ctl: Dynamic control interrupt register
 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
 * @dyn_ctl_itridx_s: Register bit offset for ITR index
 * @dyn_ctl_itridx_m: Mask for ITR index
 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
 * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
 * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
 * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
 * @rx_itr: RX ITR register
 * @tx_itr: TX ITR register
 * @icr_ena: Interrupt cause register offset
 * @icr_ena_ctlq_m: Mask for ICR
 */
struct idpf_intr_reg {
	void __iomem *dyn_ctl;
	u32 dyn_ctl_intena_m;
	u32 dyn_ctl_intena_msk_m;
	u32 dyn_ctl_itridx_s;
	u32 dyn_ctl_itridx_m;
	u32 dyn_ctl_intrvl_s;
	u32 dyn_ctl_wb_on_itr_m;
	u32 dyn_ctl_sw_itridx_ena_m;
	u32 dyn_ctl_swint_trig_m;
	void __iomem *rx_itr;
	void __iomem *tx_itr;
	void __iomem *icr_ena;
	u32 icr_ena_ctlq_m;
};

/**
 * struct idpf_q_vector
 * @vport: Vport back pointer
 * @num_rxq: Number of RX queues
 * @num_txq: Number of TX queues
 * @num_bufq: Number of buffer queues
 * @num_complq: number of completion queues
 * @rx: Array of RX queues to service
 * @tx: Array of TX queues to service
 * @bufq: Array of buffer queues to service
 * @complq: array of completion queues
 * @intr_reg: See struct idpf_intr_reg
 * @napi: napi handler
 * @total_events: Number of interrupts processed
 * @wb_on_itr: whether WB on ITR is enabled
 * @tx_dim: Data for TX net_dim algorithm
 * @tx_itr_value: TX interrupt throttling rate
 * @tx_intr_mode: Dynamic ITR or not
 * @tx_itr_idx: TX ITR index
 * @rx_dim: Data for RX net_dim algorithm
 * @rx_itr_value: RX interrupt throttling rate
 * @rx_intr_mode: Dynamic ITR or not
 * @rx_itr_idx: RX ITR index
 * @v_idx: Vector index
 */
struct idpf_q_vector {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_vport *vport;

	u16 num_rxq;
	u16 num_txq;
	u16 num_bufq;
	u16 num_complq;
	struct idpf_rx_queue **rx;
	struct idpf_tx_queue **tx;
	struct idpf_buf_queue **bufq;
	struct idpf_compl_queue **complq;

	struct idpf_intr_reg intr_reg;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	struct napi_struct napi;
	u16 total_events;
	bool wb_on_itr;

	struct dim tx_dim;
	u16 tx_itr_value;
	bool tx_intr_mode;
	u32 tx_itr_idx;

	struct dim rx_dim;
	u16 rx_itr_value;
	bool rx_intr_mode;
	u32 rx_itr_idx;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u16 v_idx;

	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 120,
			    24 + sizeof(struct napi_struct) +
			    2 * sizeof(struct dim),
			    8);

struct idpf_rx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t rsc_pkts;
	u64_stats_t hw_csum_err;
	u64_stats_t hsplit_pkts;
	u64_stats_t hsplit_buf_ovf;
	u64_stats_t bad_descs;
};

struct idpf_tx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t lso_pkts;
	u64_stats_t linearize;
	u64_stats_t q_busy;
	u64_stats_t skb_drops;
	u64_stats_t dma_map_errs;
	u64_stats_t tstamp_skipped;
};

#define IDPF_ITR_DYNAMIC	1
#define IDPF_ITR_MAX		0x1FE0
#define IDPF_ITR_20K		0x0032
#define IDPF_ITR_GRAN_S		1	/* Assume ITR granularity is 2us */
#define IDPF_ITR_MASK		0x1FFE  /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & IDPF_ITR_MASK)
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF		IDPF_ITR_20K
#define IDPF_ITR_RX_DEF		IDPF_ITR_20K
/* Index used for 'SW ITR' update in DYN_CTL register */
#define IDPF_SW_ITR_UPDATE_IDX	2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX	3
#define IDPF_ITR_IDX_SPACING(spacing, dflt)	(spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX		1

/**
 * struct idpf_rx_queue - software structure representing a receive queue
 * @rx: universal receive descriptor array
 * @single_buf: buffer descriptor array in singleq
 * @desc_ring: virtual descriptor ring address
 * @bufq_sets: Pointer to the array of buffer queues in splitq mode
 * @napi: NAPI instance corresponding to this queue (splitq)
 * @rx_buf: See struct &libeth_fqe
 * @pp: Page pool pointer in singleq mode
 * @netdev: &net_device corresponding to this queue
 * @tail: Tail offset. Used for both queue models single and split.
 * @flags: See enum idpf_queue_flags_t
 * @idx: Index into the total RX queues across groups; also used for skb
 *	 reporting
 * @desc_count: Number of descriptors
 * @rxdids: Supported RX descriptor ids
 * @rx_ptype_lkup: LUT of Rx ptypes
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @skb: Pointer to the skb
 * @truesize: data buffer truesize in singleq
 * @cached_phc_time: Cached PHC time for the Rx queue
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See struct idpf_rx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 * @rx_max_pkt_size: RX max packet size
 */
struct idpf_rx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		union virtchnl2_rx_desc *rx;
		struct virtchnl2_singleq_rx_buf_desc *single_buf;

		void *desc_ring;
	};
	union {
		struct {
			struct idpf_bufq_set *bufq_sets;
			struct napi_struct *napi;
		};
		struct {
			struct libeth_fqe *rx_buf;
			struct page_pool *pp;
		};
	};
	struct net_device *netdev;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 idx;
	u16 desc_count;

	u32 rxdids;
	const struct libeth_rx_pt *rx_ptype_lkup;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	struct sk_buff *skb;
	u32 truesize;
	u64 cached_phc_time;

	struct u64_stats_sync stats_sync;
	struct idpf_rx_queue_stats q_stats;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u16 rx_buffer_low_watermark;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	u16 rx_max_pkt_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
			    88 + sizeof(struct u64_stats_sync),
			    32);

/**
 * struct idpf_tx_queue - software structure representing a transmit queue
 * @base_tx: base Tx descriptor array
 * @base_ctx: base Tx context descriptor array
 * @flex_tx: flex Tx descriptor array
 * @flex_ctx: flex Tx context descriptor array
 * @desc_ring: virtual descriptor ring address
 * @tx_buf: See struct idpf_tx_buf
 * @txq_grp: See struct idpf_txq_group
 * @dev: Device back pointer for DMA mapping
 * @tail: Tail offset. Used for both queue models single and split
 * @flags: See enum idpf_queue_flags_t
 * @idx: Index used to map between the TX queue group and the hot path TX
 *	 pointers stored in vport. Used in both singleq/splitq.
 * @desc_count: Number of descriptors
 * @tx_min_pkt_len: Min supported packet length
 * @compl_tag_gen_s: Completion tag generation bit
 *	The format of the completion tag will change based on the TXQ
 *	descriptor ring size so that we can maintain roughly the same level
 *	of "uniqueness" across all descriptor sizes. For example, if the
 *	TXQ descriptor ring size is 64 (the minimum size supported), the
 *	completion tag will be formatted as below:
 *	15                 6 5         0
 *	--------------------------------
 *	|    GEN=0-1023     |IDX = 0-63|
 *	--------------------------------
 *
 *	This gives us 64*1024 = 65536 possible unique values. Similarly, if
 *	the TXQ descriptor ring size is 8160 (the maximum size supported),
 *	the completion tag will be formatted as below:
 *	15 13 12                       0
 *	--------------------------------
 *	|GEN |       IDX = 0-8159      |
 *	--------------------------------
 *
 *	This gives us 8*8160 = 65280 possible unique values.
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @last_re: last descriptor index that RE bit was set
 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 *		   the TX completion queue, it can be for any TXQ associated
 *		   with that completion queue. This means we can clean up to
 *		   N TXQs during a single call to clean the completion queue.
 *		   cleaned_bytes|pkts tracks the clean stats per TXQ during
 *		   that single call to clean the completion queue. By doing so,
 *		   we can update BQL with aggregate cleaned stats for each TXQ
 *		   only once at the end of the cleaning routine.
 * @clean_budget: singleq only, queue cleaning budget
 * @cleaned_pkts: Number of packets cleaned for the above case
 * @refillq: Pointer to refill queue
 * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
 * @tstamp_task: Work that handles Tx timestamp read
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See struct idpf_tx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @buf_pool_size: Total number of idpf_tx_buf
 */
struct idpf_tx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		struct idpf_base_tx_desc *base_tx;
		struct idpf_base_tx_ctx_desc *base_ctx;
		union idpf_tx_flex_desc *flex_tx;
		union idpf_flex_tx_ctx_desc *flex_ctx;

		void *desc_ring;
	};
	struct libeth_sqe *tx_buf;
	struct idpf_txq_group *txq_grp;
	struct device *dev;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 idx;
	u16 desc_count;

	u16 tx_min_pkt_len;

	struct net_device *netdev;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u16 next_to_use;
	u16 next_to_clean;
	u16 last_re;
	u16 tx_max_bufs;

	union {
		u32 cleaned_bytes;
		u32 clean_budget;
	};
	u16 cleaned_pkts;

	struct idpf_sw_queue *refillq;

	struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
	struct work_struct *tstamp_task;

	struct u64_stats_sync stats_sync;
	struct idpf_tx_queue_stats q_stats;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;
	u32 buf_pool_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
			    104 + sizeof(struct u64_stats_sync),
			    32);
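
/* Illustrative sketch (hypothetical helper, not part of the driver): packing
 * a completion tag for the 64-descriptor ring layout described above, i.e.
 * 6 low bits of buffer index under 10 high bits of generation.
 */
static inline u16 idpf_example_compl_tag(u16 gen, u16 idx)
{
	return (gen << 6) | (idx & GENMASK(5, 0));
}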

/**
 * struct idpf_buf_queue - software structure representing a buffer queue
 * @split_buf: buffer descriptor array
 * @hdr_buf: &libeth_fqe for header buffers
 * @hdr_pp: &page_pool for header buffers
 * @buf: &libeth_fqe for data buffers
 * @pp: &page_pool for data buffers
 * @tail: Tail offset
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @hdr_truesize: truesize for buffer headers
 * @truesize: truesize for data buffers
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 */
struct idpf_buf_queue {
	__cacheline_group_begin_aligned(read_mostly);
	struct virtchnl2_splitq_rx_buf_desc *split_buf;
	struct libeth_fqe *hdr_buf;
	struct page_pool *hdr_pp;
	struct libeth_fqe *buf;
	struct page_pool *pp;
	void __iomem *tail;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	u32 next_to_alloc;

	u32 hdr_truesize;
	u32 truesize;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;

	u16 rx_buffer_low_watermark;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);

/**
 * struct idpf_compl_queue - software structure representing a completion queue
 * @comp: completion descriptor array
 * @txq_grp: See struct idpf_txq_group
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @clean_budget: queue cleaning budget
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
 *		 and bufq.
 * @next_to_clean: Next descriptor to clean
 * @num_completions: Only relevant for TX completion queue. It tracks the
 *		     number of completions received to compare against the
 *		     number of completions pending, as accumulated by the
 *		     TX queues.
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 */
struct idpf_compl_queue {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_splitq_tx_compl_desc *comp;
	struct idpf_txq_group *txq_grp;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;

	u32 clean_budget;
	struct net_device *netdev;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;

	aligned_u64 num_completions;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 q_id;
	u32 size;
	dma_addr_t dma;

	struct idpf_q_vector *q_vector;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);

/**
 * struct idpf_sw_queue
 * @ring: Pointer to the ring
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Descriptor count
 * @next_to_use: Buffer to allocate at
 * @next_to_clean: Next descriptor to clean
 *
 * Software queues are used in splitq mode to manage buffers between rxq
 * producer and the bufq consumer.  These are required in order to maintain a
 * lockless buffer management system and are strictly software only constructs.
 */
struct idpf_sw_queue {
	__cacheline_group_begin_aligned(read_mostly);
	u32 *ring;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	__cacheline_group_end_aligned(read_write);
};
libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);

/**
 * struct idpf_rxq_set
 * @rxq: RX queue
 * @refillq: pointers to refill queues
 *
 * Splitq only.  idpf_rxq_set associates an rxq with an array of refillqs.
 * Each rxq needs a refillq to return used buffers back to the respective bufq.
 * Bufqs then clean these refillqs for buffers to give to hardware.
 */
struct idpf_rxq_set {
	struct idpf_rx_queue rxq;
	struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};

/**
 * struct idpf_bufq_set
 * @bufq: Buffer queue
 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
 *		  in idpf_rxq_group.
 * @refillqs: Pointer to refill queues array.
 *
 * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
 * Used buffers received by rxqs will be put on refillqs which bufqs will
 * clean to return new buffers back to hardware.
 *
 * Buffers needed by some number of rxqs associated in this rxq_group are
 * managed by at most two bufqs (depending on performance configuration).
 */
struct idpf_bufq_set {
	struct idpf_buf_queue bufq;
	int num_refillqs;
	struct idpf_sw_queue *refillqs;
};

/**
 * struct idpf_rxq_group
 * @vport: Vport back pointer
 * @singleq: Struct with single queue related members
 * @singleq.num_rxq: Number of RX queues associated
 * @singleq.rxqs: Array of RX queue pointers
 * @splitq: Struct with split queue related members
 * @splitq.num_rxq_sets: Number of RX queue sets
 * @splitq.rxq_sets: Array of RX queue sets
 * @splitq.bufq_sets: Buffer queue set pointer
 *
 * In singleq mode, an rxq_group is simply an array of rxqs.  In splitq, a
 * rxq_group contains all the rxqs, bufqs and refillqs needed to
 * manage buffers in splitq mode.
 */
struct idpf_rxq_group {
	struct idpf_vport *vport;

	union {
		struct {
			u16 num_rxq;
			struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
		} singleq;
		struct {
			u16 num_rxq_sets;
			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
			struct idpf_bufq_set *bufq_sets;
		} splitq;
	};
};

/**
 * struct idpf_txq_group
 * @vport: Vport back pointer
 * @num_txq: Number of TX queues associated
 * @txqs: Array of TX queue pointers
 * @complq: Associated completion queue pointer, split queue only
 * @num_completions_pending: Total number of completions pending for the
 *			     completion queue, accumulated for all TX queues
 *			     associated with that completion queue.
 *
 * Between singleq and splitq, a txq_group is largely the same except for the
 * complq. In splitq a single complq is responsible for handling completions
 * for some number of txqs associated in this txq_group.
 */
struct idpf_txq_group {
	struct idpf_vport *vport;

	u16 num_txq;
	struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];

	struct idpf_compl_queue *complq;

	aligned_u64 num_completions_pending;
};

static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
{
	u32 cpu;

	if (!q_vector)
		return NUMA_NO_NODE;

	cpu = cpumask_first(&q_vector->napi.config->affinity_mask);

	return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}

/**
 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 * @size: transmit request size in bytes
 *
 * In the case where a large frag (>= 16K) needs to be split across multiple
 * descriptors, we need to assume that we can have no more than 12K of data
 * per descriptor due to hardware alignment restrictions (4K alignment).
 */
static inline u32 idpf_size_to_txd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
}
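
/* e.g. a 64K fragment needs DIV_ROUND_UP(SZ_64K, 12288) == 6 data
 * descriptors under the 12K-per-descriptor limit (illustrative arithmetic).
 */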

/**
 * idpf_tx_singleq_build_ctob - populate command tag offset and size
 * @td_cmd: Command to be filled in desc
 * @td_offset: Offset to be filled in desc
 * @size: Size of the buffer
 * @td_tag: td tag to be filled
 *
 * Returns the 64 bit value populated with the input parameters
 */
static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
						unsigned int size, u64 td_tag)
{
	return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
			   (td_cmd << IDPF_TXD_QW1_CMD_S) |
			   (td_offset << IDPF_TXD_QW1_OFFSET_S) |
			   ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << IDPF_TXD_QW1_L2TAG1_S));
}

void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size);
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size);
/**
 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
					     struct idpf_tx_splitq_params *params,
					     u16 td_cmd, u16 size)
{
	if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
	else
		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}

/**
 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
 * @q_vector: pointer to queue vector struct
 */
static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
{
	struct idpf_intr_reg *reg;

	if (q_vector->wb_on_itr)
		return;

	q_vector->wb_on_itr = true;
	reg = &q_vector->intr_reg;

	writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
	       (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
	       reg->dyn_ctl);
}

/**
 * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
 * @refillq: pointer to refillq containing buf_ids
 */
static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
{
	return (refillq->next_to_use > refillq->next_to_clean ?
		0 : refillq->desc_count) +
	       refillq->next_to_use - refillq->next_to_clean - 1;
}
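
/* Worked example (illustrative values): next_to_use == 10 and
 * next_to_clean == 4 give 0 + 10 - 4 - 1 == 5 free buf_ids; one slot is
 * deliberately left unused so a full ring can be told apart from an empty
 * one.
 */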

int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_msg);
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
			     struct virtchnl2_create_vport *vport_msg,
			     struct idpf_vport_max_q *max_q);
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
int idpf_vport_queues_alloc(struct idpf_vport *vport);
void idpf_vport_queues_rel(struct idpf_vport *vport);
void idpf_vport_intr_rel(struct idpf_vport *vport);
int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
void idpf_vport_intr_ena(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
		      unsigned int size);
struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
			   bool xmit_more);
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
					struct sk_buff *skb, u32 *buf_count);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
				  struct idpf_tx_queue *tx_q);
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
				      u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);

#endif /* !_IDPF_TXRX_H_ */
1033