/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include "q_struct.h"

#define MAX_QUEUE_SET			128
#define MAX_RCV_QUEUES_PER_QS		8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define MAX_SND_QUEUES_PER_QS		8
#define MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define	NICVF_INTR_ID_CQ		0
#define	NICVF_INTR_ID_SQ		8
#define	NICVF_INTR_ID_RBDR		16
#define	NICVF_INTR_ID_MISC		18
#define	NICVF_INTR_ID_QS_ERR		19

#define	for_each_cq_irq(irq)	\
	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define	for_each_sq_irq(irq)	\
	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define	for_each_rbdr_irq(irq)	\
	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)

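/* Illustrative sketch, not part of the driver: the NICVF_INTR_ID_*
 * values carve the VF's interrupt vector space into contiguous ranges
 * (CQ 0-7, SQ 8-15, RBDR 16-17), which is why the for_each_*_irq()
 * helpers iterate between adjacent IDs.  The hypothetical helper below
 * builds a bitmask covering every CQ vector.
 */
static inline u64 nicvf_example_cq_irq_mask(void)
{
	u64 mask = 0;
	int irq;

	for_each_cq_irq(irq)
		mask |= BIT_ULL(irq);	/* bits 0..7, one per CQ */
	return mask;
}
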
#define RBDR_SIZE0		0ULL /* 8K entries */
#define RBDR_SIZE1		1ULL /* 16K entries */
#define RBDR_SIZE2		2ULL /* 32K entries */
#define RBDR_SIZE3		3ULL /* 64K entries */
#define RBDR_SIZE4		4ULL /* 128K entries */
#define RBDR_SIZE5		5ULL /* 256K entries */
#define RBDR_SIZE6		6ULL /* 512K entries */

#define SND_QUEUE_SIZE0		0ULL /* 1K entries */
#define SND_QUEUE_SIZE1		1ULL /* 2K entries */
#define SND_QUEUE_SIZE2		2ULL /* 4K entries */
#define SND_QUEUE_SIZE3		3ULL /* 8K entries */
#define SND_QUEUE_SIZE4		4ULL /* 16K entries */
#define SND_QUEUE_SIZE5		5ULL /* 32K entries */
#define SND_QUEUE_SIZE6		6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0		0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1		1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2		2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3		3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4		4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5		5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6		6ULL /* 64K entries */

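/* Illustrative sketch, not part of the driver: each *_SIZEn constant is
 * a log2 encoding, not an entry count.  The driver derives ring lengths
 * as 1K << n for SQs and CQs (see SND_QUEUE_LEN/CMP_QUEUE_LEN below)
 * and 8K << n for RBDRs (see RCV_BUF_COUNT below).  The hypothetical
 * helper below makes that mapping explicit.
 */
static inline u64 nicvf_example_qsize_to_entries(u64 size_enc, bool is_rbdr)
{
	/* RBDR rings start at 8K entries (shift 13), SQ/CQ at 1K (shift 10) */
	return 1ULL << (size_enc + (is_rbdr ? 13 : 10));
}
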
/* Default queue counts per QS, with ring lengths and threshold values */
#define RBDR_CNT		1
#define RCV_QUEUE_CNT		8
#define SND_QUEUE_CNT		8
#define CMP_QUEUE_CNT		8 /* Max of RCV and SND qcount */

#define SND_QSIZE		SND_QUEUE_SIZE4
#define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH	2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT	2
/* One CQE per transmitted packet; this would be 2 if timestamping
 * were enabled.
 */
#define MAX_CQE_PER_PKT_XMIT		1

#define CMP_QSIZE		CMP_QUEUE_SIZE4
#define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH	0
#define CMP_QUEUE_TIMER_THRESH	220 /* 10 usec */

#define RBDR_SIZE		RBDR_SIZE0
#define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH		(RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN		2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN	(SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
			 (NICVF_RCV_BUF_ALIGN_BYTES * 2))
#define RCV_DATA_OFFSET		NICVF_RCV_BUF_ALIGN_BYTES

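/* Rough layout of one receive fragment, as implied by RCV_FRAG_LEN
 * above (a reading of the macro, not authoritative documentation):
 * NET_SKB_PAD bytes of headroom plus DMA_BUFFER_LEN of packet data,
 * room at the tail for a struct skb_shared_info (the usual
 * build_skb()-style layout), and two 128-byte slots of slack so the
 * buffer can be aligned to NICVF_RCV_BUF_ALIGN_BYTES; packet data then
 * starts RCV_DATA_OFFSET bytes into the buffer.
 */
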
#define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
				 MAX_CQE_PER_PKT_XMIT)
#define RQ_CQ_DROP		((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)

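/* Worked numbers for the defaults above, shown for clarity:
 * SND_QUEUE_LEN = 1 << (4 + 10) = 16384, and CMP_QUEUE_LEN is likewise
 * 16384.  With MIN_SQ_DESC_PER_PKT_XMIT = 2 and one CQE per packet,
 * MAX_CQES_FOR_TX = (16384 / 2) * 1 = 8192, so
 * RQ_CQ_DROP = (16384 - 8192) / 256 = 32.  Similarly,
 * RCV_BUF_COUNT = 1 << (0 + 13) = 8192 buffers and RBDR_THRESH = 4096.
 */
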
/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE	16
#define CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN		7
#define NICVF_RCV_BUF_ALIGN_BYTES	(1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES	512  /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES	128  /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)
#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
	(NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
#define NICVF_RCV_BUF_ALIGN_LEN(X)\
	(NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)

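/* Example of the alignment helpers above (worked arithmetic, for
 * illustration): ALIGN() rounds up, so for X = 0x1010,
 * NICVF_ALIGNED_ADDR(X, 128) = 0x1080 and
 * NICVF_RCV_BUF_ALIGN_LEN(X) = 0x1080 - 0x1010 = 0x70, i.e. the
 * padding needed to reach the next 128-byte boundary.  An
 * already-aligned address yields 0.
 */
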
/* Queue enable/disable */
#define NICVF_SQ_EN		BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET		BIT_ULL(41)
#define NICVF_SQ_RESET		BIT_ULL(17)
#define NICVF_RBDR_RESET	BIT_ULL(43)

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

struct cmp_queue_stats {
	struct rx_stats {
		struct {
			u64 mac_errs;
			u64 l2_errs;
			u64 l3_errs;
			u64 l4_errs;
		} errlvl;
		struct {
			u64 good;
			u64 partial_pkts;
			u64 jabber_errs;
			u64 fcs_errs;
			u64 terminate_errs;
			u64 bgx_rx_errs;
			u64 prel2_errs;
			u64 l2_frags;
			u64 l2_overruns;
			u64 l2_pfcs;
			u64 l2_puny;
			u64 l2_hdr_malformed;
			u64 l2_oversize;
			u64 l2_undersize;
			u64 l2_len_mismatch;
			u64 l2_pclp;
			u64 non_ip;
			u64 ip_csum_err;
			u64 ip_hdr_malformed;
			u64 ip_payload_malformed;
			u64 ip_hop_errs;
			u64 l3_icrc_errs;
			u64 l3_pclp;
			u64 l4_malformed;
			u64 l4_csum_errs;
			u64 udp_len_err;
			u64 bad_l4_port;
			u64 bad_tcp_flag;
			u64 tcp_offset_errs;
			u64 l4_pclp;
			u64 pkt_truncated;
		} errop;
	} rx;
	struct tx_stats {
		u64 good;
		u64 desc_fault;
		u64 hdr_cons_err;
		u64 subdesc_err;
		u64 imm_size_oflow;
		u64 data_seq_err;
		u64 mem_seq_err;
		u64 lock_viol;
		u64 data_fault;
		u64 tstmp_conflict;
		u64 tstmp_timeout;
		u64 mem_fault;
		u64 csum_overlap;
		u64 csum_overflow;
	} tx;
} ____cacheline_aligned_in_smp;

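/* Illustrative sketch, not part of the driver: RX completion error
 * handling amounts to mapping a CQ_RX_ERROP_E opcode from the CQE onto
 * the matching cmp_queue_stats counter (the real mapping lives in
 * nicvf_check_cqe_rx_errs() in nicvf_queues.c).  A truncated,
 * hypothetical example:
 */
static inline void nicvf_example_count_rx_err(struct cmp_queue_stats *stats,
					      u8 errop)
{
	switch (errop) {
	case CQ_RX_ERROP_RE_FCS:
		stats->rx.errop.fcs_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx.errop.ip_csum_err++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx.errop.pkt_truncated++;
		break;
	/* ... one case per CQ_RX_ERROP_E value ... */
	}
}
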
enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
	u64	bytes;
	u64	pkts;
} ____cacheline_aligned_in_smp;

struct q_desc_mem {
	dma_addr_t	dma;
	u64		size;
	u16		q_len;
	dma_addr_t	phys_base;
	void		*base;
	void		*unalign_base;
};

struct rbdr {
	bool		enable;
	u32		dma_size;
	u32		frag_len;
	u32		thresh;		/* Threshold level for interrupt */
	void		*desc;
	u32		head;
	u32		tail;
	struct q_desc_mem   dmem;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
	bool		enable;
	struct	rbdr	*rbdr_start;
	struct	rbdr	*rbdr_cont;
	bool		en_tcp_reassembly;
	u8		cq_qs;  /* CQ's QS to which this RQ is assigned */
	u8		cq_idx; /* CQ index (0 to 7) in the QS */
	u8		cont_rbdr_qs;      /* Continue buffer ptrs - QS num */
	u8		cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
	u8		start_rbdr_qs;     /* First buffer ptrs - QS num */
	u8		start_qs_rbdr_idx; /* RBDR idx in the above QS */
	u8		caching;
	struct		rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
	bool		enable;
	u16		thresh;
	spinlock_t	lock;  /* lock to serialize processing CQEs */
	void		*desc;
	struct q_desc_mem   dmem;
	struct cmp_queue_stats	stats;
} ____cacheline_aligned_in_smp;

struct snd_queue {
	bool		enable;
	u8		cq_qs;  /* CQ's QS to which this SQ is pointing */
	u8		cq_idx; /* CQ index (0 to 7) in the above QS */
	u16		thresh;
	atomic_t	free_cnt;
	u32		head;
	u32		tail;
	u64		*skbuff;
	void		*desc;

#define	TSO_HEADER_SIZE	128
	/* For TSO segment's header */
	char		*tso_hdrs;
	dma_addr_t	tso_hdrs_phys;

	cpumask_t	affinity_mask;
	struct q_desc_mem   dmem;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
	bool		enable;
	bool		be_en;
	u8		vnic_id;
	u8		rq_cnt;
	u8		cq_cnt;
	u64		cq_len;
	u8		sq_cnt;
	u64		sq_len;
	u8		rbdr_cnt;
	u64		rbdr_len;
	struct	rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
	struct	cmp_queue	cq[MAX_CMP_QUEUES_PER_QS];
	struct	snd_queue	sq[MAX_SND_QUEUES_PER_QS];
	struct	rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;

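/* Illustrative sketch, not part of the driver: an SQ posts its
 * completions to the CQ named by its cq_idx field.  This hypothetical
 * helper assumes the common case where sq->cq_qs refers to this same
 * queue set.
 */
static inline struct cmp_queue *nicvf_example_sq_to_cq(struct queue_set *qs,
						       struct snd_queue *sq)
{
	return &qs->cq[sq->cq_idx];
}
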
#define GET_RBDR_DESC(RING, idx)\
		(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
		(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
		(&(((union cq_desc_t *)((RING)->desc))[idx]))

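/* Illustrative sketch, not part of the driver: the GET_*_DESC() macros
 * index the ring's raw descriptor memory by entry number.  A
 * hypothetical helper fetching the CQE at the ring head:
 */
static inline union cq_desc_t *nicvf_example_cq_head_desc(struct cmp_queue *cq,
							  u32 head)
{
	/* head must already be masked to the ring length */
	return GET_CQ_DESC(cq, head);
}
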
/* CQ status bits */
#define	CQ_WR_FULL	BIT(26)
#define	CQ_WR_DISABLE	BIT(25)
#define	CQ_WR_FAULT	BIT(24)
#define	CQ_CQE_COUNT	(0xFFFF << 0)

#define	CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

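/* Illustrative sketch, not part of the driver: a CQ status word carries
 * both error flags and the pending CQE count.  Given a raw status value
 * (e.g. obtained via nicvf_queue_reg_read(), declared below), a caller
 * might decode it with hypothetical helpers like these:
 */
static inline bool nicvf_example_cq_has_errs(u64 cq_status)
{
	return !!(cq_status & CQ_ERR_MASK);
}

static inline u16 nicvf_example_cq_pending(u64 cq_status)
{
	return cq_status & CQ_CQE_COUNT;	/* low 16 bits: CQE count */
}
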
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64  nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64  nicvf_queue_reg_read(struct nicvf *nic,
			  u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
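
/* A rough sketch of the intended call flow, inferred from these
 * declarations (see nicvf_main.c for the authoritative sequence):
 * probe-time setup picks queue counts with nicvf_set_qset_resources();
 * the open path then brings the data path up via
 * nicvf_config_data_transfer(nic, true) and enables per-queue
 * interrupts with nicvf_enable_intr(); teardown runs the reverse,
 * disabling interrupts and queues before freeing resources.
 */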
#endif /* NICVF_QUEUES_H */