xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/common.h (revision 896d8946da97332d4dc80fa1937d8dd6b1c35ad4)
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 */

#ifndef COMMON_H
#define COMMON_H

#include "rvu_struct.h"

#define OTX2_ALIGN			128  /* Align to cacheline */

#define Q_SIZE_16		0ULL /* 16 entries */
#define Q_SIZE_64		1ULL /* 64 entries */
#define Q_SIZE_256		2ULL
#define Q_SIZE_1K		3ULL
#define Q_SIZE_4K		4ULL
#define Q_SIZE_16K		5ULL
#define Q_SIZE_64K		6ULL
#define Q_SIZE_256K		7ULL
#define Q_SIZE_1M		8ULL /* Million entries */
#define Q_SIZE_MIN		Q_SIZE_16
#define Q_SIZE_MAX		Q_SIZE_1M

#define Q_COUNT(x)		(16ULL << (2 * x))
#define Q_SIZE(x, n)		((ilog2(x) - (n)) / 2)
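/* Worked example (editorial note): queue sizes are encoded as powers of
 * four times 16, so Q_COUNT(Q_SIZE_1K) == 16ULL << (2 * 3) == 1024 entries,
 * and Q_SIZE(1024, 4) == (ilog2(1024) - 4) / 2 == (10 - 4) / 2 == Q_SIZE_1K,
 * where 4 == ilog2(16) is the log2 of the smallest supported queue size.
 */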

/* Admin queue info */

/* Since we intend to add only one instruction at a time,
 * keep the queue size at its minimum.
 */
#define AQ_SIZE			Q_SIZE_16
/* HW head & tail pointer mask */
#define AQ_PTR_MASK		0xFFFFF

struct qmem {
	void            *base;
	dma_addr_t	iova;
	int		alloc_sz;
	u16		entry_sz;
	u8		align;
	u32		qsize;
};

static inline int qmem_alloc(struct device *dev, struct qmem **q,
			     int qsize, int entry_sz)
{
	struct qmem *qmem;
	int aligned_addr;

	if (!qsize)
		return -EINVAL;

	*q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
	if (!*q)
		return -ENOMEM;
	qmem = *q;

	qmem->entry_sz = entry_sz;
	qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
	qmem->base = dma_alloc_attrs(dev, qmem->alloc_sz, &qmem->iova,
				     GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
	if (!qmem->base)
		return -ENOMEM;

	qmem->qsize = qsize;

	aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
	qmem->align = (aligned_addr - qmem->iova);
	qmem->base += qmem->align;
	qmem->iova += qmem->align;
	return 0;
}

static inline void qmem_free(struct device *dev, struct qmem *qmem)
{
	if (!qmem)
		return;

	if (qmem->base)
		dma_free_attrs(dev, qmem->alloc_sz,
			       qmem->base - qmem->align,
			       qmem->iova - qmem->align,
			       DMA_ATTR_FORCE_CONTIGUOUS);
	devm_kfree(dev, qmem);
}
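
/* Illustrative sketch (editorial addition, not part of the upstream driver):
 * typical use of the qmem helpers above. The function name, the "ring"
 * variable and the 128-byte entry size are hypothetical.
 */
static inline int qmem_example_usage(struct device *dev)
{
	struct qmem *ring;
	void *entry;
	int err;

	/* 16 entries of 128 bytes each; base/iova come back aligned to
	 * OTX2_ALIGN because qmem_alloc() over-allocates by one cacheline
	 * and shifts both pointers by the same offset.
	 */
	err = qmem_alloc(dev, &ring, Q_COUNT(AQ_SIZE), 128);
	if (err)
		return err;

	/* Entry 'i' lives at base + i * entry_sz; its DMA address is
	 * iova + i * entry_sz.
	 */
	entry = ring->base + 3 * ring->entry_sz;
	*(u64 *)entry = 0;

	qmem_free(dev, ring);
	return 0;
}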

struct admin_queue {
	struct qmem	*inst;
	struct qmem	*res;
	spinlock_t	lock; /* Serialize inst enqueue from PFs */
};
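
/* Illustrative sketch (editorial addition): how an admin queue could be
 * wired up from the helpers above. The function name is hypothetical and
 * the instruction/result entry sizes are caller-supplied placeholders;
 * error unwinding of the first ring is omitted for brevity.
 */
static inline int admin_queue_example_init(struct device *dev,
					   struct admin_queue *aq,
					   int inst_size, int res_size)
{
	int err;

	/* Instruction ring filled by software. */
	err = qmem_alloc(dev, &aq->inst, Q_COUNT(AQ_SIZE), inst_size);
	if (err)
		return err;

	/* Result ring written back by hardware. */
	err = qmem_alloc(dev, &aq->res, Q_COUNT(AQ_SIZE), res_size);
	if (err)
		return err;

	/* Serializes instruction enqueue, as noted in struct admin_queue. */
	spin_lock_init(&aq->lock);
	return 0;
}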

/* NPA aura count */
enum npa_aura_sz {
	NPA_AURA_SZ_0,
	NPA_AURA_SZ_128,
	NPA_AURA_SZ_256,
	NPA_AURA_SZ_512,
	NPA_AURA_SZ_1K,
	NPA_AURA_SZ_2K,
	NPA_AURA_SZ_4K,
	NPA_AURA_SZ_8K,
	NPA_AURA_SZ_16K,
	NPA_AURA_SZ_32K,
	NPA_AURA_SZ_64K,
	NPA_AURA_SZ_128K,
	NPA_AURA_SZ_256K,
	NPA_AURA_SZ_512K,
	NPA_AURA_SZ_1M,
	NPA_AURA_SZ_MAX,
};

#define NPA_AURA_COUNT(x)	(1ULL << ((x) + 6))
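/* Worked example (editorial note): for the non-zero encodings the aura count
 * is 1 << (enum value + 6), e.g. NPA_AURA_COUNT(NPA_AURA_SZ_128) == 1ULL << 7
 * == 128 and NPA_AURA_COUNT(NPA_AURA_SZ_1M) == 1ULL << 20 == 1048576.
 */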

/* NPA AQ result structure for init/read/write of aura HW contexts */
struct npa_aq_aura_res {
	struct	npa_aq_res_s	res;
	struct	npa_aura_s	aura_ctx;
	struct	npa_aura_s	ctx_mask;
};

/* NPA AQ result structure for init/read/write of pool HW contexts */
struct npa_aq_pool_res {
	struct	npa_aq_res_s	res;
	struct	npa_pool_s	pool_ctx;
	struct	npa_pool_s	ctx_mask;
};

/* NIX Transmit schedulers */
enum nix_scheduler {
	NIX_TXSCH_LVL_SMQ = 0x0,
	NIX_TXSCH_LVL_MDQ = 0x0,
	NIX_TXSCH_LVL_TL4 = 0x1,
	NIX_TXSCH_LVL_TL3 = 0x2,
	NIX_TXSCH_LVL_TL2 = 0x3,
	NIX_TXSCH_LVL_TL1 = 0x4,
	NIX_TXSCH_LVL_CNT = 0x5,
};

#define TXSCH_RR_QTM_MAX		((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM		TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO		(0x7ull)
#define CN10K_MAX_DWRR_WEIGHT          16384 /* Weight is 14 bits on CN10K */
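/* Editorial note: TXSCH_RR_QTM_MAX == (1 << 24) - 1 == 0xFFFFFF, an all-ones
 * 24-bit round-robin quantum, and CN10K_MAX_DWRR_WEIGHT == 1 << 14 == 16384,
 * matching the 14-bit DWRR weight field mentioned above.
 */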

/* Don't change the order: on CN10K (except CN10KB) the SMQX_CFG[SDP]
 * value should be 1 for SDP flows.
 */
#define SMQ_LINK_TYPE_RPM		0
#define SMQ_LINK_TYPE_SDP		1
#define SMQ_LINK_TYPE_LBK		2

/* Min/Max packet sizes, excluding FCS */
#define	NIC_HW_MIN_FRS			40
#define	NIC_HW_MAX_FRS			9212
#define	SDP_HW_MAX_FRS			65535
#define	SDP_HW_MIN_FRS			16
#define CN10K_LMAC_LINK_MAX_FRS		16380 /* 16k - FCS */
#define CN10K_LBK_LINK_MAX_FRS		65535 /* 64k */
#define SDP_LINK_CREDIT			0x320202

/* NIX RX action operation */
#define NIX_RX_ACTIONOP_DROP		(0x0ull)
#define NIX_RX_ACTIONOP_UCAST		(0x1ull)
#define NIX_RX_ACTIONOP_UCAST_IPSEC	(0x2ull)
#define NIX_RX_ACTIONOP_MCAST		(0x3ull)
#define NIX_RX_ACTIONOP_RSS		(0x4ull)
/* Use the RX action set in the default unicast entry */
#define NIX_RX_ACTION_DEFAULT		(0xfull)

/* NIX TX action operation */
#define NIX_TX_ACTIONOP_DROP		(0x0ull)
#define NIX_TX_ACTIONOP_UCAST_DEFAULT	(0x1ull)
#define NIX_TX_ACTIONOP_UCAST_CHAN	(0x2ull)
#define NIX_TX_ACTIONOP_MCAST		(0x3ull)
#define NIX_TX_ACTIONOP_DROP_VIOL	(0x5ull)

#define NPC_MCAM_KEY_X1			0
#define NPC_MCAM_KEY_X2			1
#define NPC_MCAM_KEY_X4			2

#define NIX_INTFX_RX(a)			(0x0ull | (a) << 1)
#define NIX_INTFX_TX(a)			(0x1ull | (a) << 1)

/* Default interfaces are NIX0_RX and NIX0_TX */
#define NIX_INTF_RX			NIX_INTFX_RX(0)
#define NIX_INTF_TX			NIX_INTFX_TX(0)
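/* Worked example (editorial note): the interface index packs the direction
 * in bit 0 and the NIX block number above it, so NIX_INTFX_RX(0) == 0,
 * NIX_INTFX_TX(0) == 1, NIX_INTFX_RX(1) == 2 and NIX_INTFX_TX(1) == 3.
 */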

#define NIX_INTF_TYPE_CGX		0
#define NIX_INTF_TYPE_LBK		1
#define NIX_INTF_TYPE_SDP		2

#define MAX_LMAC_PKIND			12
#define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a)			(12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b)		(0 + 0x100 * (a) + (b))
#define NIX_CHAN_SDP_CH_START          (0x700ull)
#define NIX_CHAN_SDP_CHX(a)            (NIX_CHAN_SDP_CH_START + (a))
#define NIX_CHAN_SDP_NUM_CHANS		256
#define NIX_CHAN_CPT_CH_START          (0x800ull)
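/* Worked example (editorial note): CGX channels start at 0x800 while LBK
 * channels start at 0, so NIX_LINK_CGX_LMAC(1, 2) == 4 * 1 + 2 == 6,
 * NIX_CHAN_CGX_LMAC_CHX(1, 2, 3) == 0x800 + 0x100 + 0x20 + 0x3 == 0x923 and
 * NIX_CHAN_LBK_CHX(0, 5) == 5.
 */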

/* The mask extracts the lower 10 bits of the channel number,
 * which CPT will pass to X2P.
 */
#define NIX_CHAN_CPT_X2P_MASK          (0x3ffull)

/* NIX LSO format indices.
 * As of now TSO is the only user, so the indices are assigned statically.
 */
#define NIX_LSO_FORMAT_IDX_TSOV4	0
#define NIX_LSO_FORMAT_IDX_TSOV6	1

/* RSS info */
#define MAX_RSS_GROUPS			8
/* Group 0 has to be used in the default pkt forwarding MCAM entries
 * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
 * filters.
 */
#define DEFAULT_RSS_CONTEXT_GROUP	0
#define MAX_RSS_INDIR_TBL_SIZE		256 /* 1 << Max adder bits */

/* NDC info */
enum ndc_idx_e {
	NIX0_RX = 0x0,
	NIX0_TX = 0x1,
	NPA0_U  = 0x2,
	NIX1_RX = 0x4,
	NIX1_TX = 0x5,
};

enum ndc_ctype_e {
	CACHING = 0x0,
	BYPASS = 0x1,
};

#define NDC_MAX_PORT 6
#define NDC_READ_TRANS 0
#define NDC_WRITE_TRANS 1

#endif /* COMMON_H */