/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell CN10K MCS driver
 *
 * Copyright (C) 2022 Marvell.
 */

#ifndef MCS_H
#define MCS_H

#include <linux/bits.h>
#include "rvu.h"

#define PCI_DEVID_CN10K_MCS		0xA096

#define MCSX_LINK_LMAC_RANGE_MASK	GENMASK_ULL(19, 16)
#define MCSX_LINK_LMAC_BASE_MASK	GENMASK_ULL(11, 0)

#define MCS_ID_MASK			0x7
#define MCS_MAX_PFS                     128

#define MCS_PORT_MODE_MASK		0x3
#define MCS_PORT_FIFO_SKID_MASK		0x3F
#define MCS_MAX_CUSTOM_TAGS		0x8

#define MCS_CTRLPKT_ETYPE_RULE_MAX	8
#define MCS_CTRLPKT_DA_RULE_MAX		8
#define MCS_CTRLPKT_DA_RANGE_RULE_MAX	4
#define MCS_CTRLPKT_COMBO_RULE_MAX	4
#define MCS_CTRLPKT_MAC_RULE_MAX	1

#define MCS_MAX_CTRLPKT_RULES	(MCS_CTRLPKT_ETYPE_RULE_MAX + \
				MCS_CTRLPKT_DA_RULE_MAX + \
				MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
				MCS_CTRLPKT_COMBO_RULE_MAX + \
				MCS_CTRLPKT_MAC_RULE_MAX)

#define MCS_CTRLPKT_ETYPE_RULE_OFFSET		0
#define MCS_CTRLPKT_DA_RULE_OFFSET		8
#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET	16
#define MCS_CTRLPKT_COMBO_RULE_OFFSET		20
#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET		24

/* Reserved resources for default bypass entry */
#define MCS_RSRC_RSVD_CNT		1

/* MCS Interrupt Vector */
#define MCS_CNF10KB_INT_VEC_IP	0x13
#define MCS_CN10KB_INT_VEC_IP	0x53

#define MCS_MAX_BBE_INT			8ULL
#define MCS_BBE_INT_MASK		0xFFULL

#define MCS_MAX_PAB_INT		8ULL
#define MCS_PAB_INT_MASK	0xFULL

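/* Interrupt enable bits, one per MCS block (BBE/CPM/PAB) and direction */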
#define MCS_BBE_RX_INT_ENA		BIT_ULL(0)
#define MCS_BBE_TX_INT_ENA		BIT_ULL(1)
#define MCS_CPM_RX_INT_ENA		BIT_ULL(2)
#define MCS_CPM_TX_INT_ENA		BIT_ULL(3)
#define MCS_PAB_RX_INT_ENA		BIT_ULL(4)
#define MCS_PAB_TX_INT_ENA		BIT_ULL(5)

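/* CPM TX interrupt cause bits */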
#define MCS_CPM_TX_INT_PACKET_XPN_EQ0		BIT_ULL(0)
#define MCS_CPM_TX_INT_PN_THRESH_REACHED	BIT_ULL(1)
#define MCS_CPM_TX_INT_SA_NOT_VALID		BIT_ULL(2)

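/* CPM RX interrupt cause bits */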
#define MCS_CPM_RX_INT_SECTAG_V_EQ1		BIT_ULL(0)
#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1	BIT_ULL(1)
#define MCS_CPM_RX_INT_SL_GTE48			BIT_ULL(2)
#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1		BIT_ULL(3)
#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1		BIT_ULL(4)
#define MCS_CPM_RX_INT_PACKET_XPN_EQ0		BIT_ULL(5)
#define MCS_CPM_RX_INT_PN_THRESH_REACHED	BIT_ULL(6)

#define MCS_CPM_RX_INT_ALL	(MCS_CPM_RX_INT_SECTAG_V_EQ1 |		\
				 MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 |    \
				 MCS_CPM_RX_INT_SL_GTE48 |		\
				 MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 |		\
				 MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 |	\
				 MCS_CPM_RX_INT_PACKET_XPN_EQ0 |	\
				 MCS_CPM_RX_INT_PN_THRESH_REACHED)

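/* Per PF/VF state; records which MCS interrupt events are enabled */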
struct mcs_pfvf {
	u64 intr_mask;	/* Enabled Interrupt mask */
};

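/* Interrupt event details queued for delivery to the PF/VF given by pcifunc */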
struct mcs_intr_event {
	u16 pcifunc;
	u64 intr_mask;
	u64 sa_id;
	u8 mcs_id;
	u8 lmac_id;
};

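/* Work queue list entry wrapping a pending interrupt event */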
struct mcs_intrq_entry {
	struct list_head node;
	struct mcs_intr_event intr_event;
};

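/* Flow-id to SecY/SC mapping programmed into hardware */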
struct secy_mem_map {
	u8 flow_id;
	u8 secy;
	u8 ctrl_pkt;
	u8 sc;
	u64 sci;
};

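/* Per-direction resource state: allocation bitmaps and per-entry PCIFUNC owners */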
struct mcs_rsrc_map {
	u16 *flowid2pf_map;
	u16 *secy2pf_map;
	u16 *sc2pf_map;
	u16 *sa2pf_map;
	u16 *flowid2secy_map;	/* flowid to secy mapping */
	u16 *ctrlpktrule2pf_map;
	struct rsrc_bmap	flow_ids;
	struct rsrc_bmap	secy;
	struct rsrc_bmap	sc;
	struct rsrc_bmap	sa;
	struct rsrc_bmap	ctrlpktrule;
};

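/* Hardware resource counts and capabilities of an MCS block */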
struct hwinfo {
	u8 tcam_entries;
	u8 secy_entries;
	u8 sc_entries;
	u16 sa_entries;
	u8 mcs_x2p_intf;
	u8 lmac_cnt;
	u8 mcs_blks;
	unsigned long	lmac_bmap; /* bitmap of enabled mcs lmac */
	u16 ip_vec;
};

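/* Per MCS block context */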
struct mcs {
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
	struct device		*dev;
	struct hwinfo		*hw;
	struct mcs_rsrc_map	tx;
	struct mcs_rsrc_map	rx;
	u16                     pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
	u8			mcs_id;
	struct mcs_ops		*mcs_ops;
	struct list_head	mcs_list;
	/* Lock for mcs stats */
	struct mutex		stats_lock;
	struct mcs_pfvf		*pf;
	struct mcs_pfvf		*vf;
	u16			num_vec;
	void			*rvu;
	u16			*tx_sa_active;
	bool                      bypass;
};

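/* Silicon-specific callbacks, implemented separately for CN10K-B and CNF10K-B */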
struct mcs_ops {
	void	(*mcs_set_hw_capabilities)(struct mcs *mcs);
	void	(*mcs_parser_cfg)(struct mcs *mcs);
	void	(*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
	void	(*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
	void	(*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
	void	(*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
	void	(*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
};

extern struct pci_driver mcs_driver;

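/* MMIO accessors for the MCS register space */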
static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
{
	writeq(val, mcs->reg_base + offset);
}

static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
{
	return readq(mcs->reg_base + offset);
}

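/*
 * Usage sketch (illustrative only, not part of the driver): a read-modify-write
 * of an MCS register via the accessors above. MCSX_EXAMPLE_REG is a
 * hypothetical offset; real offsets are defined in the MCS register header.
 *
 *	u64 val;
 *
 *	val = mcs_reg_read(mcs, MCSX_EXAMPLE_REG);
 *	val |= BIT_ULL(0);
 *	mcs_reg_write(mcs, MCSX_EXAMPLE_REG, val);
 */
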
/* MCS APIs */
struct mcs *mcs_get_pdata(int mcs_id);
int mcs_get_blkcnt(void);
int mcs_set_lmac_channels(int mcs_id, u16 base);
int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
		       u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
int mcs_install_flowid_bypass_entry(struct mcs *mcs);
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
		      struct mcs_port_cfg_get_rsp *rsp);
void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
			    struct mcs_custom_tag_cfg_get_rsp *rsp);
int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
bool is_mcs_bypass(int mcs_id);

/* CN10K-B APIs */
void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void cn10kb_mcs_parser_cfg(struct mcs *mcs);
void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);

/* CNF10K-B APIs */
struct mcs_ops *cnf10kb_get_mac_ops(void);
void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);

/* Stats APIs */
void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
int mcs_set_force_clk_en(struct mcs *mcs, bool set);

int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);

#endif /* MCS_H */