/* xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h (revision bdd1a21b52557ea8f61d0a5dc2f77151b576eb70) */
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #ifndef __HCLGE_TM_H
5 #define __HCLGE_TM_H
6 
7 #include <linux/types.h>
8 
/* MAC pause enable bits (TX/RX) in the MAC pause config command */
#define HCLGE_TX_MAC_PAUSE_EN_MSK	BIT(0)
#define HCLGE_RX_MAC_PAUSE_EN_MSK	BIT(1)

/* Port-level vs TC-level pause mode select bit */
#define HCLGE_TM_PORT_BASE_MODE_MSK	BIT(0)

/* Defaults for pause frame transmit gap / transmit time fields */
#define HCLGE_DEFAULT_PAUSE_TRANS_GAP	0x7F
#define HCLGE_DEFAULT_PAUSE_TRANS_TIME	0xFFFF

/* SP or DWRR: scheduling mode select — bit 0 set means DWRR, clear means SP */
#define HCLGE_TM_TX_SCHD_DWRR_MSK	BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK		0xFE

/* Maximum line rate in Mbps (100G) used as the shaper upper bound */
#define HCLGE_ETHER_MAX_RATE	100000

/* Per-PF limits on priority and qset counts for TM configuration */
#define HCLGE_TM_PF_MAX_PRI_NUM		8
#define HCLGE_TM_PF_MAX_QSET_NUM	8
26 
/* Firmware command payload: link a priority group (PG) to priorities.
 * pri_bit_map: presumably bit N set means priority N belongs to this PG —
 * TODO confirm against the command handler in hclge_tm.c.
 * NOTE: field layout is firmware ABI; do not reorder or resize.
 */
struct hclge_pg_to_pri_link_cmd {
	u8 pg_id;
	u8 rsvd1[3];	/* reserved/padding */
	u8 pri_bit_map;
};
32 
/* Firmware command payload: link a queue set (qset) to a priority.
 * link_vld: bit 0 (HCLGE_TM_QS_PRI_LINK_VLD_MSK) marks the mapping valid.
 * NOTE: field layout is firmware ABI; do not reorder or resize.
 */
struct hclge_qs_to_pri_link_cmd {
	__le16 qs_id;
	__le16 rsvd;	/* reserved/padding */
	u8 priority;
#define HCLGE_TM_QS_PRI_LINK_VLD_MSK	BIT(0)
	u8 link_vld;
};
40 
/* Firmware command payload: link a NIC queue (nq) to a queue set.
 * qset_id packs the qset index plus a valid bit; two encodings exist:
 *  - base:     bits 9:0 = qset id, bit 10 = link valid
 *  - extended: bits 10:0 = qset id low/high split at bit 11
 *    (H_EXT masks) — presumably for devices with larger qset spaces;
 *    TODO confirm which encoding applies per hardware revision in hclge_tm.c.
 * NOTE: field layout is firmware ABI; do not reorder or resize.
 */
struct hclge_nq_to_qs_link_cmd {
	__le16 nq_id;
	__le16 rsvd;	/* reserved/padding */
#define HCLGE_TM_Q_QS_LINK_VLD_MSK	BIT(10)
#define HCLGE_TM_QS_ID_L_MSK		GENMASK(9, 0)
#define HCLGE_TM_QS_ID_L_S		0
#define HCLGE_TM_QS_ID_H_MSK		GENMASK(14, 10)
#define HCLGE_TM_QS_ID_H_S		10
#define HCLGE_TM_QS_ID_H_EXT_S		11
#define HCLGE_TM_QS_ID_H_EXT_MSK	GENMASK(15, 11)
	__le16 qset_id;
};
53 
/* Firmware command payload: assign a TX queue to a traffic class (TC).
 * NOTE(review): "rev" looks like a typo for "rsvd" (reserved), but the name
 * is internal-only padding and the layout is firmware ABI — left as-is.
 */
struct hclge_tqp_tx_queue_tc_cmd {
	__le16 queue_id;
	__le16 rsvd;	/* reserved/padding */
	u8 tc_id;
	u8 rev[3];	/* reserved/padding (likely "rsvd") */
};
60 
/* Firmware command payload: DWRR weight for a priority group */
struct hclge_pg_weight_cmd {
	u8 pg_id;
	u8 dwrr;	/* DWRR weight value */
};

/* Firmware command payload: DWRR weight for a priority */
struct hclge_priority_weight_cmd {
	u8 pri_id;
	u8 dwrr;	/* DWRR weight value */
};

/* Firmware command payload: scheduling mode (SP/DWRR) for a priority */
struct hclge_pri_sch_mode_cfg_cmd {
	u8 pri_id;
	u8 rsvd[3];	/* reserved/padding */
	u8 sch_mode;	/* see HCLGE_TM_TX_SCHD_*_MSK */
};

/* Firmware command payload: scheduling mode (SP/DWRR) for a queue set */
struct hclge_qs_sch_mode_cfg_cmd {
	__le16 qs_id;
	u8 rsvd[2];	/* reserved/padding */
	u8 sch_mode;	/* see HCLGE_TM_TX_SCHD_*_MSK */
};

/* Firmware command payload: DWRR weight for a queue set */
struct hclge_qs_weight_cmd {
	__le16 qs_id;
	u8 dwrr;	/* DWRR weight value */
};

/* Firmware command payload: per-TC ETS weights.
 * Relies on the includer providing HNAE3_MAX_TC (hnae3.h) — this header
 * only includes <linux/types.h> itself.
 */
struct hclge_ets_tc_weight_cmd {
	u8 tc_weight[HNAE3_MAX_TC];
	u8 weight_offset;
	u8 rsvd[15];	/* reserved/padding */
};
93 
/* Bit layout of the 32-bit shaper parameter word (the *_shapping_para
 * fields below):
 *   IR_B  bits  7:0  — shaper rate coefficient B
 *   IR_U  bits 11:8  — shaper rate coefficient U
 *   IR_S  bits 15:12 — shaper rate coefficient S
 *   BS_B  bits 20:16 — bucket size B
 *   BS_S  bits 25:21 — bucket size S
 * Accessed via hclge_tm_set_field()/hclge_tm_get_field() below.
 */
#define HCLGE_TM_SHAP_IR_B_MSK  GENMASK(7, 0)
#define HCLGE_TM_SHAP_IR_B_LSH	0
#define HCLGE_TM_SHAP_IR_U_MSK  GENMASK(11, 8)
#define HCLGE_TM_SHAP_IR_U_LSH	8
#define HCLGE_TM_SHAP_IR_S_MSK  GENMASK(15, 12)
#define HCLGE_TM_SHAP_IR_S_LSH	12
#define HCLGE_TM_SHAP_BS_B_MSK  GENMASK(20, 16)
#define HCLGE_TM_SHAP_BS_B_LSH	16
#define HCLGE_TM_SHAP_BS_S_MSK  GENMASK(25, 21)
#define HCLGE_TM_SHAP_BS_S_LSH	21

/* Shaper bucket selector: C (committed) vs P (peak) bucket */
enum hclge_shap_bucket {
	HCLGE_TM_SHAP_C_BUCKET = 0,
	HCLGE_TM_SHAP_P_BUCKET,
};

/* set bit HCLGE_TM_RATE_VLD to 1 means use 'rate' to config shaping */
#define HCLGE_TM_RATE_VLD	0
112 
/* Firmware command payload: shaper config for a priority.
 * *_shapping_para packs IR/BS coefficients (see HCLGE_TM_SHAP_* above);
 * when bit HCLGE_TM_RATE_VLD of 'flag' is set, firmware uses the raw
 * *_rate value instead of the packed parameters.
 * NOTE: field layout is firmware ABI; do not reorder or resize.
 */
struct hclge_pri_shapping_cmd {
	u8 pri_id;
	u8 rsvd[3];	/* reserved/padding */
	__le32 pri_shapping_para;
	u8 flag;	/* bit HCLGE_TM_RATE_VLD selects pri_rate */
	u8 rsvd1[3];	/* reserved/padding */
	__le32 pri_rate;	/* rate in Mbps */
};

/* Firmware command payload: shaper config for a priority group
 * (same layout convention as hclge_pri_shapping_cmd).
 */
struct hclge_pg_shapping_cmd {
	u8 pg_id;
	u8 rsvd[3];	/* reserved/padding */
	__le32 pg_shapping_para;
	u8 flag;	/* bit HCLGE_TM_RATE_VLD selects pg_rate */
	u8 rsvd1[3];	/* reserved/padding */
	__le32 pg_rate;	/* rate in Mbps */
};

/* Firmware command payload: shaper config for a queue set
 * (same layout convention as hclge_pri_shapping_cmd).
 */
struct hclge_qs_shapping_cmd {
	__le16 qs_id;
	u8 rsvd[2];	/* reserved/padding */
	__le32 qs_shapping_para;
	u8 flag;	/* bit HCLGE_TM_RATE_VLD selects qs_rate */
	u8 rsvd1[3];	/* reserved/padding */
	__le32 qs_rate;	/* rate in Mbps */
};
139 
/* Back-pressure (BP) to qset mapping: qsets are addressed as
 * (group id, sub-group id) — base layout uses bits 9:5 / 4:0,
 * extended layout widens the group id to bits 10:5.
 */
#define HCLGE_BP_GRP_NUM		32
#define HCLGE_BP_SUB_GRP_ID_S		0
#define HCLGE_BP_SUB_GRP_ID_M		GENMASK(4, 0)
#define HCLGE_BP_GRP_ID_S		5
#define HCLGE_BP_GRP_ID_M		GENMASK(9, 5)

#define HCLGE_BP_EXT_GRP_NUM		40
#define HCLGE_BP_EXT_GRP_ID_S		5
#define HCLGE_BP_EXT_GRP_ID_M		GENMASK(10, 5)

/* Firmware command payload: map a TC's back-pressure to a qset group.
 * qs_bit_map: presumably one bit per qset within the group — TODO confirm
 * against the command handler in hclge_tm.c.
 * NOTE: field layout is firmware ABI; do not reorder or resize.
 */
struct hclge_bp_to_qs_map_cmd {
	u8 tc_id;
	u8 rsvd[2];	/* reserved/padding */
	u8 qs_group_id;
	__le32 qs_bit_map;
	u32 rsvd1;	/* reserved/padding */
};
157 
/* Firmware command payload: enable PFC per priority.
 * tx_rx_en_bitmap: TX/RX enable bits; pri_en_bitmap: bit N enables PFC
 * for priority N — TODO confirm bit meanings against hclge_tm.c.
 */
struct hclge_pfc_en_cmd {
	u8 tx_rx_en_bitmap;
	u8 pri_en_bitmap;
};

/* Firmware command payload: MAC pause frame parameters.
 * Relies on the includer providing ETH_ALEN (linux/if_ether.h).
 * NOTE: field layout is firmware ABI; do not reorder or resize.
 */
struct hclge_cfg_pause_param_cmd {
	u8 mac_addr[ETH_ALEN];
	u8 pause_trans_gap;	/* see HCLGE_DEFAULT_PAUSE_TRANS_GAP */
	u8 rsvd;		/* reserved/padding */
	__le16 pause_trans_time;	/* see HCLGE_DEFAULT_PAUSE_TRANS_TIME */
	u8 rsvd1[6];		/* reserved/padding */
	/* extra mac address to do double check for pause frame */
	u8 mac_addr_extra[ETH_ALEN];
	u16 rsvd2;		/* reserved/padding */
};

/* Firmware response payload: PFC packet counters (3 x 64-bit) */
struct hclge_pfc_stats_cmd {
	__le64 pkt_num[3];
};
177 
/* Firmware command payload: port-level shaper config
 * (same flag/rate convention as hclge_pri_shapping_cmd).
 */
struct hclge_port_shapping_cmd {
	__le32 port_shapping_para;
	u8 flag;	/* bit HCLGE_TM_RATE_VLD selects port_rate */
	u8 rsvd[3];	/* reserved/padding */
	__le32 port_rate;	/* rate in Mbps */
};

/* Unpacked IR coefficients computed from a target rate (host-side only) */
struct hclge_shaper_ir_para {
	u8 ir_b; /* IR_B parameter of IR shaper */
	u8 ir_u; /* IR_U parameter of IR shaper */
	u8 ir_s; /* IR_S parameter of IR shaper */
};

/* Firmware response payload: TM resource layout for this function —
 * base ids and counts of PGs, priorities, qsets and queues.
 */
struct hclge_tm_nodes_cmd {
	u8 pg_base_id;
	u8 pri_base_id;
	__le16 qset_base_id;
	__le16 queue_base_id;
	u8 pg_num;
	u8 pri_num;
	__le16 qset_num;
	__le16 queue_num;
};

/* Host-side decoded shaper parameters, filled by the hclge_tm_get_*_shaper
 * query helpers below (not a firmware wire format).
 */
struct hclge_tm_shaper_para {
	u32 rate;	/* rate in Mbps */
	u8 ir_b;
	u8 ir_u;
	u8 ir_s;
	u8 bs_b;
	u8 bs_s;
	u8 flag;	/* bit HCLGE_TM_RATE_VLD: rate field in use */
};
211 
/* Token-pasting accessors for the shaper parameter word: 'string' is a
 * field name token (IR_B, IR_U, IR_S, BS_B, BS_S) and expands to the
 * matching HCLGE_TM_SHAP_<field>_MSK / _LSH pair. Thin wrappers around
 * hnae3_set_field()/hnae3_get_field() (declared in hnae3.h).
 */
#define hclge_tm_set_field(dest, string, val) \
			   hnae3_set_field((dest), \
			   (HCLGE_TM_SHAP_##string##_MSK), \
			   (HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
			hnae3_get_field((src), HCLGE_TM_SHAP_##string##_MSK, \
					HCLGE_TM_SHAP_##string##_LSH)
219 
/* TM scheduler init/update (implemented in hclge_tm.c).
 * All int-returning functions use 0 on success / negative errno on error.
 */
int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_tm_vport_map_update(struct hclge_dev *hdev);
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init);
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev);
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);

/* MAC pause / PFC configuration and statistics */
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);

/* TM query helpers (read back qset/priority/PG/queue configuration);
 * results are returned through the out parameters.
 */
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld);
int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode);
int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight);
int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para);
int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode);
int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight);
int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para);
int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id);
int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id);
int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
			       u8 *pri_bit_map);
int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight);
int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode);
int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para);
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para);
258 #endif
259