xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT	100
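
/* ETS bandwidth across the DWRR traffic classes is expressed in
 * percent; hclge_ets_validate() below rejects any configuration whose
 * per-TC percentages do not sum to exactly BW_PERCENT.
 */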

static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* Hardware only supports the SP (strict priority)
			 * and ETS (enhanced transmission selection)
			 * algorithms; if dcbnl hands us any other value,
			 * return an error.
			 */
			return -EINVAL;
		}
	}

	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

	return 0;
}
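
/* A worked example of the conversion above (illustrative values only):
 * with
 *
 *	ets->tc_tsa   = { STRICT, ETS, ETS, ... }
 *	ets->tc_tx_bw = {      0,  60,  40, ... }
 *
 * TC0 is scheduled strict priority (its DWRR weight is forced to 0),
 * while TC1 and TC2 run DWRR on PG 0 with weights 60 and 40.
 */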

static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}
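
/* This is the inverse of hclge_ieee_ets_to_tm_info(): it rebuilds the
 * ieee_ets view from tm_info so that .ieee_getets reports exactly what
 * was programmed. Setting the willing bit advertises 802.1Qaz
 * "willing" behaviour, i.e. readiness to accept the peer's DCBX
 * configuration.
 */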

/* IEEE std */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
				     u8 *prio_tc)
{
	int i;

	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);
		return -EINVAL;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);
			return -EINVAL;
		}
	}

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		if (num_tc > hdev->vport[i].alloc_tqps) {
			dev_err(&hdev->pdev->dev,
				"vport(%u) tqp checking failed, %u > allocated tqp(%u)\n",
				i, num_tc, hdev->vport[i].alloc_tqps);
			return -EINVAL;
		}
	}

	return 0;
}
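
/* The three checks above, in order: the requested TC count must fit
 * within the hardware limit (tc_max), every user priority must map to
 * a TC below that count, and every vport must have at least as many
 * TQPs (queue pairs) as TCs, so each TC can be backed by a queue.
 */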

static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 max_tc = 0;
	int ret;
	u8 i;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];
	}

	ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
	if (ret)
		return ret;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	*tc = max_tc + 1;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}
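
/* The bandwidth check, worked through (illustrative values only): four
 * ETS TCs with tc_tx_bw = { 25, 25, 25, 25 } sum to 100 and pass,
 * while { 30, 30, 30, 0 } sums to 90 and is rejected with -EINVAL.
 * Strict-priority TCs are excluded from the sum, and a configuration
 * with no ETS TC at all skips the check entirely (has_ets_tc stays
 * false).
 */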

static int hclge_map_update(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_schd_mode_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_rss_indir_init_cfg(hdev);

	return hclge_rss_init_hw(hdev);
}
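
/* Ordering matters in the sequence above: queue-to-TC mapping and
 * scheduler mode are programmed first, then pause/PFC, then the packet
 * buffer is re-carved for the new TC count, and finally RSS is
 * re-initialised against the new queue layout.
 */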

static int hclge_client_setup_tc(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_client *client;
	struct hnae3_handle *handle;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		handle = &vport[i].nic;
		client = handle->client;

		if (!client || !client->ops || !client->ops->setup_tc)
			continue;

		ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
		if (ret)
			return ret;
	}

	return 0;
}
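
/* The loop covers the PF's own vport plus any VMDq vports, each of
 * which carries a nic handle; if a client (typically the hns3 enet
 * module) is attached and implements setup_tc, it is asked to resize
 * its queue/TC layout to match tm_info.num_tc.
 */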

static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	hclge_tm_schd_info_update(hdev, num_tc);

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		return ret;

	if (map_changed) {
		ret = hclge_client_setup_tc(hdev);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);
}
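
/* A typical trigger for this path is a host-managed IEEE DCBX change
 * from userspace, for example via the iproute2 dcb tool (illustrative
 * invocation; device name and values are placeholders):
 *
 *	dcb ets set dev eth0 tc-tsa 0:strict 1:ets 2:ets \
 *		tc-bw 1:60 2:40
 *
 * which dcbnl delivers through the .ieee_setets callback.
 */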

static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = hdev->tm_info.hw_pfc_map;
	/* PFC state is tracked per TC; expand it into the per-priority
	 * pfc_en bitmap that dcbnl expects.
	 */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
				pfc->pfc_en |= BIT(j);
		}
	}

	ret = hclge_pfc_tx_stats_get(hdev, requests);
	if (ret)
		return ret;

	ret = hclge_pfc_rx_stats_get(hdev, indications);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		pfc->requests[i] = requests[i];
		pfc->indications[i] = indications[i];
	}
	return 0;
}
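
/* Expansion example (illustrative values only): with prio_tc mapping
 * priorities 0 and 1 to TC0 and hw_pfc_map = BIT(0), both bit 0 and
 * bit 1 end up set in pfc->pfc_en; every priority that feeds a
 * PFC-enabled TC is reported as PFC-enabled.
 */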

static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	if (pfc_map == hdev->tm_info.hw_pfc_map)
		return 0;

	hdev->tm_info.hw_pfc_map = pfc_map;

	return hclge_pause_setup_hw(hdev);
}
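
/* This is the compression that mirrors the expansion in
 * hclge_ieee_getpfc(): a TC gets PFC as soon as any priority mapped to
 * it requests PFC, and the hardware is only touched when the resulting
 * per-TC map actually differs from hw_pfc_map.
 */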

/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return 0;

	return hdev->dcbx_cap;
}

static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}
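
/* Per the dcbnl convention, .setdcbx returns non-zero to refuse a
 * mode. Everything except host-managed IEEE DCBX (LLD_MANAGED, CEE,
 * or a request without the HOST bit) is rejected here.
 */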

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
	if (ret)
		return -EINVAL;

	hclge_tm_schd_info_update(hdev, tc);
	hclge_tm_prio_tc_info_update(hdev, prio_tc);

	ret = hclge_tm_init_hw(hdev);
	if (ret)
		return ret;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return 0;
}
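
/* mqprio channel mode and IEEE DCB are mutually exclusive here: this
 * path bails out when DCB is active, just as hclge_ieee_setets() and
 * hclge_ieee_setpfc() bail out when MQPRIO_ENABLE is set. A userspace
 * trigger would look roughly like (illustrative; device name and
 * queue layout are placeholders):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 \
 *		map 0 0 1 1 2 2 3 3 queues 2@0 2@2 2@4 2@6 \
 *		hw 1 mode channel
 */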

static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.map_update	= hclge_map_update,
	.setup_tc	= hclge_setup_tc,
};

void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	/* If the hdev does not support DCB, or this vport is not
	 * the PF, dcb_ops is left unset.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}
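
/* Called during PF initialisation (from hclge_main.c); the default
 * DCBX capability is host-managed IEEE mode, matching what
 * hclge_setdcbx() is willing to accept.
 */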