xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c (revision 83a37b3292f4aca799b355179ad6fbdd78a08e10)
/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_SHAPER_BS_U_DEF	1
#define HCLGE_SHAPER_BS_S_DEF	4

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,        /* Priority level */
		6 * 32,         /* Priority group level */
		6 * 8,          /* Port level */
		6 * 256         /* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (8000 * (1 << --ir_u_calc));

			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}
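
/* Worked example (illustrative, not part of the original source): for
 * ir = 5000 Mbps at HCLGE_SHAPER_LVL_PRI (tick = 6 * 256 = 1536), the
 * base rate is ir_calc = 1008000 / 1536 ~= 656 Mbps, so the numerator
 * branch above runs until ir_u_calc = 3 (ir_calc = 5250 >= 5000), steps
 * back to ir_u = 2 and derives
 *	ir_b = (5000 * 1536 + 16000) / 32000 = 240,
 * giving ir_b = 240, ir_u = 2, ir_s = 0. Cross-check with the formula
 * above: 240 * (2 ^ 2) * 8 / 1536 * 1000 = 5000 Mbps exactly.
 */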

static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The register for priority has four bytes; the first byte covers
	 * priority 0 and priority 1: the upper 4 bits stand for priority 1
	 * and the lower 4 bits for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
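
/* Example (illustrative): with pri_id = 5 and prio_tc[5] = 3, the line
 * above evaluates to pri[5 >> 1] = pri[2] |= 3 << ((5 & 1) * 4), i.e.
 * TC 3 lands in the upper nibble of the third byte, matching the
 * layout described above.
 */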

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u8 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
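
/* Note: the link-valid flag is OR'ed into the qset id before the
 * little-endian conversion. Assuming HCLGE_TM_Q_QS_LINK_VLD_MSK is
 * BIT(10) as defined in hclge_tm.h, mapping queue 7 to qset 5 writes
 * qset_id = cpu_to_le16(0x0405).
 */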

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
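
/* Sketch of the resulting 32-bit shaping word, assuming the field
 * layout from hclge_tm.h (IR_B in bits 0-7, IR_U in bits 8-11, IR_S in
 * bits 12-15, BS_B in bits 16-20, BS_S in bits 21-25): ir_b = 240,
 * ir_u = 2, ir_s = 0, bs_b = 1, bs_s = 4 pack to
 *	(4 << 21) | (1 << 16) | (2 << 8) | 240 = 0x008102f0.
 */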

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;

	/* Qset and tc have a one-to-one mapping */
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc = min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				kinfo->num_tqps / kinfo->num_tc);
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < kinfo->num_tc; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
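
/* Worked example (illustrative): a vport with num_tqps = 16 on a
 * device with num_tc = 4 and rss_size_max = 16 gets rss_size =
 * min(16, 16 / 4) = 4, so the enabled TCs 0..3 take tqp_offset 0, 4,
 * 8 and 12 with tqp_count = 4 each.
 */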

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC */
	if (hdev->tm_info.num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Cfg shaper for each pg */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Cfg dwrr for each pg */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one-to-one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < hdev->tm_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
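
/* Example (illustrative, TC-based mode): with num_tc = 4, vport 1 has
 * qs_offset = 4, so its qsets 4..7 are linked to priorities 0..3 and
 * hclge_vport_q_to_qs_map() then spreads its queues across those qsets
 * per TC.
 */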

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info *v_tc_info;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need to config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.hw_pfc_map);
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	default:
		tx_en = true;
		rx_en = true;
		break;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
		return hclge_mac_pause_setup_hw(hdev);

	/* Only DCB-capable devices support qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* When the MAC is in GE mode, the hdev does not support pfc setting */
	ret = hclge_pfc_setup_hw(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_tm_qs_bp_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= hdev->tm_info.num_tc)
			return -EINVAL;
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}

	return 0;
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 i, bit_map = 0;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}
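
/* Example (illustrative): num_tc = 4 yields hw_tc_map = 0xf; a request
 * for num_tc = 0 falls back to a single TC with hw_tc_map = 0x1.
 */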

int hclge_tm_init_hw(struct hclge_dev *hdev)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret;

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	ret = hclge_tm_schd_info_init(hdev);
	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev);
}