// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2023 Marvell.
 *
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/bitfield.h>

#include "otx2_common.h"
#include "cn10k.h"
#include "qos.h"

#define OTX2_QOS_QID_INNER		0xFFFFU
#define OTX2_QOS_QID_NONE		0xFFFEU
#define OTX2_QOS_ROOT_CLASSID		0xFFFFFFFF
#define OTX2_QOS_CLASS_NONE		0
#define OTX2_QOS_DEFAULT_PRIO		0xF
#define OTX2_QOS_INVALID_SQ		0xFFFF
#define OTX2_QOS_INVALID_TXSCHQ_IDX	0xFFFF
#define CN10K_MAX_RR_WEIGHT		GENMASK_ULL(13, 0)
#define OTX2_MAX_RR_QUANTUM		GENMASK_ULL(23, 0)

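/* Resize the netdev's real Tx queue count to the static Tx queues plus
 * the number of QoS send queues currently active in qos_sq_bmap.
 */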
static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int tx_queues, qos_txqs, err;

	qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap,
				 OTX2_QOS_MAX_LEAF_NODES);

	tx_queues = hw->tx_queues + qos_txqs;

	err = netif_set_real_num_tx_queues(pfvf->netdev, tx_queues);
	if (err) {
		netdev_err(pfvf->netdev,
			   "Failed to set number of Tx queues: %d\n", tx_queues);
		return;
	}
}

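/* Fill cfg->reg[] from @index onwards with the PARENT, SCHEDULE, PIR and
 * CIR register offsets matching the node's scheduler level.
 */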
static void otx2_qos_get_regaddr(struct otx2_qos_node *node,
				 struct nix_txschq_config *cfg,
				 int index)
{
	if (node->level == NIX_TXSCH_LVL_SMQ) {
		cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_MDQX_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL4) {
		cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_TL4X_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL3) {
		cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_TL3X_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL2) {
		cfg->reg[index++] = NIX_AF_TL2X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_TL2X_CIR(node->schq);
	}
}

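/* Convert an HTB quantum in bytes to a hardware DWRR weight by dividing
 * by the DWRR MTU, rounding up.
 */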
static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum)
{
	u32 weight;

	weight = quantum / pfvf->hw.dwrr_mtu;
	if (quantum % pfvf->hw.dwrr_mtu)
		weight += 1;

	return weight;
}

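/* Append the parent, priority/quantum, PIR and (where supported) CIR
 * register writes for @node to @cfg, advancing *num_regs as it goes.
 */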
static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node,
				      struct nix_txschq_config *cfg,
				      int *num_regs)
{
	u32 rr_weight;
	u32 quantum;
	u64 maxrate;

	otx2_qos_get_regaddr(node, cfg, *num_regs);

	/* configure parent txschq */
	cfg->regval[*num_regs] = node->parent->schq << 16;
	(*num_regs)++;

	/* configure prio/quantum */
	if (node->qid == OTX2_QOS_QID_NONE) {
		cfg->regval[*num_regs] = node->prio << 24 |
					 mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
		(*num_regs)++;
		return;
	}

	/* configure priority/quantum */
	if (node->is_static) {
		cfg->regval[*num_regs] =
			(node->schq - node->parent->prio_anchor) << 24;
	} else {
		quantum = node->quantum ?
			  node->quantum : pfvf->tx_max_pktlen;
		rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
		cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
					 rr_weight;
	}
	(*num_regs)++;

	/* configure PIR */
	maxrate = (node->rate > node->ceil) ? node->rate : node->ceil;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, maxrate, 65536);
	(*num_regs)++;

	/* Don't configure CIR when CIR + PIR together are not supported;
	 * on 96xx, CIR + PIR + RED_ALGO=STALL causes a deadlock.
	 */
	if (!test_bit(QOS_CIR_PIR_SUPPORT, &pfvf->hw.cap_flag))
		return;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, node->rate, 65536);
	(*num_regs)++;
}

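/* Build the complete register list for @node according to its level:
 * SMQ config for SMQ nodes, link config at the level reported by the AF,
 * round-robin setup for a root TL2 node, plus the common scheduling and
 * shaping registers.
 */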
static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  struct nix_txschq_config *cfg)
{
	struct otx2_hw *hw = &pfvf->hw;
	int num_regs = 0;
	u8 level;

	level = node->level;

	/* program txschq registers */
	if (level == NIX_TXSCH_LVL_SMQ) {
		cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq);
		cfg->regval[num_regs] = ((u64)pfvf->tx_max_pktlen << 8) |
					OTX2_MIN_MTU;
		cfg->regval[num_regs] |= (0x20ULL << 51) | (0x80ULL << 39) |
					 (0x2ULL << 36);
		num_regs++;

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL4) {
		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL3) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL2) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		/* check if node is root */
		if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
			cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
			cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
						mtu_to_dwrr_weight(pfvf,
								   pfvf->tx_max_pktlen);
			num_regs++;
			goto txschq_cfg_out;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	}

txschq_cfg_out:
	cfg->num_regs = num_regs;
}

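/* Program the parent's TOPOLOGY register (prio anchor and DWRR group
 * priority) through the AF mailbox; an MDQ parent is skipped since MDQ
 * has no topology register.
 */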
static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf,
					       struct otx2_qos_node *parent)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *cfg;
	int rc;

	if (parent->level == NIX_TXSCH_LVL_MDQ)
		return 0;

	mutex_lock(&mbox->lock);

	cfg = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!cfg) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	cfg->lvl = parent->level;

	if (parent->level == NIX_TXSCH_LVL_TL4)
		cfg->reg[0] = NIX_AF_TL4X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL3)
		cfg->reg[0] = NIX_AF_TL3X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL2)
		cfg->reg[0] = NIX_AF_TL2X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL1)
		cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq);

	cfg->regval[0] = (u64)parent->prio_anchor << 32;
	cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ?
			    parent->child_dwrr_prio : 0) << 1;
	cfg->num_regs++;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}

static void otx2_qos_free_hw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node;

	list_for_each_entry_reverse(node, &parent->child_schq_list, list)
		otx2_txschq_free_one(pfvf, node->level, node->schq);
}

static void otx2_qos_free_hw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		otx2_qos_free_hw_node(pfvf, node);
		otx2_qos_free_hw_node_schq(pfvf, node);
		otx2_txschq_free_one(pfvf, node->level, node->schq);
	}
}

static void otx2_qos_free_hw_cfg(struct otx2_nic *pfvf,
				 struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	/* free child node hw mappings */
	otx2_qos_free_hw_node(pfvf, node);
	otx2_qos_free_hw_node_schq(pfvf, node);

	/* free node hw mappings */
	otx2_txschq_free_one(pfvf, node->level, node->schq);

	mutex_unlock(&pfvf->qos.qos_lock);
}

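/* Unlink a software node: drop it from the classid hash and its list,
 * release its qid from qos_sq_bmap (shrinking the netdev Tx queue count)
 * and free it.
 */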
static void otx2_qos_sw_node_delete(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	hash_del_rcu(&node->hlist);

	if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {
		__clear_bit(node->qid, pfvf->qos.qos_sq_bmap);
		otx2_qos_update_tx_netdev_queues(pfvf);
	}

	list_del(&node->list);
	kfree(node);
}

static void otx2_qos_free_sw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) {
		list_del(&node->list);
		kfree(node);
	}
}

static void __otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				    struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		__otx2_qos_free_sw_node(pfvf, node);
		otx2_qos_free_sw_node_schq(pfvf, node);
		otx2_qos_sw_node_delete(pfvf, node);
	}
}

static void otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	__otx2_qos_free_sw_node(pfvf, node);
	otx2_qos_free_sw_node_schq(pfvf, node);
	otx2_qos_sw_node_delete(pfvf, node);

	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_destroy_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	otx2_qos_free_hw_cfg(pfvf, node);
	otx2_qos_free_sw_node(pfvf, node);
}

static void otx2_qos_fill_cfg_schq(struct otx2_qos_node *parent,
				   struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_schq_list, list)
		cfg->schq[node->level]++;
}

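/* Walk the subtree and record, per level, how many scheduler queues must
 * be requested from the AF for the given class hierarchy.
 */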
static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent,
				 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_fill_cfg_tl(node, cfg);
		otx2_qos_fill_cfg_schq(node, cfg);
	}

	/* Assign the required number of transmit scheduler queues under the
	 * given class
	 */
	cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt +
					       parent->max_static_prio + 1;
}

static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf,
					struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_fill_cfg_tl(parent, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_read_txschq_cfg_schq(struct otx2_qos_node *parent,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[node->level];
		cfg->schq_list[node->level][cnt] = node->schq;
		cfg->schq[node->level]++;
		cfg->dwrr_node_pos[node->level]++;
	}
}

static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_read_txschq_cfg_tl(node, cfg);
		cnt = cfg->static_node_pos[node->level];
		cfg->schq_contig_list[node->level][cnt] = node->schq;
		cfg->schq_index_used[node->level][cnt] = true;
		cfg->schq_contig[node->level]++;
		cfg->static_node_pos[node->level]++;
		otx2_qos_read_txschq_cfg_schq(node, cfg);
	}
}

static void otx2_qos_read_txschq_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_read_txschq_cfg_tl(node, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

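/* Allocate the software root node of the HTB tree: TL1 on a PF, TL2
 * (with the default DWRR priority) on a VF.
 */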
static struct otx2_qos_node *
otx2_qos_alloc_root(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = NULL;
	if (!is_otx2_vf(pfvf->pcifunc)) {
		node->level = NIX_TXSCH_LVL_TL1;
	} else {
		node->level = NIX_TXSCH_LVL_TL2;
		node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	}

	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
	node->classid = OTX2_QOS_ROOT_CLASSID;

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid);
	list_add_tail(&node->list, &pfvf->qos.qos_tree);
	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	return node;
}

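/* Insert @node into the parent's child list in ascending priority order;
 * two static siblings may not share a priority.
 */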
static int otx2_qos_add_child_node(struct otx2_qos_node *parent,
				   struct otx2_qos_node *node)
{
	struct list_head *head = &parent->child_list;
	struct otx2_qos_node *tmp_node;
	struct list_head *tmp;

	if (node->prio > parent->max_static_prio)
		parent->max_static_prio = node->prio;

	for (tmp = head->next; tmp != head; tmp = tmp->next) {
		tmp_node = list_entry(tmp, struct otx2_qos_node, list);
		if (tmp_node->prio == node->prio &&
		    tmp_node->is_static)
			return -EEXIST;
		if (tmp_node->prio > node->prio) {
			list_add_tail(&node->list, tmp);
			return 0;
		}
	}

	list_add_tail(&node->list, head);
	return 0;
}

static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node)
{
	struct otx2_qos_node *txschq_node, *parent, *tmp;
	int lvl;

	parent = node;
	for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) {
		txschq_node = kzalloc(sizeof(*txschq_node), GFP_KERNEL);
		if (!txschq_node)
			goto err_out;

		txschq_node->parent = parent;
		txschq_node->level = lvl;
		txschq_node->classid = OTX2_QOS_CLASS_NONE;
		WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE);
		txschq_node->rate = 0;
		txschq_node->ceil = 0;
		txschq_node->prio = 0;
		txschq_node->quantum = 0;
		txschq_node->is_static = true;
		txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

		mutex_lock(&pfvf->qos.qos_lock);
		list_add_tail(&txschq_node->list, &node->child_schq_list);
		mutex_unlock(&pfvf->qos.qos_lock);

		INIT_LIST_HEAD(&txschq_node->child_list);
		INIT_LIST_HEAD(&txschq_node->child_schq_list);
		parent = txschq_node;
	}

	return 0;

err_out:
	list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list,
				 list) {
		list_del(&txschq_node->list);
		kfree(txschq_node);
	}
	return -ENOMEM;
}

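/* Allocate and initialize a leaf class node, link it under @parent and
 * create the chain of scheduler-only nodes down to the MDQ level.
 */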
static struct otx2_qos_node *
otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
			     struct otx2_qos_node *parent,
			     u16 classid, u32 prio, u64 rate, u64 ceil,
			     u32 quantum, u16 qid, bool static_cfg)
{
	struct otx2_qos_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = parent;
	node->level = parent->level - 1;
	node->classid = classid;
	WRITE_ONCE(node->qid, qid);

	node->rate = otx2_convert_rate(rate);
	node->ceil = otx2_convert_rate(ceil);
	node->prio = prio;
	node->quantum = quantum;
	node->is_static = static_cfg;
	node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid);

	mutex_lock(&pfvf->qos.qos_lock);
	err = otx2_qos_add_child_node(parent, node);
	if (err) {
		mutex_unlock(&pfvf->qos.qos_lock);
		return ERR_PTR(err);
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	err = otx2_qos_alloc_txschq_node(pfvf, node);
	if (err) {
		otx2_qos_sw_node_delete(pfvf, node);
		return ERR_PTR(-ENOMEM);
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find_by_qid(struct otx2_nic *pfvf, u16 qid)
{
	struct otx2_qos_node *node = NULL;
	int bkt;

	hash_for_each(pfvf->qos.qos_hlist, bkt, node, hlist) {
		if (node->qid == qid)
			break;
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find_rcu(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid)
{
	struct otx2_qos_node *node;
	u16 qid;
	int res;

	node = otx2_sw_node_find_rcu(pfvf, classid);
	if (!node) {
		res = -ENOENT;
		goto out;
	}
	qid = READ_ONCE(node->qid);
	if (qid == OTX2_QOS_QID_INNER) {
		res = -EINVAL;
		goto out;
	}
	res = pfvf->hw.tx_queues + qid;
out:
	return res;
}

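/* Send one node's txschq register writes to the AF over the mailbox. */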
static int
otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *req;
	int rc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	req->lvl = node->level;
	__otx2_qos_txschq_cfg(pfvf, node, req);

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}

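/* Request the scheduler queues counted in @cfg (contiguous and
 * non-contiguous) from the AF and record the queue numbers it returns.
 */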
static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf,
				 struct otx2_qos_cfg *cfg)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	struct mbox *mbox = &pfvf->mbox;
	int lvl, rc, schq;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		req->schq[lvl] = cfg->schq[lvl];
		req->schq_contig[lvl] = cfg->schq_contig[lvl];
	}

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc) {
		mutex_unlock(&mbox->lock);
		return rc;
	}

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);

	if (IS_ERR(rsp)) {
		rc = PTR_ERR(rsp);
		goto out;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq_contig[lvl]; schq++) {
			cfg->schq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq[lvl]; schq++) {
			cfg->schq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
		}
	}

	pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl;
	pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;

out:
	mutex_unlock(&mbox->lock);
	return rc;
}

static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf,
					struct otx2_qos_cfg *cfg)
{
	int lvl, idx, schq;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
			if (!cfg->schq_index_used[lvl][idx]) {
				schq = cfg->schq_contig_list[lvl][idx];
				otx2_txschq_free_one(pfvf, lvl, schq);
			}
		}
	}
}

static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf,
					  struct otx2_qos_node *node,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[tmp->level];
		tmp->schq = cfg->schq_list[tmp->level][cnt];
		cfg->dwrr_node_pos[tmp->level]++;
	}
}

static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf,
					struct otx2_qos_node *node,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_list, list) {
		otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg);
		cnt = cfg->static_node_pos[tmp->level];
		tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx];
		cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true;
		if (cnt == 0)
			node->prio_anchor =
				cfg->schq_contig_list[tmp->level][0];
		cfg->static_node_pos[tmp->level]++;
		otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg);
	}
}

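/* Map the hardware queues returned by the AF onto the software tree:
 * contiguous queues go to class nodes (indexed by txschq_idx), the
 * remainder to the per-class scheduler chains; any contiguous queue
 * left unused is freed back.
 */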
static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
	otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
	otx2_qos_free_unused_txschq(pfvf, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

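/* Pick a contiguous-queue index for @tmp: a static node takes the slot
 * matching its priority, a DWRR node takes the first free slot at or
 * above the DWRR group priority.
 */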
static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
					  struct otx2_qos_node *tmp,
					  unsigned long *child_idx_bmap,
					  int child_cnt)
{
	int idx;

	if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX)
		return;

	/* assign static nodes 1:1 prio mapping first, then remaining nodes */
	for (idx = 0; idx < child_cnt; idx++) {
		if (tmp->is_static && tmp->prio == idx &&
		    !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(idx, child_idx_bmap);
			return;
		} else if (!tmp->is_static && idx >= tmp->prio &&
			   !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(idx, child_idx_bmap);
			return;
		}
	}
}

static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node)
{
	unsigned long *child_idx_bmap;
	struct otx2_qos_node *tmp;
	int child_cnt;

	list_for_each_entry(tmp, &node->child_list, list)
		tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	/* allocate child index array */
	child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
	child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt),
				 sizeof(unsigned long),
				 GFP_KERNEL);
	if (!child_idx_bmap)
		return -ENOMEM;

	list_for_each_entry(tmp, &node->child_list, list)
		otx2_qos_assign_base_idx_tl(pfvf, tmp);

	/* assign base index of static priority children first */
	list_for_each_entry(tmp, &node->child_list, list) {
		if (!tmp->is_static)
			continue;
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);
	}

	/* assign base index of dwrr priority children */
	list_for_each_entry(tmp, &node->child_list, list)
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);

	kfree(child_idx_bmap);

	return 0;
}

static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	int ret = 0;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_assign_base_idx_tl(pfvf, node);
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}

static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_set_parent_topology(pfvf, tmp->parent);
		if (ret)
			return -EIO;
	}

	return 0;
}

static int otx2_qos_txschq_push_cfg_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node,
				       struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_list, list) {
		ret = otx2_qos_txschq_push_cfg_tl(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_push_cfg_schq(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
	}

	ret = otx2_qos_txschq_set_parent_topology(pfvf, node);
	if (ret)
		return -EIO;

	return 0;
}

static int otx2_qos_txschq_push_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg);
	if (ret)
		goto out;
	ret = otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg);
out:
	mutex_unlock(&pfvf->qos.qos_lock);
	return ret;
}

static int otx2_qos_txschq_update_config(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	otx2_qos_txschq_fill_cfg(pfvf, node, cfg);

	return otx2_qos_txschq_push_cfg(pfvf, node, cfg);
}

static int otx2_qos_txschq_update_root_cfg(struct otx2_nic *pfvf,
					   struct otx2_qos_node *root,
					   struct otx2_qos_cfg *cfg)
{
	root->schq = cfg->schq_list[root->level][0];
	return otx2_qos_txschq_config(pfvf, root);
}

static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg)
{
	int lvl, idx, schq;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq[lvl]; idx++) {
			schq = cfg->schq_list[lvl][idx];
			otx2_txschq_free_one(pfvf, lvl, schq);
		}
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
			if (cfg->schq_index_used[lvl][idx]) {
				schq = cfg->schq_contig_list[lvl][idx];
				otx2_txschq_free_one(pfvf, lvl, schq);
			}
		}
	}
}

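/* Re-point @qid at @node's MDQ: tear down any SQ currently mapped to it,
 * record the new qid-to-SMQ mapping, reprogram the node and re-enable
 * the SQ.
 */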
static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
			       struct otx2_qos_node *node,
			       u16 qid)
{
	if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ)
		otx2_qos_disable_sq(pfvf, qid);

	pfvf->qos.qid_to_sqmap[qid] = node->schq;
	otx2_qos_txschq_config(pfvf, node);
	otx2_qos_enable_sq(pfvf, qid);
}

static void otx2_qos_update_smq_schq(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     bool action)
{
	struct otx2_qos_node *tmp;

	if (node->qid == OTX2_QOS_QID_INNER)
		return;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, node->qid);
		}
	}
}

static void __otx2_qos_update_smq(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  bool action)
{
	struct otx2_qos_node *tmp;

	list_for_each_entry(tmp, &node->child_list, list) {
		__otx2_qos_update_smq(pfvf, tmp, action);
		if (tmp->qid == OTX2_QOS_QID_INNER)
			continue;
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, tmp->qid);
		} else {
			otx2_qos_update_smq_schq(pfvf, tmp, action);
		}
	}
}

static void otx2_qos_update_smq(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				bool action)
{
	mutex_lock(&pfvf->qos.qos_lock);
	__otx2_qos_update_smq(pfvf, node, action);
	otx2_qos_update_smq_schq(pfvf, node, action);
	mutex_unlock(&pfvf->qos.qos_lock);
}

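/* Allocate hardware queues for the prepared @cfg and bind them to the
 * tree. If the interface is down only software state is updated;
 * otherwise the configuration is pushed to hardware and the SQs
 * re-enabled.
 */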
static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	ret = otx2_qos_txschq_alloc(pfvf, cfg);
	if (ret)
		return -ENOSPC;

	ret = otx2_qos_assign_base_idx(pfvf, node);
	if (ret)
		return -ENOMEM;

	if (!(pfvf->netdev->flags & IFF_UP)) {
		otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
		return 0;
	}

	ret = otx2_qos_txschq_update_config(pfvf, node, cfg);
	if (ret) {
		otx2_qos_free_cfg(pfvf, cfg);
		return -EIO;
	}

	otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);

	return 0;
}

static int otx2_qos_update_tree(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				struct otx2_qos_cfg *cfg)
{
	otx2_qos_prepare_txschq_cfg(pfvf, node->parent, cfg);
	return otx2_qos_push_txschq_cfg(pfvf, node->parent, cfg);
}

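/* TC_HTB_CREATE: create the software root node, allocate its scheduler
 * queue and, with the interface up, program it in hardware before
 * publishing the major handle used by ndo_select_queue().
 */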
static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defcls,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *new_cfg;
	struct otx2_qos_node *root;
	int err;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_CREATE: handle=0x%x defcls=0x%x\n",
		   htb_maj_id, htb_defcls);

	root = otx2_qos_alloc_root(pfvf);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		return err;
	}

	/* allocate txschq config */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		err = -ENOMEM;
		goto free_root_node;
	}
	/* allocate one txschq for the htb root node */
	new_cfg->schq[root->level] = 1;
	err = otx2_qos_txschq_alloc(pfvf, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Error allocating txschq");
		goto free_root_node;
	}

	/* Update TL1 RR PRIO */
	if (root->level == NIX_TXSCH_LVL_TL1) {
		root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio;
		netdev_dbg(pfvf->netdev,
			   "TL1 DWRR Priority %d\n", root->child_dwrr_prio);
	}

	if (!(pfvf->netdev->flags & IFF_UP) ||
	    root->level == NIX_TXSCH_LVL_TL1) {
		root->schq = new_cfg->schq_list[root->level][0];
		goto out;
	}

	/* update the txschq configuration in hw */
	err = otx2_qos_txschq_update_root_cfg(pfvf, root, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Error updating txschq configuration");
		goto txschq_free;
	}

out:
	WRITE_ONCE(pfvf->qos.defcls, htb_defcls);
	/* Pairs with smp_load_acquire() in ndo_select_queue */
	smp_store_release(&pfvf->qos.maj_id, htb_maj_id);
	kfree(new_cfg);
	return 0;

txschq_free:
	otx2_qos_free_cfg(pfvf, new_cfg);
free_root_node:
	kfree(new_cfg);
	otx2_qos_sw_node_delete(pfvf, root);
	return err;
}

static int otx2_qos_root_destroy(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	netdev_dbg(pfvf->netdev, "TC_HTB_DESTROY\n");

	/* find root node */
	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return -ENOENT;

	/* free the hw mappings */
	otx2_qos_destroy_node(pfvf, root);

	return 0;
}

static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum)
{
	u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
	int err = 0;

	/* The max round robin weight supported by octeontx2 and CN10K
	 * differs. Validate accordingly.
	 */
	if (is_dev_otx2(pfvf->pdev))
		err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0;
	else if (rr_weight > CN10K_MAX_RR_WEIGHT)
		err = -EINVAL;

	return err;
}

static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent,
				      struct netlink_ext_ack *extack,
				      struct otx2_nic *pfvf,
				      u64 prio, u64 quantum)
{
	int err;

	err = otx2_qos_validate_quantum(pfvf, quantum);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value");
		return err;
	}

	if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) {
		parent->child_dwrr_prio = prio;
	} else if (prio != parent->child_dwrr_prio) {
		NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_qos_validate_configuration(struct otx2_qos_node *parent,
					   struct netlink_ext_ack *extack,
					   struct otx2_nic *pfvf,
					   u64 prio, bool static_cfg)
{
	if (prio == parent->child_dwrr_prio && static_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists");
		return -EEXIST;
	}

	if (static_cfg && test_bit(prio, parent->prio_bmap)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Static priority child with same priority exists");
		return -EEXIST;
	}

	return 0;
}

static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio)
{
	/* For PF, root node dwrr priority is static */
	if (parent->level == NIX_TXSCH_LVL_TL1)
		return;

	if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) {
		parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		clear_bit(prio, parent->prio_bmap);
	}
}

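/* Return true if @prio belongs to the parent's DWRR group. A static
 * child already holding @prio is converted to DWRR (provided its
 * quantum is valid) so that both nodes share the group.
 */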
static bool is_qos_node_dwrr(struct otx2_qos_node *parent,
			     struct otx2_nic *pfvf,
			     u64 prio)
{
	struct otx2_qos_node *node;
	bool ret = false;

	if (parent->child_dwrr_prio == prio)
		return true;

	mutex_lock(&pfvf->qos.qos_lock);
	list_for_each_entry(node, &parent->child_list, list) {
		if (prio == node->prio) {
			if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO &&
			    parent->child_dwrr_prio != prio)
				continue;

			if (otx2_qos_validate_quantum(pfvf, node->quantum)) {
				netdev_err(pfvf->netdev,
					   "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d",
					   node->classid, node->quantum,
					   node->prio);
				break;
			}
			/* mark old node as dwrr */
			node->is_static = false;
			parent->child_dwrr_cnt++;
			parent->child_static_cnt--;
			ret = true;
			break;
		}
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}

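/* TC_HTB_LEAF_ALLOC_QUEUE: validate the class parameters, create a new
 * leaf under @parent_classid, push the updated tree to hardware and
 * return the netdev Tx queue number backing the class. On failure the
 * previous tree is restored from the saved configuration.
 */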
static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
				     u32 parent_classid, u64 rate, u64 ceil,
				     u64 prio, u32 quantum,
				     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *old_cfg, *new_cfg;
	struct otx2_qos_node *node, *parent;
	int qid, ret, err;
	bool static_cfg;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n",
		   classid, parent_classid, rate, ceil, prio, quantum);

	if (prio > OTX2_QOS_MAX_PRIO) {
		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!quantum || quantum > INT_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* get parent node */
	parent = otx2_sw_node_find(pfvf, parent_classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		ret = -ENOENT;
		goto out;
	}
	if (parent->level == NIX_TXSCH_LVL_MDQ) {
		NL_SET_ERR_MSG_MOD(extack, "HTB qos max levels reached");
		ret = -EOPNOTSUPP;
		goto out;
	}

	static_cfg = !is_qos_node_dwrr(parent, pfvf, prio);
	ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio,
					      static_cfg);
	if (ret)
		goto out;

	if (!static_cfg) {
		ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio,
						 quantum);
		if (ret)
			goto out;
	}

	if (static_cfg)
		parent->child_static_cnt++;
	else
		parent->child_dwrr_cnt++;

	set_bit(prio, parent->prio_bmap);

	/* read current txschq configuration */
	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto reset_prio;
	}
	otx2_qos_read_txschq_cfg(pfvf, parent, old_cfg);

	/* allocate a new sq */
	qid = otx2_qos_get_qid(pfvf);
	if (qid < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Reached max supported QOS SQs");
		ret = -ENOMEM;
		goto free_old_cfg;
	}

	/* Actual SQ mapping will be updated after SMQ alloc */
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* allocate and initialize a new child node */
	node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate,
					    ceil, quantum, qid, static_cfg);
	if (IS_ERR(node)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
		ret = PTR_ERR(node);
		goto free_old_cfg;
	}

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto free_node;
	}
	ret = otx2_qos_update_tree(pfvf, node, new_cfg);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		otx2_qos_sw_node_delete(pfvf, node);
		/* restore the old qos tree */
		err = otx2_qos_txschq_update_config(pfvf, parent, old_cfg);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore txschq configuration");
			goto free_old_cfg;
		}

		otx2_qos_update_smq(pfvf, parent, QOS_CFG_SQ);
		goto free_old_cfg;
	}

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	/* free new txschq config */
	kfree(new_cfg);

	/* free old txschq config */
	otx2_qos_free_cfg(pfvf, old_cfg);
	kfree(old_cfg);

	return pfvf->hw.tx_queues + qid;

free_node:
	otx2_qos_sw_node_delete(pfvf, node);
free_old_cfg:
	kfree(old_cfg);
reset_prio:
	if (static_cfg)
		parent->child_static_cnt--;
	else
		parent->child_dwrr_cnt--;

	clear_bit(prio, parent->prio_bmap);
out:
	return ret;
}

static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
				  u16 child_classid, u64 rate, u64 ceil, u64 prio,
				  u32 quantum, struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *old_cfg, *new_cfg;
	struct otx2_qos_node *node, *child;
	bool static_cfg;
	int ret, err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_TO_INNER classid %04x, child %04x, rate %llu, ceil %llu\n",
		   classid, child_classid, rate, ceil);

	if (prio > OTX2_QOS_MAX_PRIO) {
		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!quantum || quantum > INT_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		ret = -ENOENT;
		goto out;
	}
	/* check max qos txschq level */
	if (node->level == NIX_TXSCH_LVL_MDQ) {
		NL_SET_ERR_MSG_MOD(extack, "HTB qos level not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	static_cfg = !is_qos_node_dwrr(node, pfvf, prio);
	if (!static_cfg) {
		ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio,
						 quantum);
		if (ret)
			goto out;
	}

	if (static_cfg)
		node->child_static_cnt++;
	else
		node->child_dwrr_cnt++;

	set_bit(prio, node->prio_bmap);

	/* store the qid to assign to leaf node */
	qid = node->qid;

	/* read current txschq configuration */
	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto reset_prio;
	}
	otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);

	/* delete the txschq nodes allocated for this node */
	otx2_qos_disable_sq(pfvf, qid);
	otx2_qos_free_hw_node_schq(pfvf, node);
	otx2_qos_free_sw_node_schq(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* mark this node as htb inner node */
	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);

	/* allocate and initialize a new child node */
	child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid,
					     prio, rate, ceil, quantum,
					     qid, static_cfg);
	if (IS_ERR(child)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
		ret = PTR_ERR(child);
		goto free_old_cfg;
	}

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto free_node;
	}
	ret = otx2_qos_update_tree(pfvf, child, new_cfg);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		otx2_qos_sw_node_delete(pfvf, child);
		/* restore the old qos tree */
		WRITE_ONCE(node->qid, qid);
		err = otx2_qos_alloc_txschq_node(pfvf, node);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore old leaf node");
			goto free_old_cfg;
		}
		err = otx2_qos_txschq_update_config(pfvf, node, old_cfg);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore txschq configuration");
			goto free_old_cfg;
		}
		otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);
		goto free_old_cfg;
	}

	/* free new txschq config */
	kfree(new_cfg);

	/* free old txschq config */
	otx2_qos_free_cfg(pfvf, old_cfg);
	kfree(old_cfg);

	return 0;

free_node:
	otx2_qos_sw_node_delete(pfvf, child);
free_old_cfg:
	kfree(old_cfg);
reset_prio:
	if (static_cfg)
		node->child_static_cnt--;
	else
		node->child_dwrr_cnt--;
	clear_bit(prio, node->prio_bmap);
out:
	return ret;
}

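/* Return one past the highest qid in use, or 0 when no QoS SQ is active
 * (find_last_bit() returns the bitmap size if no bit is set).
 */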
static int otx2_qos_cur_leaf_nodes(struct otx2_nic *pfvf)
{
	int last = find_last_bit(pfvf->qos.qos_sq_bmap, pfvf->hw.tc_tx_queues);

	return last == pfvf->hw.tc_tx_queues ? 0 : last + 1;
}

static void otx2_reset_qdisc(struct net_device *dev, u16 qid)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);

	if (!qdisc)
		return;

	spin_lock_bh(qdisc_lock(qdisc));
	qdisc_reset(qdisc);
	spin_unlock_bh(qdisc_lock(qdisc));
}

static void otx2_cfg_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node,
			 int qid)
{
	struct otx2_qos_node *tmp;

	list_for_each_entry(tmp, &node->child_schq_list, list)
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			otx2_qos_txschq_config(pfvf, tmp);
			pfvf->qos.qid_to_sqmap[qid] = tmp->schq;
		}
}

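/* TC_HTB_LEAF_DEL: tear down the class and its SQ. To keep the qid
 * space dense, the leaf holding the highest qid is migrated into the
 * freed slot and its classid is reported back to the stack.
 */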
static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	bool dwrr_del_node = false;
	u16 qid, moved_qid;
	u64 prio;

	netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, *classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}
	parent = node->parent;
	prio   = node->prio;
	qid    = node->qid;

	if (!node->is_static)
		dwrr_del_node = true;

	otx2_qos_disable_sq(pfvf, node->qid);

	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	moved_qid = otx2_qos_cur_leaf_nodes(pfvf);

	/* last node just deleted */
	if (moved_qid == 0 || moved_qid == qid)
		return 0;

	moved_qid--;

	node = otx2_sw_node_find_by_qid(pfvf, moved_qid);
	if (!node)
		return 0;

	/* stop traffic to the old queue and disable
	 * SQ associated with it
	 */
	node->qid = OTX2_QOS_QID_INNER;
	__clear_bit(moved_qid, pfvf->qos.qos_sq_bmap);
	otx2_qos_disable_sq(pfvf, moved_qid);

	otx2_reset_qdisc(pfvf->netdev, pfvf->hw.tx_queues + moved_qid);

	/* enable SQ associated with qid and
	 * update the node
	 */
	otx2_cfg_smq(pfvf, node, qid);

	otx2_qos_enable_sq(pfvf, qid);
	__set_bit(qid, pfvf->qos.qos_sq_bmap);
	node->qid = qid;

	*classid = node->classid;
	return 0;
}

static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force,
				  struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	struct otx2_qos_cfg *new_cfg;
	bool dwrr_del_node = false;
	u64 prio;
	int err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_DEL_LAST classid %04x\n", classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}

	/* save qid for use by parent */
	qid = node->qid;
	prio = node->prio;

	parent = otx2_sw_node_find(pfvf, node->parent->classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		return -ENOENT;
	}

	if (!node->is_static)
		dwrr_del_node = true;

	/* destroy the leaf node */
	otx2_qos_disable_sq(pfvf, qid);
	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	/* create downstream txschq entries to parent */
	err = otx2_qos_alloc_txschq_node(pfvf, parent);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB failed to create txsch configuration");
		return err;
	}
	WRITE_ONCE(parent->qid, qid);
	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		return -ENOMEM;
	}
	/* fill txschq cfg and push txschq cfg to hw */
	otx2_qos_fill_cfg_schq(parent, new_cfg);
	err = otx2_qos_push_txschq_cfg(pfvf, parent, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		return err;
	}
	kfree(new_cfg);

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	return 0;
}

void otx2_clean_qos_queues(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	otx2_qos_update_smq(pfvf, root, QOS_SMQ_FLUSH);
}

void otx2_qos_config_txschq(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;
	int err;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	if (root->level != NIX_TXSCH_LVL_TL1) {
		err = otx2_qos_txschq_config(pfvf, root);
		if (err) {
			netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
			goto root_destroy;
		}
	}

	err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL);
	if (err) {
		netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
		goto root_destroy;
	}

	otx2_qos_update_smq(pfvf, root, QOS_CFG_SQ);
	return;

root_destroy:
	netdev_err(pfvf->netdev, "Failed to update Scheduler/Shaping config in Hardware\n");
	/* Free resources allocated */
	otx2_qos_root_destroy(pfvf);
}

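/* ndo_setup_tc() entry point dispatching TC HTB offload commands to the
 * handlers above.
 */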
int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
{
	struct otx2_nic *pfvf = netdev_priv(ndev);
	int res;

	switch (htb->command) {
	case TC_HTB_CREATE:
		return otx2_qos_root_add(pfvf, htb->parent_classid,
					 htb->classid, htb->extack);
	case TC_HTB_DESTROY:
		return otx2_qos_root_destroy(pfvf);
	case TC_HTB_LEAF_ALLOC_QUEUE:
		res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid,
						htb->parent_classid,
						htb->rate, htb->ceil,
						htb->prio, htb->quantum,
						htb->extack);
		if (res < 0)
			return res;
		htb->qid = res;
		return 0;
	case TC_HTB_LEAF_TO_INNER:
		return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid,
					      htb->classid, htb->rate,
					      htb->ceil, htb->prio,
					      htb->quantum, htb->extack);
	case TC_HTB_LEAF_DEL:
		return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack);
	case TC_HTB_LEAF_DEL_LAST:
	case TC_HTB_LEAF_DEL_LAST_FORCE:
		return otx2_qos_leaf_del_last(pfvf, htb->classid,
				htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
					      htb->extack);
	case TC_HTB_LEAF_QUERY_QUEUE:
		res = otx2_get_txq_by_classid(pfvf, htb->classid);
		htb->qid = res;
		return 0;
	case TC_HTB_NODE_MODIFY:
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}