xref: /linux/drivers/net/ethernet/marvell/octeontx2/nic/qos.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Ethernet driver
3  *
4  * Copyright (C) 2023 Marvell.
5  *
6  */
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/inetdevice.h>
10 #include <linux/bitfield.h>
11 
12 #include "otx2_common.h"
13 #include "cn10k.h"
14 #include "qos.h"
15 
16 #define OTX2_QOS_QID_INNER		0xFFFFU
17 #define OTX2_QOS_QID_NONE		0xFFFEU
18 #define OTX2_QOS_ROOT_CLASSID		0xFFFFFFFF
19 #define OTX2_QOS_CLASS_NONE		0
20 #define OTX2_QOS_DEFAULT_PRIO		0xF
21 #define OTX2_QOS_INVALID_SQ		0xFFFF
22 #define OTX2_QOS_INVALID_TXSCHQ_IDX	0xFFFF
23 #define CN10K_MAX_RR_WEIGHT		GENMASK_ULL(13, 0)
24 #define OTX2_MAX_RR_QUANTUM		GENMASK_ULL(23, 0)
25 
26 static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf)
27 {
28 	struct otx2_hw *hw = &pfvf->hw;
29 	int tx_queues, qos_txqs, err;
30 
31 	qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap,
32 				 OTX2_QOS_MAX_LEAF_NODES);
33 
34 	tx_queues = hw->tx_queues + qos_txqs;
35 
36 	err = netif_set_real_num_tx_queues(pfvf->netdev, tx_queues);
37 	if (err) {
38 		netdev_err(pfvf->netdev,
39 			   "Failed to set no of Tx queues: %d\n", tx_queues);
40 		return;
41 	}
42 }
43 
44 static void otx2_qos_get_regaddr(struct otx2_qos_node *node,
45 				 struct nix_txschq_config *cfg,
46 				 int index)
47 {
48 	if (node->level == NIX_TXSCH_LVL_SMQ) {
49 		cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq);
50 		cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq);
51 		cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq);
52 		cfg->reg[index]   = NIX_AF_MDQX_CIR(node->schq);
53 	} else if (node->level == NIX_TXSCH_LVL_TL4) {
54 		cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq);
55 		cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq);
56 		cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq);
57 		cfg->reg[index]   = NIX_AF_TL4X_CIR(node->schq);
58 	} else if (node->level == NIX_TXSCH_LVL_TL3) {
59 		cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq);
60 		cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq);
61 		cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq);
62 		cfg->reg[index]   = NIX_AF_TL3X_CIR(node->schq);
63 	} else if (node->level == NIX_TXSCH_LVL_TL2) {
64 		cfg->reg[index++] = NIX_AF_TL2X_PARENT(node->schq);
65 		cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq);
66 		cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq);
67 		cfg->reg[index]   = NIX_AF_TL2X_CIR(node->schq);
68 	}
69 }
70 
71 static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum)
72 {
73 	u32 weight;
74 
75 	weight = quantum / pfvf->hw.dwrr_mtu;
76 	if (quantum % pfvf->hw.dwrr_mtu)
77 		weight += 1;
78 
79 	return weight;
80 }
81 
/* Fill cfg->regval[] (starting at *num_regs, advancing it per entry)
 * with the parent link, prio/quantum, PIR and optionally CIR values
 * matching the register addresses otx2_qos_get_regaddr() placed in
 * cfg->reg[] at the same slots.
 */
static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node,
				      struct nix_txschq_config *cfg,
				      int *num_regs)
{
	u32 rr_weight;
	u32 quantum;
	u64 maxrate;

	otx2_qos_get_regaddr(node, cfg, *num_regs);

	/* configure parent txschq */
	cfg->regval[*num_regs] = node->parent->schq << 16;
	(*num_regs)++;

	/* configure prio/quantum */
	if (node->qid == OTX2_QOS_QID_NONE) {
		/* mid-level node with no SQ: prio plus MTU-derived weight,
		 * no shaping registers are programmed for it
		 */
		cfg->regval[*num_regs] =  node->prio << 24 |
					  mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
		(*num_regs)++;
		return;
	}

	/* configure priority/quantum  */
	if (node->is_static) {
		/* static prio child: position relative to the parent's
		 * prio anchor schq
		 */
		cfg->regval[*num_regs] =
			(node->schq - node->parent->prio_anchor) << 24;
	} else {
		/* DWRR child: weight from quantum (fall back to max pkt len) */
		quantum = node->quantum ?
			  node->quantum : pfvf->tx_max_pktlen;
		rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
		cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
					 rr_weight;
	}
	(*num_regs)++;

	/* configure PIR */
	maxrate = (node->rate > node->ceil) ? node->rate : node->ceil;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, maxrate, 65536);
	(*num_regs)++;

	/* Don't configure CIR when both CIR+PIR not supported
	 * On 96xx, CIR + PIR + RED_ALGO=STALL causes deadlock
	 */
	if (!test_bit(QOS_CIR_PIR_SUPPORT, &pfvf->hw.cap_flag))
		return;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, node->rate, 65536);
	(*num_regs)++;
}
135 
/* Build the complete txschq register program for @node into @cfg and
 * set cfg->num_regs.  Per-level extras (SMQ config, link config at the
 * AF-reported link level, root TL2 schedule) are emitted before the
 * common shaping registers.
 */
static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  struct nix_txschq_config *cfg)
{
	struct otx2_hw *hw = &pfvf->hw;
	int num_regs = 0;
	u8 level;

	level = node->level;

	/* program txschq registers */
	if (level == NIX_TXSCH_LVL_SMQ) {
		/* SMQ config: max/min packet length in the low bits */
		cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq);
		cfg->regval[num_regs] = ((u64)pfvf->tx_max_pktlen << 8) |
					OTX2_MIN_MTU;
		/* NOTE(review): magic NIX_AF_SMQX_CFG field values —
		 * presumably pad/express settings; confirm against the HRM.
		 */
		cfg->regval[num_regs] |= (0x20ULL << 51) | (0x80ULL << 39) |
					 (0x2ULL << 36);
		num_regs++;

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);

	} else if (level == NIX_TXSCH_LVL_TL4) {
		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL3) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL2) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		/* check if node is root */
		if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
			/* root TL2 (VF case): default RR prio and MTU weight */
			cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
			cfg->regval[num_regs] =  TXSCH_TL1_DFLT_RR_PRIO << 24 |
						 mtu_to_dwrr_weight(pfvf,
								    pfvf->tx_max_pktlen);
			num_regs++;
			goto txschq_cfg_out;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	}

txschq_cfg_out:
	cfg->num_regs = num_regs;
}
192 
/* Program @parent's TOPOLOGY register via the AF mailbox: prio anchor
 * schq in bits [63:32] and the DWRR group priority (RR_PRIO) shifted to
 * bit 1 (0 when no DWRR group was configured).  MDQ has no children, so
 * it is a no-op.  Returns 0 or a negative mbox error.
 */
static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf,
					       struct otx2_qos_node *parent)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *cfg;
	int rc;

	if (parent->level == NIX_TXSCH_LVL_MDQ)
		return 0;

	mutex_lock(&mbox->lock);

	cfg = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!cfg) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	cfg->lvl = parent->level;

	if (parent->level == NIX_TXSCH_LVL_TL4)
		cfg->reg[0] = NIX_AF_TL4X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL3)
		cfg->reg[0] = NIX_AF_TL3X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL2)
		cfg->reg[0] = NIX_AF_TL2X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL1)
		cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq);

	cfg->regval[0] = (u64)parent->prio_anchor << 32;
	cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ?
			    parent->child_dwrr_prio : 0)  << 1;
	cfg->num_regs++;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}
233 
/* Return to the AF the HW schq of every mid-level node on @parent's
 * child_schq_list.  Walked in reverse so the last-added (lowest-level,
 * MDQ-most) entries are freed first — TODO confirm this ordering
 * requirement against the AF free semantics.
 */
static void otx2_qos_free_hw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node;

	list_for_each_entry_reverse(node, &parent->child_schq_list, list)
		otx2_txschq_free_one(pfvf, node->level, node->schq);
}
242 
/* Recursively free the HW schqs of the whole subtree under @parent:
 * depth-first, then each child's mid-level schqs, then the child's own
 * schq.  Only HW resources are released; SW nodes stay allocated.
 */
static void otx2_qos_free_hw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		otx2_qos_free_hw_node(pfvf, node);
		otx2_qos_free_hw_node_schq(pfvf, node);
		otx2_txschq_free_one(pfvf, node->level, node->schq);
	}
}
254 
/* Free all HW scheduler queues owned by @node and its subtree, under
 * the qos_lock: children first, then @node's own mid-level schqs, then
 * @node's schq itself.
 */
static void otx2_qos_free_hw_cfg(struct otx2_nic *pfvf,
				 struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	/* free child node hw mappings */
	otx2_qos_free_hw_node(pfvf, node);
	otx2_qos_free_hw_node_schq(pfvf, node);

	/* free node hw mappings */
	otx2_txschq_free_one(pfvf, node->level, node->schq);

	mutex_unlock(&pfvf->qos.qos_lock);
}
269 
/* Remove one SW node: unhash it, release its qid bit (and shrink the
 * netdev Tx queue count) if it owned a real SQ, then unlink and free.
 * INNER/NONE qids mark inner or schq-only nodes with no SQ attached.
 */
static void otx2_qos_sw_node_delete(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	hash_del_rcu(&node->hlist);

	if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {
		__clear_bit(node->qid, pfvf->qos.qos_sq_bmap);
		otx2_qos_update_tx_netdev_queues(pfvf);
	}

	list_del(&node->list);
	kfree(node);
}
283 
/* Free the SW bookkeeping nodes on @parent's child_schq_list.  These
 * were never hashed (classid OTX2_QOS_CLASS_NONE), so plain list_del +
 * kfree suffices.
 */
static void otx2_qos_free_sw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) {
		list_del(&node->list);
		kfree(node);
	}
}
294 
/* Recursively free all SW nodes under @parent: grandchildren first,
 * then each child's schq-list nodes, then the child itself (which also
 * unhashes it and releases its qid).  Caller holds qos_lock.
 */
static void __otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				    struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		__otx2_qos_free_sw_node(pfvf, node);
		otx2_qos_free_sw_node_schq(pfvf, node);
		otx2_qos_sw_node_delete(pfvf, node);
	}
}
306 
/* Free @node and its entire SW subtree (children, schq-list entries,
 * then @node itself) under the qos_lock.
 */
static void otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	__otx2_qos_free_sw_node(pfvf, node);
	otx2_qos_free_sw_node_schq(pfvf, node);
	otx2_qos_sw_node_delete(pfvf, node);

	mutex_unlock(&pfvf->qos.qos_lock);
}
318 
/* Tear down a class subtree completely: release its HW scheduler queues
 * first, then the SW bookkeeping nodes.
 */
static void otx2_qos_destroy_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	otx2_qos_free_hw_cfg(pfvf, node);
	otx2_qos_free_sw_node(pfvf, node);
}
325 
/* Count one (non-contiguous) schq request per level for each node on
 * @parent's child_schq_list.
 */
static void otx2_qos_fill_cfg_schq(struct otx2_qos_node *parent,
				   struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_schq_list, list)
		cfg->schq[node->level]++;
}
334 
/* Recursively tally the schq allocation request for @parent's subtree:
 * per-node schq-list counts plus, at the level below @parent, one
 * contiguous block big enough for its DWRR children and all static
 * priorities (prio slots are index-addressed, hence max_static_prio+1).
 */
static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent,
				 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_fill_cfg_tl(node, cfg);
		otx2_qos_fill_cfg_schq(node, cfg);
	}

	/* Assign the required number of transmit schedular queues under the
	 * given class
	 */
	cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt +
					       parent->max_static_prio + 1;
}
351 
/* Compute the schq counts needed for @parent's subtree into @cfg,
 * holding qos_lock so the tree cannot change while counting.
 */
static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf,
					struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_fill_cfg_tl(parent, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}
360 
/* Record the already-assigned schq of each node on @parent's
 * child_schq_list into cfg->schq_list, tracking per-level positions in
 * dwrr_node_pos (inverse of otx2_qos_txschq_fill_cfg_schq()).
 */
static void otx2_qos_read_txschq_cfg_schq(struct otx2_qos_node *parent,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[node->level];
		cfg->schq_list[node->level][cnt] = node->schq;
		cfg->schq[node->level]++;
		cfg->dwrr_node_pos[node->level]++;
	}
}
374 
/* Recursively snapshot the current HW assignment of @parent's subtree
 * into @cfg: each node's schq goes into the contiguous list (marked
 * used), its schq-list nodes into the non-contiguous list.
 */
static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_read_txschq_cfg_tl(node, cfg);
		cnt = cfg->static_node_pos[node->level];
		cfg->schq_contig_list[node->level][cnt] = node->schq;
		cfg->schq_index_used[node->level][cnt] = true;
		cfg->schq_contig[node->level]++;
		cfg->static_node_pos[node->level]++;
		otx2_qos_read_txschq_cfg_schq(node, cfg);
	}
}
391 
/* Snapshot the current HW schq assignment of @node's subtree into @cfg
 * under qos_lock.
 */
static void otx2_qos_read_txschq_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_read_txschq_cfg_tl(node, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}
400 
/* Allocate and initialize the root of the HTB software tree.  PFs root
 * at TL1; VFs root at TL2 with the default DWRR prio.  The node is
 * hashed by classid and appended to qos_tree.
 * Returns the node or ERR_PTR(-ENOMEM).
 */
static struct otx2_qos_node *
otx2_qos_alloc_root(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = NULL;
	if (!is_otx2_vf(pfvf->pcifunc)) {
		node->level = NIX_TXSCH_LVL_TL1;
	} else {
		node->level = NIX_TXSCH_LVL_TL2;
		node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	}

	/* WRITE_ONCE pairs with lockless qid readers (e.g. select_queue) */
	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
	node->classid = OTX2_QOS_ROOT_CLASSID;

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid);
	list_add_tail(&node->list, &pfvf->qos.qos_tree);
	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	return node;
}
428 
429 static int otx2_qos_add_child_node(struct otx2_qos_node *parent,
430 				   struct otx2_qos_node *node)
431 {
432 	struct list_head *head = &parent->child_list;
433 	struct otx2_qos_node *tmp_node;
434 	struct list_head *tmp;
435 
436 	if (node->prio > parent->max_static_prio)
437 		parent->max_static_prio = node->prio;
438 
439 	for (tmp = head->next; tmp != head; tmp = tmp->next) {
440 		tmp_node = list_entry(tmp, struct otx2_qos_node, list);
441 		if (tmp_node->prio == node->prio &&
442 		    tmp_node->is_static)
443 			return -EEXIST;
444 		if (tmp_node->prio > node->prio) {
445 			list_add_tail(&node->list, tmp);
446 			return 0;
447 		}
448 	}
449 
450 	list_add_tail(&node->list, head);
451 	return 0;
452 }
453 
/* Create the chain of mid-level SW nodes below @node, one per scheduler
 * level down to MDQ, each parented to the one above.  They carry no
 * classid/qid and are appended to @node's child_schq_list.
 * Returns 0, or -ENOMEM after freeing any partially built chain.
 */
static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node)
{
	struct otx2_qos_node *txschq_node, *parent, *tmp;
	int lvl;

	parent = node;
	for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) {
		txschq_node = kzalloc(sizeof(*txschq_node), GFP_KERNEL);
		if (!txschq_node)
			goto err_out;

		txschq_node->parent = parent;
		txschq_node->level = lvl;
		txschq_node->classid = OTX2_QOS_CLASS_NONE;
		WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE);
		txschq_node->rate = 0;
		txschq_node->ceil = 0;
		txschq_node->prio = 0;
		txschq_node->quantum = 0;
		txschq_node->is_static = true;
		txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

		mutex_lock(&pfvf->qos.qos_lock);
		list_add_tail(&txschq_node->list, &node->child_schq_list);
		mutex_unlock(&pfvf->qos.qos_lock);

		INIT_LIST_HEAD(&txschq_node->child_list);
		INIT_LIST_HEAD(&txschq_node->child_schq_list);
		parent = txschq_node;
	}

	return 0;

err_out:
	/* unwind nodes already linked onto the schq list */
	list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list,
				 list) {
		list_del(&txschq_node->list);
		kfree(txschq_node);
	}
	return -ENOMEM;
}
497 
/* Allocate and wire up a SW node for a new HTB class one level below
 * @parent: reserve its qid bit, hash it by classid, insert it into the
 * parent's sorted child list, then build its mid-level schq chain.
 * Returns the node or an ERR_PTR.
 */
static struct otx2_qos_node *
otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
			     struct otx2_qos_node *parent,
			     u16 classid, u32 prio, u64 rate, u64 ceil,
			     u32 quantum, u16 qid, bool static_cfg)
{
	struct otx2_qos_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = parent;
	node->level = parent->level - 1;
	node->classid = classid;
	WRITE_ONCE(node->qid, qid);

	node->rate = otx2_convert_rate(rate);
	node->ceil = otx2_convert_rate(ceil);
	node->prio = prio;
	node->quantum = quantum;
	node->is_static = static_cfg;
	node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid);

	mutex_lock(&pfvf->qos.qos_lock);
	err = otx2_qos_add_child_node(parent, node);
	if (err) {
		/* NOTE(review): node stays hashed and is not freed on this
		 * path — looks like a leak; verify against upstream.
		 */
		mutex_unlock(&pfvf->qos.qos_lock);
		return ERR_PTR(err);
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	err = otx2_qos_alloc_txschq_node(pfvf, node);
	if (err) {
		otx2_qos_sw_node_delete(pfvf, node);
		return ERR_PTR(-ENOMEM);
	}

	return node;
}
547 
/* Linear scan of the whole classid hash for the node owning @qid.
 * Returns NULL when no node matches (hash_for_each leaves node NULL
 * after a complete, match-free iteration).
 */
static struct otx2_qos_node
*otx2_sw_node_find_by_qid(struct otx2_nic *pfvf, u16 qid)
{
	struct otx2_qos_node *node = NULL;
	int bkt;

	hash_for_each(pfvf->qos.qos_hlist, bkt, node, hlist) {
		if (node->qid == qid)
			break;
	}

	return node;
}
561 
/* Look up a SW node by HTB classid in its hash bucket.
 * Returns NULL if not found.
 */
static struct otx2_qos_node *
otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}
574 
/* RCU-safe classid lookup for lockless readers (datapath queue
 * selection).  Caller must be in an RCU read-side section.
 * Returns NULL if not found.
 */
static struct otx2_qos_node *
otx2_sw_node_find_rcu(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}
587 
588 int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid)
589 {
590 	struct otx2_qos_node *node;
591 	u16 qid;
592 	int res;
593 
594 	node = otx2_sw_node_find_rcu(pfvf, classid);
595 	if (!node) {
596 		res = -ENOENT;
597 		goto out;
598 	}
599 	qid = READ_ONCE(node->qid);
600 	if (qid == OTX2_QOS_QID_INNER) {
601 		res = -EINVAL;
602 		goto out;
603 	}
604 	res = pfvf->hw.tx_queues + qid;
605 out:
606 	return res;
607 }
608 
/* Push @node's full txschq register program (built by
 * __otx2_qos_txschq_cfg()) to the AF over the mailbox.
 * Returns 0 or a negative mbox error.
 */
static int
otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *req;
	int rc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	req->lvl = node->level;
	__otx2_qos_txschq_cfg(pfvf, node, req);

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}
633 
/* Ask the AF for the schqs counted in @cfg (contiguous blocks for
 * prio-addressed children, non-contiguous singles otherwise) and copy
 * the granted ids back into cfg's lists.  Also caches the AF-reported
 * link-config level and aggregate-level RR prio.
 * Returns 0 or a negative error.
 */
static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf,
				 struct otx2_qos_cfg *cfg)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	struct mbox *mbox = &pfvf->mbox;
	int lvl, rc, schq;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		req->schq[lvl] = cfg->schq[lvl];
		req->schq_contig[lvl] = cfg->schq_contig[lvl];
	}

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc) {
		mutex_unlock(&mbox->lock);
		return rc;
	}

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);

	if (IS_ERR(rsp)) {
		rc = PTR_ERR(rsp);
		goto out;
	}

	/* copy granted contiguous schq ids */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq_contig[lvl]; schq++) {
			cfg->schq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}
	}

	/* copy granted non-contiguous schq ids */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq[lvl]; schq++) {
			cfg->schq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
		}
	}

	pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl;
	pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;

out:
	mutex_unlock(&mbox->lock);
	return rc;
}
689 
690 static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf,
691 					struct otx2_qos_cfg *cfg)
692 {
693 	int lvl, idx, schq;
694 
695 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
696 		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
697 			if (!cfg->schq_index_used[lvl][idx]) {
698 				schq = cfg->schq_contig_list[lvl][idx];
699 				otx2_txschq_free_one(pfvf, lvl, schq);
700 			}
701 		}
702 	}
703 }
704 
/* Hand out non-contiguous schqs from cfg->schq_list to @node's
 * mid-level children, consuming per-level positions via dwrr_node_pos.
 * Caller holds qos_lock.
 */
static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf,
					  struct otx2_qos_node *node,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[tmp->level];
		tmp->schq = cfg->schq_list[tmp->level][cnt];
		cfg->dwrr_node_pos[tmp->level]++;
	}
}
718 
/* Recursively bind allocated schqs to @node's subtree: each child takes
 * the contiguous-block entry at its precomputed txschq_idx (marked
 * used), the parent's prio_anchor is set to the block's first schq, and
 * each child's schq-list is filled in.  Caller holds qos_lock.
 */
static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf,
					struct otx2_qos_node *node,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_list, list) {
		otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg);
		cnt = cfg->static_node_pos[tmp->level];
		tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx];
		cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true;
		/* first child processed at this level anchors the block */
		if (cnt == 0)
			node->prio_anchor =
				cfg->schq_contig_list[tmp->level][0];
		cfg->static_node_pos[tmp->level]++;
		otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg);
	}
}
738 
/* Distribute the allocated schqs across @node's subtree and @node's own
 * schq-list, then return any unclaimed contiguous schqs to the AF.
 * All under qos_lock.
 */
static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
	otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
	otx2_qos_free_unused_txschq(pfvf, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}
749 
750 static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
751 					  struct otx2_qos_node *tmp,
752 					  unsigned long *child_idx_bmap,
753 					  int child_cnt)
754 {
755 	int idx;
756 
757 	if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX)
758 		return;
759 
760 	/* assign static nodes 1:1 prio mapping first, then remaining nodes */
761 	for (idx = 0; idx < child_cnt; idx++) {
762 		if (tmp->is_static && tmp->prio == idx &&
763 		    !test_bit(idx, child_idx_bmap)) {
764 			tmp->txschq_idx = idx;
765 			set_bit(idx, child_idx_bmap);
766 			return;
767 		} else if (!tmp->is_static && idx >= tmp->prio &&
768 			   !test_bit(idx, child_idx_bmap)) {
769 			tmp->txschq_idx = idx;
770 			set_bit(idx, child_idx_bmap);
771 			return;
772 		}
773 	}
774 }
775 
/* Assign contiguous-block indices to all direct children of @node
 * (recursing into each child's subtree first).  A bitmap sized for the
 * DWRR count plus all static prios arbitrates slots: static children
 * get their 1:1 prio slot in a first pass, DWRR children fill the
 * remaining slots in a second pass.
 * Returns 0 or -ENOMEM.
 */
static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node)
{
	unsigned long *child_idx_bmap;
	struct otx2_qos_node *tmp;
	int child_cnt;

	/* reset any stale assignment before redistributing */
	list_for_each_entry(tmp, &node->child_list, list)
		tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	/* allocate child index array */
	child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
	child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt),
				 sizeof(unsigned long),
				 GFP_KERNEL);
	if (!child_idx_bmap)
		return -ENOMEM;

	list_for_each_entry(tmp, &node->child_list, list)
		otx2_qos_assign_base_idx_tl(pfvf, tmp);

	/* assign base index of static priority children first */
	list_for_each_entry(tmp, &node->child_list, list) {
		if (!tmp->is_static)
			continue;
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);
	}

	/* assign base index of dwrr priority children */
	list_for_each_entry(tmp, &node->child_list, list)
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);

	kfree(child_idx_bmap);

	return 0;
}
814 
/* Locked wrapper: assign contiguous-block indices for @node's subtree.
 * Returns 0 or -ENOMEM.
 */
static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	int ret = 0;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_assign_base_idx_tl(pfvf, node);
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}
826 
/* Program the HW registers and parent topology for each mid-level node
 * on @node's child_schq_list.  @cfg is unused here; kept for symmetry
 * with the _tl variant.  Returns 0 or -EIO on any mbox failure.
 */
static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_set_parent_topology(pfvf, tmp->parent);
		if (ret)
			return -EIO;
	}

	return 0;
}
845 
/* Recursively program @node's subtree into HW: depth-first per child
 * (subtree, then the child's own registers, then its schq-list), and
 * finally @node's topology register.  Returns 0 or -EIO.
 */
static int otx2_qos_txschq_push_cfg_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node,
				       struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_list, list) {
		ret = otx2_qos_txschq_push_cfg_tl(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_push_cfg_schq(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
	}

	ret = otx2_qos_txschq_set_parent_topology(pfvf, node);
	if (ret)
		return -EIO;

	return 0;
}
871 
/* Push the whole subtree's HW programming (children first, then
 * @node's own schq-list) under qos_lock.  Returns 0 or -EIO.
 */
static int otx2_qos_txschq_push_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg);
	if (ret)
		goto out;
	ret = otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg);
out:
	mutex_unlock(&pfvf->qos.qos_lock);
	return ret;
}
887 
/* Bind freshly allocated schqs to the subtree, then program them all
 * into HW.  Returns 0 or a negative error from the push.
 */
static int otx2_qos_txschq_update_config(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	otx2_qos_txschq_fill_cfg(pfvf, node, cfg);

	return otx2_qos_txschq_push_cfg(pfvf, node, cfg);
}
896 
/* Take the single schq granted at the root's level and program the
 * root's registers into HW.  Returns 0 or a negative mbox error.
 */
static int otx2_qos_txschq_update_root_cfg(struct otx2_nic *pfvf,
					   struct otx2_qos_node *root,
					   struct otx2_qos_cfg *cfg)
{
	root->schq = cfg->schq_list[root->level][0];
	return otx2_qos_txschq_config(pfvf, root);
}
904 
905 static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg)
906 {
907 	int lvl, idx, schq;
908 
909 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
910 		for (idx = 0; idx < cfg->schq[lvl]; idx++) {
911 			schq = cfg->schq_list[lvl][idx];
912 			otx2_txschq_free_one(pfvf, lvl, schq);
913 		}
914 	}
915 
916 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
917 		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
918 			if (cfg->schq_index_used[lvl][idx]) {
919 				schq = cfg->schq_contig_list[lvl][idx];
920 				otx2_txschq_free_one(pfvf, lvl, schq);
921 			}
922 		}
923 	}
924 }
925 
/* (Re)attach the send queue @qid to @node's schq: tear down any SQ
 * previously mapped to the qid, record the new mapping, reprogram the
 * schq registers, then enable the SQ.
 */
static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
			       struct otx2_qos_node *node,
			       u16 qid)
{
	if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ)
		otx2_qos_disable_sq(pfvf, qid);

	pfvf->qos.qid_to_sqmap[qid] = node->schq;
	otx2_qos_txschq_config(pfvf, node);
	otx2_qos_enable_sq(pfvf, qid);
}
937 
/* Find the MDQ-level node on @node's schq-list and either flush its SMQ
 * (QOS_SMQ_FLUSH) or bind/enable @node's SQ on it.  Inner nodes own no
 * SQ and are skipped.
 */
static void otx2_qos_update_smq_schq(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     bool action)
{
	struct otx2_qos_node *tmp;

	if (node->qid == OTX2_QOS_QID_INNER)
		return;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, node->qid);
		}
	}
}
956 
/* Recursively apply @action (SMQ flush or SQ config) to every leaf in
 * @node's subtree.  A child at MDQ level is acted on directly; anything
 * else delegates to its schq-list.  Inner children are recursed into
 * but not acted on themselves.  Caller holds qos_lock.
 */
static void __otx2_qos_update_smq(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  bool action)
{
	struct otx2_qos_node *tmp;

	list_for_each_entry(tmp, &node->child_list, list) {
		__otx2_qos_update_smq(pfvf, tmp, action);
		if (tmp->qid == OTX2_QOS_QID_INNER)
			continue;
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, tmp->qid);
		} else {
			otx2_qos_update_smq_schq(pfvf, tmp, action);
		}
	}
}
977 
/* Apply @action (QOS_SMQ_FLUSH or QOS_CFG_SQ) to @node's subtree and to
 * @node's own schq-list, under qos_lock.
 */
static void otx2_qos_update_smq(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				bool action)
{
	mutex_lock(&pfvf->qos.qos_lock);
	__otx2_qos_update_smq(pfvf, node, action);
	otx2_qos_update_smq_schq(pfvf, node, action);
	mutex_unlock(&pfvf->qos.qos_lock);
}
987 
/* Allocate HW schqs per @cfg, assign contiguous-block indices, and —
 * if the interface is up — program HW and attach the send queues.
 * While the interface is down only the SW binding is done; HW push
 * happens on open.  Returns 0 or a negative error.
 */
static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	ret = otx2_qos_txschq_alloc(pfvf, cfg);
	if (ret)
		return -ENOSPC;

	/* NOTE(review): on failure here the schqs just allocated are not
	 * returned to the AF — possible leak; verify against upstream.
	 */
	ret = otx2_qos_assign_base_idx(pfvf, node);
	if (ret)
		return -ENOMEM;

	if (!(pfvf->netdev->flags & IFF_UP)) {
		otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
		return 0;
	}

	ret = otx2_qos_txschq_update_config(pfvf, node, cfg);
	if (ret) {
		otx2_qos_free_cfg(pfvf, cfg);
		return -EIO;
	}

	otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);

	return 0;
}
1017 
/* Recompute and push the txschq configuration for the subtree rooted
 * at @node's parent (the parent's whole child set is re-laid-out).
 */
static int otx2_qos_update_tree(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				struct otx2_qos_cfg *cfg)
{
	otx2_qos_prepare_txschq_cfg(pfvf, node->parent, cfg);
	return otx2_qos_push_txschq_cfg(pfvf, node->parent, cfg);
}
1025 
/* TC_HTB_CREATE handler: build the SW root node, allocate its single
 * txschq, and (for a TL2 root on a live interface) program it into HW.
 * Publishing maj_id last, with release semantics, makes the offload
 * visible to the datapath queue selector.
 * Returns 0 or a negative error (extack set on failure).
 */
static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defcls,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *new_cfg;
	struct otx2_qos_node *root;
	int err;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_CREATE: handle=0x%x defcls=0x%x\n",
		   htb_maj_id, htb_defcls);

	root = otx2_qos_alloc_root(pfvf);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		return err;
	}

	/* allocate txschq queue */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		err = -ENOMEM;
		goto free_root_node;
	}
	/* allocate htb root node */
	new_cfg->schq[root->level] = 1;
	err = otx2_qos_txschq_alloc(pfvf, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Error allocating txschq");
		goto free_root_node;
	}

	/* Update TL1 RR PRIO */
	if (root->level == NIX_TXSCH_LVL_TL1) {
		root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio;
		netdev_dbg(pfvf->netdev,
			   "TL1 DWRR Priority %d\n", root->child_dwrr_prio);
	}

	/* TL1 is AF-owned; only record the schq.  Same when the interface
	 * is down — HW programming is deferred to open.
	 */
	if (!(pfvf->netdev->flags & IFF_UP) ||
	    root->level == NIX_TXSCH_LVL_TL1) {
		root->schq = new_cfg->schq_list[root->level][0];
		goto out;
	}

	/* update the txschq configuration in hw */
	err = otx2_qos_txschq_update_root_cfg(pfvf, root, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Error updating txschq configuration");
		goto txschq_free;
	}

out:
	WRITE_ONCE(pfvf->qos.defcls, htb_defcls);
	/* Pairs with smp_load_acquire() in ndo_select_queue */
	smp_store_release(&pfvf->qos.maj_id, htb_maj_id);
	kfree(new_cfg);
	return 0;

txschq_free:
	otx2_qos_free_cfg(pfvf, new_cfg);
free_root_node:
	kfree(new_cfg);
	otx2_qos_sw_node_delete(pfvf, root);
	return err;
}
1093 
1094 static int otx2_qos_root_destroy(struct otx2_nic *pfvf)
1095 {
1096 	struct otx2_qos_node *root;
1097 
1098 	netdev_dbg(pfvf->netdev, "TC_HTB_DESTROY\n");
1099 
1100 	/* find root node */
1101 	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
1102 	if (!root)
1103 		return -ENOENT;
1104 
1105 	/* free the hw mappings */
1106 	otx2_qos_destroy_node(pfvf, root);
1107 
1108 	return 0;
1109 }
1110 
1111 static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum)
1112 {
1113 	u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
1114 	int err = 0;
1115 
1116 	/* Max Round robin weight supported by octeontx2 and CN10K
1117 	 * is different. Validate accordingly
1118 	 */
1119 	if (is_dev_otx2(pfvf->pdev))
1120 		err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0;
1121 	else if	(rr_weight > CN10K_MAX_RR_WEIGHT)
1122 		err = -EINVAL;
1123 
1124 	return err;
1125 }
1126 
1127 static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent,
1128 				      struct netlink_ext_ack *extack,
1129 				      struct otx2_nic *pfvf,
1130 				      u64 prio, u64 quantum)
1131 {
1132 	int err;
1133 
1134 	err = otx2_qos_validate_quantum(pfvf, quantum);
1135 	if (err) {
1136 		NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value");
1137 		return err;
1138 	}
1139 
1140 	if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) {
1141 		parent->child_dwrr_prio = prio;
1142 	} else if (prio != parent->child_dwrr_prio) {
1143 		NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed");
1144 		return -EOPNOTSUPP;
1145 	}
1146 
1147 	return 0;
1148 }
1149 
1150 static int otx2_qos_validate_configuration(struct otx2_qos_node *parent,
1151 					   struct netlink_ext_ack *extack,
1152 					   struct otx2_nic *pfvf,
1153 					   u64 prio, bool static_cfg)
1154 {
1155 	if (prio == parent->child_dwrr_prio && static_cfg) {
1156 		NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists");
1157 		return -EEXIST;
1158 	}
1159 
1160 	if (static_cfg && test_bit(prio, parent->prio_bmap)) {
1161 		NL_SET_ERR_MSG_MOD(extack,
1162 				   "Static priority child with same priority exists");
1163 		return -EEXIST;
1164 	}
1165 
1166 	return 0;
1167 }
1168 
1169 static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio)
1170 {
1171 	/* For PF, root node dwrr priority is static */
1172 	if (parent->level == NIX_TXSCH_LVL_TL1)
1173 		return;
1174 
1175 	if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) {
1176 		parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
1177 		clear_bit(prio, parent->prio_bmap);
1178 	}
1179 }
1180 
/* Decide whether a new child at @prio joins @parent's DWRR group.
 *
 * Returns true when @prio already is the parent's DWRR group priority,
 * or when an existing static child at the same priority can be
 * converted to DWRR (its quantum must be valid for this device). In
 * the conversion case the existing node is re-flagged as DWRR and the
 * parent's static/dwrr child counters are adjusted here, under the qos
 * lock. Returns false otherwise, i.e. the new child stays static.
 */
static bool is_qos_node_dwrr(struct otx2_qos_node *parent,
			     struct otx2_nic *pfvf,
			     u64 prio)
{
	struct otx2_qos_node *node;
	bool ret = false;

	if (parent->child_dwrr_prio == prio)
		return true;

	mutex_lock(&pfvf->qos.qos_lock);
	list_for_each_entry(node, &parent->child_list, list) {
		if (prio == node->prio) {
			/* A different, already-established DWRR group
			 * priority means this sibling cannot convert.
			 */
			if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO &&
			    parent->child_dwrr_prio != prio)
				continue;

			if (otx2_qos_validate_quantum(pfvf, node->quantum)) {
				netdev_err(pfvf->netdev,
					   "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d",
					    node->classid, node->quantum,
					    node->prio);
				break;
			}
			/* mark old node as dwrr */
			node->is_static = false;
			parent->child_dwrr_cnt++;
			parent->child_static_cnt--;
			ret = true;
			break;
		}
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}
1217 
1218 static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
1219 				     u32 parent_classid, u64 rate, u64 ceil,
1220 				     u64 prio, u32 quantum,
1221 				     struct netlink_ext_ack *extack)
1222 {
1223 	struct otx2_qos_cfg *old_cfg, *new_cfg;
1224 	struct otx2_qos_node *node, *parent;
1225 	int qid, ret, err;
1226 	bool static_cfg;
1227 
1228 	netdev_dbg(pfvf->netdev,
1229 		   "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n",
1230 		   classid, parent_classid, rate, ceil, prio, quantum);
1231 
1232 	if (prio > OTX2_QOS_MAX_PRIO) {
1233 		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
1234 		ret = -EOPNOTSUPP;
1235 		goto out;
1236 	}
1237 
1238 	if (!quantum || quantum > INT_MAX) {
1239 		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
1240 		ret = -EOPNOTSUPP;
1241 		goto out;
1242 	}
1243 
1244 	/* get parent node */
1245 	parent = otx2_sw_node_find(pfvf, parent_classid);
1246 	if (!parent) {
1247 		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
1248 		ret = -ENOENT;
1249 		goto out;
1250 	}
1251 	if (parent->level == NIX_TXSCH_LVL_MDQ) {
1252 		NL_SET_ERR_MSG_MOD(extack, "HTB qos max levels reached");
1253 		ret = -EOPNOTSUPP;
1254 		goto out;
1255 	}
1256 
1257 	static_cfg = !is_qos_node_dwrr(parent, pfvf, prio);
1258 	ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio,
1259 					      static_cfg);
1260 	if (ret)
1261 		goto out;
1262 
1263 	if (!static_cfg) {
1264 		ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio,
1265 						 quantum);
1266 		if (ret)
1267 			goto out;
1268 	}
1269 
1270 	if (static_cfg)
1271 		parent->child_static_cnt++;
1272 	else
1273 		parent->child_dwrr_cnt++;
1274 
1275 	set_bit(prio, parent->prio_bmap);
1276 
1277 	/* read current txschq configuration */
1278 	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
1279 	if (!old_cfg) {
1280 		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
1281 		ret = -ENOMEM;
1282 		goto reset_prio;
1283 	}
1284 	otx2_qos_read_txschq_cfg(pfvf, parent, old_cfg);
1285 
1286 	/* allocate a new sq */
1287 	qid = otx2_qos_get_qid(pfvf);
1288 	if (qid < 0) {
1289 		NL_SET_ERR_MSG_MOD(extack, "Reached max supported QOS SQ's");
1290 		ret = -ENOMEM;
1291 		goto free_old_cfg;
1292 	}
1293 
1294 	/* Actual SQ mapping will be updated after SMQ alloc */
1295 	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
1296 
1297 	/* allocate and initialize a new child node */
1298 	node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate,
1299 					    ceil, quantum, qid, static_cfg);
1300 	if (IS_ERR(node)) {
1301 		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
1302 		ret = PTR_ERR(node);
1303 		goto free_old_cfg;
1304 	}
1305 
1306 	/* push new txschq config to hw */
1307 	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
1308 	if (!new_cfg) {
1309 		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
1310 		ret = -ENOMEM;
1311 		goto free_node;
1312 	}
1313 	ret = otx2_qos_update_tree(pfvf, node, new_cfg);
1314 	if (ret) {
1315 		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
1316 		kfree(new_cfg);
1317 		otx2_qos_sw_node_delete(pfvf, node);
1318 		/* restore the old qos tree */
1319 		err = otx2_qos_txschq_update_config(pfvf, parent, old_cfg);
1320 		if (err) {
1321 			netdev_err(pfvf->netdev,
1322 				   "Failed to restore txcshq configuration");
1323 			goto free_old_cfg;
1324 		}
1325 
1326 		otx2_qos_update_smq(pfvf, parent, QOS_CFG_SQ);
1327 		goto free_old_cfg;
1328 	}
1329 
1330 	/* update tx_real_queues */
1331 	otx2_qos_update_tx_netdev_queues(pfvf);
1332 
1333 	/* free new txschq config */
1334 	kfree(new_cfg);
1335 
1336 	/* free old txschq config */
1337 	otx2_qos_free_cfg(pfvf, old_cfg);
1338 	kfree(old_cfg);
1339 
1340 	return pfvf->hw.tx_queues + qid;
1341 
1342 free_node:
1343 	otx2_qos_sw_node_delete(pfvf, node);
1344 free_old_cfg:
1345 	kfree(old_cfg);
1346 reset_prio:
1347 	if (static_cfg)
1348 		parent->child_static_cnt--;
1349 	else
1350 		parent->child_dwrr_cnt--;
1351 
1352 	clear_bit(prio, parent->prio_bmap);
1353 out:
1354 	return ret;
1355 }
1356 
1357 static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
1358 				  u16 child_classid, u64 rate, u64 ceil, u64 prio,
1359 				  u32 quantum, struct netlink_ext_ack *extack)
1360 {
1361 	struct otx2_qos_cfg *old_cfg, *new_cfg;
1362 	struct otx2_qos_node *node, *child;
1363 	bool static_cfg;
1364 	int ret, err;
1365 	u16 qid;
1366 
1367 	netdev_dbg(pfvf->netdev,
1368 		   "TC_HTB_LEAF_TO_INNER classid %04x, child %04x, rate %llu, ceil %llu\n",
1369 		   classid, child_classid, rate, ceil);
1370 
1371 	if (prio > OTX2_QOS_MAX_PRIO) {
1372 		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
1373 		ret = -EOPNOTSUPP;
1374 		goto out;
1375 	}
1376 
1377 	if (!quantum || quantum > INT_MAX) {
1378 		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
1379 		ret = -EOPNOTSUPP;
1380 		goto out;
1381 	}
1382 
1383 	/* find node related to classid */
1384 	node = otx2_sw_node_find(pfvf, classid);
1385 	if (!node) {
1386 		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
1387 		ret = -ENOENT;
1388 		goto out;
1389 	}
1390 	/* check max qos txschq level */
1391 	if (node->level == NIX_TXSCH_LVL_MDQ) {
1392 		NL_SET_ERR_MSG_MOD(extack, "HTB qos level not supported");
1393 		ret = -EOPNOTSUPP;
1394 		goto out;
1395 	}
1396 
1397 	static_cfg = !is_qos_node_dwrr(node, pfvf, prio);
1398 	if (!static_cfg) {
1399 		ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio,
1400 						 quantum);
1401 		if (ret)
1402 			goto out;
1403 	}
1404 
1405 	if (static_cfg)
1406 		node->child_static_cnt++;
1407 	else
1408 		node->child_dwrr_cnt++;
1409 
1410 	set_bit(prio, node->prio_bmap);
1411 
1412 	/* store the qid to assign to leaf node */
1413 	qid = node->qid;
1414 
1415 	/* read current txschq configuration */
1416 	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
1417 	if (!old_cfg) {
1418 		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
1419 		ret = -ENOMEM;
1420 		goto reset_prio;
1421 	}
1422 	otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
1423 
1424 	/* delete the txschq nodes allocated for this node */
1425 	otx2_qos_free_sw_node_schq(pfvf, node);
1426 
1427 	/* mark this node as htb inner node */
1428 	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
1429 
1430 	/* allocate and initialize a new child node */
1431 	child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid,
1432 					     prio, rate, ceil, quantum,
1433 					     qid, static_cfg);
1434 	if (IS_ERR(child)) {
1435 		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
1436 		ret = PTR_ERR(child);
1437 		goto free_old_cfg;
1438 	}
1439 
1440 	/* push new txschq config to hw */
1441 	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
1442 	if (!new_cfg) {
1443 		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
1444 		ret = -ENOMEM;
1445 		goto free_node;
1446 	}
1447 	ret = otx2_qos_update_tree(pfvf, child, new_cfg);
1448 	if (ret) {
1449 		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
1450 		kfree(new_cfg);
1451 		otx2_qos_sw_node_delete(pfvf, child);
1452 		/* restore the old qos tree */
1453 		WRITE_ONCE(node->qid, qid);
1454 		err = otx2_qos_alloc_txschq_node(pfvf, node);
1455 		if (err) {
1456 			netdev_err(pfvf->netdev,
1457 				   "Failed to restore old leaf node");
1458 			goto free_old_cfg;
1459 		}
1460 		err = otx2_qos_txschq_update_config(pfvf, node, old_cfg);
1461 		if (err) {
1462 			netdev_err(pfvf->netdev,
1463 				   "Failed to restore txcshq configuration");
1464 			goto free_old_cfg;
1465 		}
1466 		otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);
1467 		goto free_old_cfg;
1468 	}
1469 
1470 	/* free new txschq config */
1471 	kfree(new_cfg);
1472 
1473 	/* free old txschq config */
1474 	otx2_qos_free_cfg(pfvf, old_cfg);
1475 	kfree(old_cfg);
1476 
1477 	return 0;
1478 
1479 free_node:
1480 	otx2_qos_sw_node_delete(pfvf, child);
1481 free_old_cfg:
1482 	kfree(old_cfg);
1483 reset_prio:
1484 	if (static_cfg)
1485 		node->child_static_cnt--;
1486 	else
1487 		node->child_dwrr_cnt--;
1488 	clear_bit(prio, node->prio_bmap);
1489 out:
1490 	return ret;
1491 }
1492 
1493 static int otx2_qos_cur_leaf_nodes(struct otx2_nic *pfvf)
1494 {
1495 	int last = find_last_bit(pfvf->qos.qos_sq_bmap, pfvf->hw.tc_tx_queues);
1496 
1497 	return last ==  pfvf->hw.tc_tx_queues ? 0 : last + 1;
1498 }
1499 
1500 static void otx2_reset_qdisc(struct net_device *dev, u16 qid)
1501 {
1502 	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
1503 	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1504 
1505 	if (!qdisc)
1506 		return;
1507 
1508 	spin_lock_bh(qdisc_lock(qdisc));
1509 	qdisc_reset(qdisc);
1510 	spin_unlock_bh(qdisc_lock(qdisc));
1511 }
1512 
1513 static void otx2_cfg_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node,
1514 			 int qid)
1515 {
1516 	struct otx2_qos_node *tmp;
1517 
1518 	list_for_each_entry(tmp, &node->child_schq_list, list)
1519 		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
1520 			otx2_qos_txschq_config(pfvf, tmp);
1521 			pfvf->qos.qid_to_sqmap[qid] = tmp->schq;
1522 		}
1523 }
1524 
/* Handle TC_HTB_LEAF_DEL: delete the leaf @*classid, then keep the set
 * of active QoS tx queues contiguous by moving the highest-numbered
 * remaining leaf into the freed qid slot. When a leaf is moved,
 * *classid is rewritten to the moved node's classid so the stack can
 * update its queue mapping.
 * Returns 0 on success, -ENOENT if the node is not found.
 */
static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	int dwrr_del_node = false;
	u16 qid, moved_qid;
	u64 prio;

	netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, *classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}
	parent = node->parent;
	prio   = node->prio;
	qid    = node->qid;

	if (!node->is_static)
		dwrr_del_node = true;

	otx2_qos_disable_sq(pfvf, node->qid);

	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* Update parent's child accounting; a static child also frees
	 * its priority slot.
	 */
	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	moved_qid = otx2_qos_cur_leaf_nodes(pfvf);

	/* last node just deleted */
	if (moved_qid == 0 || moved_qid == qid)
		return 0;

	/* Highest active qid (otx2_qos_cur_leaf_nodes() returns one
	 * past it) will be relocated into the freed slot.
	 */
	moved_qid--;

	node = otx2_sw_node_find_by_qid(pfvf, moved_qid);
	if (!node)
		return 0;

	/* stop traffic to the old queue and disable
	 * SQ associated with it
	 */
	node->qid =  OTX2_QOS_QID_INNER;
	__clear_bit(moved_qid, pfvf->qos.qos_sq_bmap);
	otx2_qos_disable_sq(pfvf, moved_qid);

	otx2_reset_qdisc(pfvf->netdev, pfvf->hw.tx_queues + moved_qid);

	/* enable SQ associated with qid and
	 * update the node
	 */
	otx2_cfg_smq(pfvf, node, qid);

	otx2_qos_enable_sq(pfvf, qid);
	__set_bit(qid, pfvf->qos.qos_sq_bmap);
	node->qid = qid;

	/* Report the relocated class back to the caller */
	*classid = node->classid;
	return 0;
}
1600 
/* Handle TC_HTB_LEAF_DEL_LAST(_FORCE): delete the last child leaf of a
 * parent, turning the parent back into a leaf. The parent inherits the
 * deleted child's qid and gets fresh downstream txschq entries, which
 * are then pushed to hardware.
 * Returns 0 on success or a negative errno (reported via @extack).
 */
static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force,
				  struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	struct otx2_qos_cfg *new_cfg;
	int dwrr_del_node = false;
	u64 prio;
	int err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_DEL_LAST classid %04x\n", classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}

	/* save qid for use by parent */
	qid = node->qid;
	prio = node->prio;

	parent = otx2_sw_node_find(pfvf, node->parent->classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		return -ENOENT;
	}

	if (!node->is_static)
		dwrr_del_node = true;

	/* destroy the leaf node */
	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* Update parent's child accounting; a static child also frees
	 * its priority slot.
	 */
	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	/* create downstream txschq entries to parent */
	err = otx2_qos_alloc_txschq_node(pfvf, parent);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB failed to create txsch configuration");
		return err;
	}
	/* Parent becomes a leaf: adopt the freed qid */
	WRITE_ONCE(parent->qid, qid);
	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		return -ENOMEM;
	}
	/* fill txschq cfg and push txschq cfg to hw */
	otx2_qos_fill_cfg_schq(parent, new_cfg);
	err = otx2_qos_push_txschq_cfg(pfvf, parent, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		return err;
	}
	kfree(new_cfg);

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	return 0;
}
1682 
1683 void otx2_clean_qos_queues(struct otx2_nic *pfvf)
1684 {
1685 	struct otx2_qos_node *root;
1686 
1687 	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
1688 	if (!root)
1689 		return;
1690 
1691 	otx2_qos_update_smq(pfvf, root, QOS_SMQ_FLUSH);
1692 }
1693 
1694 void otx2_qos_config_txschq(struct otx2_nic *pfvf)
1695 {
1696 	struct otx2_qos_node *root;
1697 	int err;
1698 
1699 	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
1700 	if (!root)
1701 		return;
1702 
1703 	if (root->level != NIX_TXSCH_LVL_TL1) {
1704 		err = otx2_qos_txschq_config(pfvf, root);
1705 		if (err) {
1706 			netdev_err(pfvf->netdev, "Error update txschq configuration\n");
1707 			goto root_destroy;
1708 		}
1709 	}
1710 
1711 	err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL);
1712 	if (err) {
1713 		netdev_err(pfvf->netdev, "Error update txschq configuration\n");
1714 		goto root_destroy;
1715 	}
1716 
1717 	otx2_qos_update_smq(pfvf, root, QOS_CFG_SQ);
1718 	return;
1719 
1720 root_destroy:
1721 	netdev_err(pfvf->netdev, "Failed to update Scheduler/Shaping config in Hardware\n");
1722 	/* Free resources allocated */
1723 	otx2_qos_root_destroy(pfvf);
1724 }
1725 
1726 int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
1727 {
1728 	struct otx2_nic *pfvf = netdev_priv(ndev);
1729 	int res;
1730 
1731 	switch (htb->command) {
1732 	case TC_HTB_CREATE:
1733 		return otx2_qos_root_add(pfvf, htb->parent_classid,
1734 					 htb->classid, htb->extack);
1735 	case TC_HTB_DESTROY:
1736 		return otx2_qos_root_destroy(pfvf);
1737 	case TC_HTB_LEAF_ALLOC_QUEUE:
1738 		res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid,
1739 						htb->parent_classid,
1740 						htb->rate, htb->ceil,
1741 						htb->prio, htb->quantum,
1742 						htb->extack);
1743 		if (res < 0)
1744 			return res;
1745 		htb->qid = res;
1746 		return 0;
1747 	case TC_HTB_LEAF_TO_INNER:
1748 		return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid,
1749 					      htb->classid, htb->rate,
1750 					      htb->ceil, htb->prio,
1751 					      htb->quantum, htb->extack);
1752 	case TC_HTB_LEAF_DEL:
1753 		return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack);
1754 	case TC_HTB_LEAF_DEL_LAST:
1755 	case TC_HTB_LEAF_DEL_LAST_FORCE:
1756 		return otx2_qos_leaf_del_last(pfvf, htb->classid,
1757 				htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
1758 					      htb->extack);
1759 	case TC_HTB_LEAF_QUERY_QUEUE:
1760 		res = otx2_get_txq_by_classid(pfvf, htb->classid);
1761 		htb->qid = res;
1762 		return 0;
1763 	case TC_HTB_NODE_MODIFY:
1764 		fallthrough;
1765 	default:
1766 		return -EOPNOTSUPP;
1767 	}
1768 }
1769