xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/errno.h>
6 #include <linux/netdevice.h>
7 #include <net/pkt_cls.h>
8 #include <net/red.h>
9 
10 #include "spectrum.h"
11 #include "reg.h"
12 
/* Map a prio band, or a child class index (1-based), to a hardware traffic
 * class; the mapping inverts the numbering (band 0 -> highest tclass).
 * Arguments are parenthesized so the macros are safe with expressions.
 */
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) \
	(IEEE_8021QAZ_MAX_TCS - (band) - 1)
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child) - 1)
16 
/* Kinds of qdiscs this driver can offload. */
enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
};
22 
/* Per-qdisc-type callbacks. Each callback operates on one port and one
 * mlxsw_sp_qdisc slot.
 */
struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	/* Validate qdisc parameters; called before replace. */
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params);
	/* Apply (or re-apply) the qdisc configuration to the hardware. */
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	/* Tear the qdisc's configuration down from the hardware. */
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* Fill generic qdisc stats (bstats/qstats). */
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	/* Fill type-specific stats (e.g. struct red_stats). */
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	/* Reset the stats baselines to the current hardware counters. */
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* unoffload - to be used for a qdisc that stops being offloaded without
	 * being destroyed.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
};
46 
/* State of one offloaded qdisc: either the port's root qdisc or one of the
 * per-traffic-class child slots.
 */
struct mlxsw_sp_qdisc {
	u32 handle;	/* TC handle; TC_H_UNSPEC when not offloaded */
	u8 tclass_num;	/* hardware traffic class backing this qdisc */
	u8 prio_bitmap;	/* bit i set: priority i is mapped to this qdisc */
	/* Baseline for type-specific stats; the free-running hardware
	 * counters are reported as deltas against this snapshot.
	 */
	union {
		struct red_stats red;
	} xstats_base;
	/* Baseline for generic stats, same delta scheme as xstats_base. */
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		u64 backlog;	/* kept in hardware cells, not bytes */
	} stats_base;

	struct mlxsw_sp_qdisc_ops *ops;	/* NULL when the slot is empty */
};
64 
65 static bool
66 mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
67 		       enum mlxsw_sp_qdisc_type type)
68 {
69 	return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
70 	       mlxsw_sp_qdisc->ops->type == type &&
71 	       mlxsw_sp_qdisc->handle == handle;
72 }
73 
74 static struct mlxsw_sp_qdisc *
75 mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
76 		    bool root_only)
77 {
78 	int tclass, child_index;
79 
80 	if (parent == TC_H_ROOT)
81 		return mlxsw_sp_port->root_qdisc;
82 
83 	if (root_only || !mlxsw_sp_port->root_qdisc ||
84 	    !mlxsw_sp_port->root_qdisc->ops ||
85 	    TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
86 	    TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
87 		return NULL;
88 
89 	child_index = TC_H_MIN(parent);
90 	tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
91 	return &mlxsw_sp_port->tclass_qdiscs[tclass];
92 }
93 
94 static struct mlxsw_sp_qdisc *
95 mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
96 {
97 	int i;
98 
99 	if (mlxsw_sp_port->root_qdisc->handle == handle)
100 		return mlxsw_sp_port->root_qdisc;
101 
102 	if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
103 		return NULL;
104 
105 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
106 		if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
107 			return &mlxsw_sp_port->tclass_qdiscs[i];
108 
109 	return NULL;
110 }
111 
112 static int
113 mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
114 		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
115 {
116 	int err = 0;
117 
118 	if (!mlxsw_sp_qdisc)
119 		return 0;
120 
121 	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
122 		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
123 						   mlxsw_sp_qdisc);
124 
125 	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
126 	mlxsw_sp_qdisc->ops = NULL;
127 	return err;
128 }
129 
/* Offload a new qdisc configuration into the slot @mlxsw_sp_qdisc, or
 * re-configure the qdisc already offloaded there. On failure the slot is
 * cleaned up and the qdisc ends up not offloaded.
 */
static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	int err;

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
		/* In case this location contained a qdisc of the same type we
		 * can override the old qdisc configuration in place.
		 * Otherwise, we need to remove the old qdisc before setting
		 * the new one.
		 */
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_bad_param;

	err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_config;

	/* A handle change means a new qdisc was installed rather than an
	 * existing one reconfigured: bind the ops and restart the stats
	 * baselines.
	 */
	if (mlxsw_sp_qdisc->handle != handle) {
		mlxsw_sp_qdisc->ops = ops;
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

err_bad_param:
err_config:
	/* Only a qdisc that was already offloaded under this handle needs to
	 * be told it is becoming unoffloaded.
	 */
	if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}
169 
170 static int
171 mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
172 			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
173 			 struct tc_qopt_offload_stats *stats_ptr)
174 {
175 	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
176 	    mlxsw_sp_qdisc->ops->get_stats)
177 		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
178 						      mlxsw_sp_qdisc,
179 						      stats_ptr);
180 
181 	return -EOPNOTSUPP;
182 }
183 
184 static int
185 mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
186 			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
187 			  void *xstats_ptr)
188 {
189 	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
190 	    mlxsw_sp_qdisc->ops->get_xstats)
191 		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
192 						      mlxsw_sp_qdisc,
193 						      xstats_ptr);
194 
195 	return -EOPNOTSUPP;
196 }
197 
198 static void
199 mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
200 				       u8 prio_bitmap, u64 *tx_packets,
201 				       u64 *tx_bytes)
202 {
203 	int i;
204 
205 	*tx_packets = 0;
206 	*tx_bytes = 0;
207 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
208 		if (prio_bitmap & BIT(i)) {
209 			*tx_packets += xstats->tx_packets[i];
210 			*tx_bytes += xstats->tx_bytes[i];
211 		}
212 	}
213 }
214 
/* Program RED/ECN for one traffic class of the port: write the CWTP
 * register with min/max thresholds (rounded up to the register's
 * granularity) and the mark probability, then bind and enable the profile
 * via CWTPM, optionally in ECN-marking mode.
 */
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
240 
/* Detach the congestion profile from the traffic class via CWTPM,
 * disabling RED/ECN for it.
 */
static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   int tclass_num)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
252 
/* Snapshot the current hardware counters as the RED qdisc's stats
 * baselines, so subsequent reads report only deltas from this point.
 */
static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	/* NOTE(review): xstats->ecn is read without a tclass index, unlike
	 * wred_drop/tail_drop - presumably a port-wide counter; confirm.
	 */
	red_base->prob_mark = xstats->ecn;
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = xstats->tail_drop[tclass_num];

	/* Derived baselines mirror how mlxsw_sp_qdisc_get_red_stats()
	 * composes overlimits and drops.
	 */
	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}
279 
280 static int
281 mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
282 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
283 {
284 	struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
285 
286 	if (root_qdisc != mlxsw_sp_qdisc)
287 		root_qdisc->stats_base.backlog -=
288 					mlxsw_sp_qdisc->stats_base.backlog;
289 
290 	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
291 						  mlxsw_sp_qdisc->tclass_num);
292 }
293 
294 static int
295 mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
296 				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
297 				void *params)
298 {
299 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
300 	struct tc_red_qopt_offload_params *p = params;
301 
302 	if (p->min > p->max) {
303 		dev_err(mlxsw_sp->bus_info->dev,
304 			"spectrum: RED: min %u is bigger then max %u\n", p->min,
305 			p->max);
306 		return -EINVAL;
307 	}
308 	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
309 					GUARANTEED_SHARED_BUFFER)) {
310 		dev_err(mlxsw_sp->bus_info->dev,
311 			"spectrum: RED: max value %u is too big\n", p->max);
312 		return -EINVAL;
313 	}
314 	if (p->min == 0 || p->max == 0) {
315 		dev_err(mlxsw_sp->bus_info->dev,
316 			"spectrum: RED: 0 value is illegal for min and max\n");
317 		return -EINVAL;
318 	}
319 	return 0;
320 }
321 
/* Program validated RED parameters into the qdisc's traffic class. */
static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	u32 min, max;
	u64 prob;

	/* calculate probability in percentage; p->probability is assumed to
	 * be a fixed-point fraction scaled by 2^32, hence the two round-up
	 * divisions by 2^16 - TODO confirm against the RED offload contract.
	 */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	/* Thresholds arrive in bytes; the hardware works in buffer cells. */
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
						 max, prob, p->is_ecn);
}
343 
344 static void
345 mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
346 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
347 			     void *params)
348 {
349 	struct tc_red_qopt_offload_params *p = params;
350 	u64 backlog;
351 
352 	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
353 				       mlxsw_sp_qdisc->stats_base.backlog);
354 	p->qstats->backlog -= backlog;
355 	mlxsw_sp_qdisc->stats_base.backlog = 0;
356 }
357 
358 static int
359 mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
360 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
361 			      void *xstats_ptr)
362 {
363 	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
364 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
365 	struct mlxsw_sp_port_xstats *xstats;
366 	struct red_stats *res = xstats_ptr;
367 	int early_drops, marks, pdrops;
368 
369 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
370 
371 	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
372 	marks = xstats->ecn - xstats_base->prob_mark;
373 	pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
374 
375 	res->pdrop += pdrops;
376 	res->prob_drop += early_drops;
377 	res->prob_mark += marks;
378 
379 	xstats_base->pdrop += pdrops;
380 	xstats_base->prob_drop += early_drops;
381 	xstats_base->prob_mark += marks;
382 	return 0;
383 }
384 
/* Report generic RED qdisc stats as deltas from the stored baselines and
 * advance the baselines.
 */
static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_bytes, tx_packets, overlimits, drops, backlog;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	/* bstats are summed over the priorities mapped to this qdisc. */
	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &tx_packets, &tx_bytes);
	tx_bytes = tx_bytes - stats_base->tx_bytes;
	tx_packets = tx_packets - stats_base->tx_packets;

	/* Overlimits are RED early drops plus ECN marks; drops are RED early
	 * drops plus tail drops.
	 */
	overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
		     stats_base->overlimits;
	drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
		stats_base->drops;
	backlog = xstats->backlog[tclass_num];

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->overlimits += overlimits;
	stats_ptr->qstats->drops += drops;
	/* backlog is tracked in cells; report the byte-converted delta. */
	stats_ptr->qstats->backlog +=
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     backlog) -
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     stats_base->backlog);

	/* Advance the baselines so the next read reports fresh deltas. */
	stats_base->backlog = backlog;
	stats_base->drops +=  drops;
	stats_base->overlimits += overlimits;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
	return 0;
}
426 
/* Traffic class used for the root qdisc and for restoring the
 * priority->TC mapping when a prio qdisc is destroyed.
 */
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

/* Callbacks implementing RED qdisc offload. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};
439 
440 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
441 			  struct tc_red_qopt_offload *p)
442 {
443 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
444 
445 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
446 	if (!mlxsw_sp_qdisc)
447 		return -EOPNOTSUPP;
448 
449 	if (p->command == TC_RED_REPLACE)
450 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
451 					      mlxsw_sp_qdisc,
452 					      &mlxsw_sp_qdisc_ops_red,
453 					      &p->set);
454 
455 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
456 				    MLXSW_SP_QDISC_RED))
457 		return -EOPNOTSUPP;
458 
459 	switch (p->command) {
460 	case TC_RED_DESTROY:
461 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
462 	case TC_RED_XSTATS:
463 		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
464 						 p->xstats);
465 	case TC_RED_STATS:
466 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
467 						&p->stats);
468 	default:
469 		return -EOPNOTSUPP;
470 	}
471 }
472 
473 static int
474 mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
475 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
476 {
477 	int i;
478 
479 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
480 		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
481 					  MLXSW_SP_PORT_DEFAULT_TCLASS);
482 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
483 				       &mlxsw_sp_port->tclass_qdiscs[i]);
484 		mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
485 	}
486 
487 	return 0;
488 }
489 
490 static int
491 mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
492 				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
493 				 void *params)
494 {
495 	struct tc_prio_qopt_offload_params *p = params;
496 
497 	if (p->bands > IEEE_8021QAZ_MAX_TCS)
498 		return -EOPNOTSUPP;
499 
500 	return 0;
501 }
502 
/* Program the priority->traffic-class mapping described by @params and
 * refresh the per-band child qdisc state; bands beyond p->bands are torn
 * down.
 */
static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	struct mlxsw_sp_qdisc *child_qdisc;
	int tclass, i, band, backlog;
	u8 old_priomap;
	int err;

	for (band = 0; band < p->bands; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
		old_priomap = child_qdisc->prio_bitmap;
		child_qdisc->prio_bitmap = 0;
		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (p->priomap[i] == band) {
				child_qdisc->prio_bitmap |= BIT(i);
				/* Skip the register write when the priority
				 * was already mapped to this traffic class.
				 */
				if (BIT(i) & old_priomap)
					continue;
				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
								i, tclass);
				if (err)
					return err;
			}
		}
		/* A changed priority set invalidates the child's stats
		 * baselines; reset them but keep the backlog contribution.
		 */
		if (old_priomap != child_qdisc->prio_bitmap &&
		    child_qdisc->ops && child_qdisc->ops->clean_stats) {
			backlog = child_qdisc->stats_base.backlog;
			child_qdisc->ops->clean_stats(mlxsw_sp_port,
						      child_qdisc);
			child_qdisc->stats_base.backlog = backlog;
		}
	}
	/* Bands that no longer exist lose their offloaded child qdiscs. */
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
		child_qdisc->prio_bitmap = 0;
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
	}
	return 0;
}
546 
547 static void
548 mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
549 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
550 			      void *params)
551 {
552 	struct tc_prio_qopt_offload_params *p = params;
553 	u64 backlog;
554 
555 	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
556 				       mlxsw_sp_qdisc->stats_base.backlog);
557 	p->qstats->backlog -= backlog;
558 }
559 
/* Report prio stats as deltas from the stored baselines: bstats come from
 * the port-wide counters, drops and backlog are summed over all traffic
 * classes.
 */
static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;
	int i;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
	tx_packets = stats->tx_packets - stats_base->tx_packets;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		drops += xstats->tail_drop[i];
		drops += xstats->wred_drop[i];
		backlog += xstats->backlog[i];
	}
	drops = drops - stats_base->drops;

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->drops += drops;
	/* backlog is tracked in cells; report the byte-converted delta. */
	stats_ptr->qstats->backlog +=
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     backlog) -
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     stats_base->backlog);
	/* Advance the baselines so the next read reports fresh deltas. */
	stats_base->backlog = backlog;
	stats_base->drops += drops;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
	return 0;
}
598 
599 static void
600 mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
601 					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
602 {
603 	struct mlxsw_sp_qdisc_stats *stats_base;
604 	struct mlxsw_sp_port_xstats *xstats;
605 	struct rtnl_link_stats64 *stats;
606 	int i;
607 
608 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
609 	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
610 	stats_base = &mlxsw_sp_qdisc->stats_base;
611 
612 	stats_base->tx_packets = stats->tx_packets;
613 	stats_base->tx_bytes = stats->tx_bytes;
614 
615 	stats_base->drops = 0;
616 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
617 		stats_base->drops += xstats->tail_drop[i];
618 		stats_base->drops += xstats->wred_drop[i];
619 	}
620 
621 	mlxsw_sp_qdisc->stats_base.backlog = 0;
622 }
623 
/* Callbacks implementing prio qdisc offload; prio has no type-specific
 * xstats, so .get_xstats is left unset.
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};
633 
634 /* Grafting is not supported in mlxsw. It will result in un-offloading of the
635  * grafted qdisc as well as the qdisc in the qdisc new location.
636  * (However, if the graft is to the location where the qdisc is already at, it
637  * will be ignored completely and won't cause un-offloading).
638  */
639 static int
640 mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
641 			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
642 			  struct tc_prio_qopt_offload_graft_params *p)
643 {
644 	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
645 	struct mlxsw_sp_qdisc *old_qdisc;
646 
647 	/* Check if the grafted qdisc is already in its "new" location. If so -
648 	 * nothing needs to be done.
649 	 */
650 	if (p->band < IEEE_8021QAZ_MAX_TCS &&
651 	    mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
652 		return 0;
653 
654 	/* See if the grafted qdisc is already offloaded on any tclass. If so,
655 	 * unoffload it.
656 	 */
657 	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
658 						  p->child_handle);
659 	if (old_qdisc)
660 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
661 
662 	mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
663 			       &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
664 	return -EOPNOTSUPP;
665 }
666 
667 int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
668 			   struct tc_prio_qopt_offload *p)
669 {
670 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
671 
672 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
673 	if (!mlxsw_sp_qdisc)
674 		return -EOPNOTSUPP;
675 
676 	if (p->command == TC_PRIO_REPLACE)
677 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
678 					      mlxsw_sp_qdisc,
679 					      &mlxsw_sp_qdisc_ops_prio,
680 					      &p->replace_params);
681 
682 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
683 				    MLXSW_SP_QDISC_PRIO))
684 		return -EOPNOTSUPP;
685 
686 	switch (p->command) {
687 	case TC_PRIO_DESTROY:
688 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
689 	case TC_PRIO_STATS:
690 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
691 						&p->stats);
692 	case TC_PRIO_GRAFT:
693 		return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
694 						 &p->graft_params);
695 	default:
696 		return -EOPNOTSUPP;
697 	}
698 }
699 
700 int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
701 {
702 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
703 	int i;
704 
705 	mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
706 	if (!mlxsw_sp_qdisc)
707 		goto err_root_qdisc_init;
708 
709 	mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
710 	mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
711 	mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
712 
713 	mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
714 				 sizeof(*mlxsw_sp_qdisc),
715 				 GFP_KERNEL);
716 	if (!mlxsw_sp_qdisc)
717 		goto err_tclass_qdiscs_init;
718 
719 	mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
720 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
721 		mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;
722 
723 	return 0;
724 
725 err_tclass_qdiscs_init:
726 	kfree(mlxsw_sp_port->root_qdisc);
727 err_root_qdisc_init:
728 	return -ENOMEM;
729 }
730 
/* Free the per-traffic-class and root qdisc state allocated by
 * mlxsw_sp_tc_qdisc_init().
 */
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->tclass_qdiscs);
	kfree(mlxsw_sp_port->root_qdisc);
}
736