xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/errno.h>
6 #include <linux/netdevice.h>
7 #include <net/pkt_cls.h>
8 #include <net/red.h>
9 
10 #include "spectrum.h"
11 #include "spectrum_span.h"
12 #include "reg.h"
13 
/* Map a PRIO/ETS band number to a hardware traffic class. Band 0 is the
 * highest-priority band while tclass 7 is the highest-priority traffic
 * class, hence the reversed mapping. Arguments are parenthesized so that
 * expression arguments (e.g. "i + 1") expand correctly.
 */
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)
/* TC class IDs of PRIO/ETS are 1-based; bands are 0-based. */
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS(((child) - 1))
17 
/* Kinds of qdisc this driver can offload. MLXSW_SP_QDISC_NO_QDISC marks a
 * slot with nothing offloaded.
 */
enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
	MLXSW_SP_QDISC_ETS,
	MLXSW_SP_QDISC_TBF,
	MLXSW_SP_QDISC_FIFO,
};
26 
27 struct mlxsw_sp_qdisc;
28 
/* Per-qdisc-type offload callbacks, operating on nodes of the per-port
 * offloaded qdisc tree.
 */
struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	/* Validate offload parameters before any state is changed. */
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    void *params);
	/* Create or reconfigure the hardware state of this qdisc. */
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	/* Undo the hardware state set up by replace. */
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* Fill generic TC offload statistics. */
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	/* Fill type-specific extended statistics. */
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	/* Re-baseline the stored statistics snapshots. */
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* unoffload - to be used for a qdisc that stops being offloaded without
	 * being destroyed.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	/* Map a TC class reference onto one of this qdisc's child slots. */
	struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
					     u32 parent);
	unsigned int num_classes;

	/* Optional: which priorities / which traffic class a given child of
	 * this qdisc is responsible for. Absent callbacks mean "ask my own
	 * parent" (see mlxsw_sp_qdisc_get_prio_bitmap() and friends).
	 */
	u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct mlxsw_sp_qdisc *child);
	int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct mlxsw_sp_qdisc *child);
};
59 
/* Configuration of one PRIO/ETS band: the priorities that feed it and the
 * hardware traffic class it is mapped to.
 */
struct mlxsw_sp_qdisc_ets_band {
	u8 prio_bitmap;
	int tclass_num;
};

/* Per-band state of an offloaded PRIO/ETS qdisc. */
struct mlxsw_sp_qdisc_ets_data {
	struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS];
};
68 
/* One node of the per-port offloaded qdisc tree. A node with ops == NULL
 * is a free slot (nothing offloaded there).
 */
struct mlxsw_sp_qdisc {
	u32 handle;		/* TC handle; TC_H_UNSPEC when unused. */
	union {
		struct red_stats red;
	} xstats_base;		/* Baseline for type-specific xstats. */
	/* Snapshot of HW counters taken when stats were last cleaned;
	 * reported statistics are deltas against these.
	 */
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		u64 backlog;	/* In cells, not bytes. */
	} stats_base;

	union {
		struct mlxsw_sp_qdisc_ets_data *ets_data;
	};

	struct mlxsw_sp_qdisc_ops *ops;
	struct mlxsw_sp_qdisc *parent;	/* NULL only for the root qdisc. */
	struct mlxsw_sp_qdisc *qdiscs;	/* Array of num_classes child slots. */
	unsigned int num_classes;
};
91 
/* Per-port qdisc offload state, hanging off mlxsw_sp_port::qdisc. */
struct mlxsw_sp_qdisc_state {
	struct mlxsw_sp_qdisc root_qdisc;	/* Anchor of the offload tree. */

	/* When a PRIO or ETS are added, the invisible FIFOs in their bands are
	 * created first. When notifications for these FIFOs arrive, it is not
	 * known what qdisc their parent handle refers to. It could be a
	 * newly-created PRIO that will replace the currently-offloaded one, or
	 * it could be e.g. a RED that will be attached below it.
	 *
	 * As the notifications start to arrive, use them to note what the
	 * future parent handle is, and keep track of which child FIFOs were
	 * seen. Then when the parent is known, retroactively offload those
	 * FIFOs.
	 */
	u32 future_handle;
	bool future_fifos[IEEE_8021QAZ_MAX_TCS];
	struct mutex lock; /* Protects qdisc state. */
};
110 
111 static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,u32 handle)112 mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle)
113 {
114 	return mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->handle == handle;
115 }
116 
117 static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc * qdisc,struct mlxsw_sp_qdisc * (* pre)(struct mlxsw_sp_qdisc *,void *),void * data)118 mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc *qdisc,
119 		    struct mlxsw_sp_qdisc *(*pre)(struct mlxsw_sp_qdisc *,
120 						  void *),
121 		    void *data)
122 {
123 	struct mlxsw_sp_qdisc *tmp;
124 	unsigned int i;
125 
126 	if (pre) {
127 		tmp = pre(qdisc, data);
128 		if (tmp)
129 			return tmp;
130 	}
131 
132 	if (qdisc->ops) {
133 		for (i = 0; i < qdisc->num_classes; i++) {
134 			tmp = &qdisc->qdiscs[i];
135 			if (qdisc->ops) {
136 				tmp = mlxsw_sp_qdisc_walk(tmp, pre, data);
137 				if (tmp)
138 					return tmp;
139 			}
140 		}
141 	}
142 
143 	return NULL;
144 }
145 
146 static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc * qdisc,void * data)147 mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
148 {
149 	u32 parent = *(u32 *)data;
150 
151 	if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) {
152 		if (qdisc->ops->find_class)
153 			return qdisc->ops->find_class(qdisc, parent);
154 	}
155 
156 	return NULL;
157 }
158 
159 static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port * mlxsw_sp_port,u32 parent)160 mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent)
161 {
162 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
163 
164 	if (!qdisc_state)
165 		return NULL;
166 	if (parent == TC_H_ROOT)
167 		return &qdisc_state->root_qdisc;
168 	return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
169 				   mlxsw_sp_qdisc_walk_cb_find, &parent);
170 }
171 
172 static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc * qdisc,void * data)173 mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc *qdisc, void *data)
174 {
175 	u32 handle = *(u32 *)data;
176 
177 	if (qdisc->ops && qdisc->handle == handle)
178 		return qdisc;
179 	return NULL;
180 }
181 
/* Find an offloaded qdisc by its full handle anywhere in the tree, or NULL
 * if no offload state exists or no node matches.
 */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;

	if (!qdisc_state)
		return NULL;
	return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
				   mlxsw_sp_qdisc_walk_cb_find_by_handle,
				   &handle);
}
193 
194 static void
mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc * mlxsw_sp_qdisc)195 mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
196 {
197 	struct mlxsw_sp_qdisc *tmp;
198 
199 	for (tmp = mlxsw_sp_qdisc->parent; tmp; tmp = tmp->parent)
200 		tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
201 }
202 
mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc)203 static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port,
204 					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
205 {
206 	struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
207 
208 	if (!parent)
209 		return 0xff;
210 	if (!parent->ops->get_prio_bitmap)
211 		return mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, parent);
212 	return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc);
213 }
214 
215 #define MLXSW_SP_PORT_DEFAULT_TCLASS 0
216 
mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc)217 static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port,
218 					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
219 {
220 	struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
221 
222 	if (!parent)
223 		return MLXSW_SP_PORT_DEFAULT_TCLASS;
224 	if (!parent->ops->get_tclass_num)
225 		return mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, parent);
226 	return parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc);
227 }
228 
/* Tear down @mlxsw_sp_qdisc and, recursively, all of its children.
 *
 * If this is the port's root qdisc, the port headroom is first switched
 * back to DCB-managed mode. The qdisc's hardware state is then undone via
 * ops->destroy, its statistics baseline cleared, and the slot marked as
 * free. Returns the headroom error, if any, otherwise the destroy
 * callback's error.
 */
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	int err_hdroom = 0;
	int err = 0;
	int i;

	if (!mlxsw_sp_qdisc)
		return 0;

	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;

		/* Going back to DCB mode: recompute the whole buffer
		 * layout for the default configuration.
		 */
		hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
		err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	}

	if (!mlxsw_sp_qdisc->ops)
		return 0;

	/* Destroy children before the parent, then remove this qdisc's
	 * backlog contribution from all remaining ancestors.
	 */
	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
				       &mlxsw_sp_qdisc->qdiscs[i]);
	mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
	if (mlxsw_sp_qdisc->ops->destroy)
		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	if (mlxsw_sp_qdisc->ops->clean_stats)
		mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);

	/* Mark the slot free and release the child array. */
	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	mlxsw_sp_qdisc->num_classes = 0;
	kfree(mlxsw_sp_qdisc->qdiscs);
	mlxsw_sp_qdisc->qdiscs = NULL;
	return err_hdroom ?: err;
}
271 
/* Restrictions accumulated while descending the qdisc tree. The struct is
 * passed by value, so a restriction set at one level applies only on the
 * path from there down to the leaves.
 */
struct mlxsw_sp_qdisc_tree_validate {
	bool forbid_ets;
	bool forbid_root_tbf;
	bool forbid_tbf;
	bool forbid_red;
};
278 
279 static int
280 __mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
281 			       struct mlxsw_sp_qdisc_tree_validate validate);
282 
283 static int
mlxsw_sp_qdisc_tree_validate_children(struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,struct mlxsw_sp_qdisc_tree_validate validate)284 mlxsw_sp_qdisc_tree_validate_children(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
285 				      struct mlxsw_sp_qdisc_tree_validate validate)
286 {
287 	unsigned int i;
288 	int err;
289 
290 	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
291 		err = __mlxsw_sp_qdisc_tree_validate(&mlxsw_sp_qdisc->qdiscs[i],
292 						     validate);
293 		if (err)
294 			return err;
295 	}
296 
297 	return 0;
298 }
299 
/* Check that this node is allowed given the restrictions inherited from
 * its ancestors, tighten the restrictions for its own subtree, and
 * recurse. Returns -EINVAL on a forbidden combination.
 */
static int
__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       struct mlxsw_sp_qdisc_tree_validate validate)
{
	/* A free slot imposes no constraints. */
	if (!mlxsw_sp_qdisc->ops)
		return 0;

	switch (mlxsw_sp_qdisc->ops->type) {
	case MLXSW_SP_QDISC_FIFO:
		break;
	case MLXSW_SP_QDISC_RED:
		/* At most one RED on a path; below it no further RED,
		 * port-level TBF or PRIO/ETS is allowed.
		 */
		if (validate.forbid_red)
			return -EINVAL;
		validate.forbid_red = true;
		validate.forbid_root_tbf = true;
		validate.forbid_ets = true;
		break;
	case MLXSW_SP_QDISC_TBF:
		if (validate.forbid_root_tbf) {
			if (validate.forbid_tbf)
				return -EINVAL;
			/* This is a TC TBF. */
			validate.forbid_tbf = true;
			validate.forbid_ets = true;
		} else {
			/* This is root TBF. */
			validate.forbid_root_tbf = true;
		}
		break;
	case MLXSW_SP_QDISC_PRIO:
	case MLXSW_SP_QDISC_ETS:
		/* Only one level of PRIO/ETS on any path. */
		if (validate.forbid_ets)
			return -EINVAL;
		validate.forbid_root_tbf = true;
		validate.forbid_ets = true;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return mlxsw_sp_qdisc_tree_validate_children(mlxsw_sp_qdisc, validate);
}
343 
mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_port * mlxsw_sp_port)344 static int mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_port *mlxsw_sp_port)
345 {
346 	struct mlxsw_sp_qdisc_tree_validate validate = {};
347 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
348 
349 	mlxsw_sp_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
350 	return __mlxsw_sp_qdisc_tree_validate(mlxsw_sp_qdisc, validate);
351 }
352 
/* Offload a qdisc into the free slot @mlxsw_sp_qdisc: validate parameters,
 * allocate child slots, switch the port headroom to TC mode if this is the
 * root, validate the resulting tree shape, and finally program the
 * hardware through ops->replace. On any failure, all the above is rolled
 * back in reverse order.
 */
static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
				 u32 handle,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	struct mlxsw_sp_hdroom orig_hdroom;
	unsigned int i;
	int err;

	err = ops->check_params(mlxsw_sp_port, params);
	if (err)
		return err;

	if (ops->num_classes) {
		mlxsw_sp_qdisc->qdiscs = kzalloc_objs(*mlxsw_sp_qdisc->qdiscs,
						      ops->num_classes);
		if (!mlxsw_sp_qdisc->qdiscs)
			return -ENOMEM;

		/* Children need to know their parent for backlog and
		 * tclass/priority resolution.
		 */
		for (i = 0; i < ops->num_classes; i++)
			mlxsw_sp_qdisc->qdiscs[i].parent = mlxsw_sp_qdisc;
	}

	/* Keep a copy for rollback in case the offload fails. */
	orig_hdroom = *mlxsw_sp_port->hdroom;
	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = orig_hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

		err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
		if (err)
			goto err_hdroom_configure;
	}

	/* Fill in the slot before validating, so the validator sees the
	 * tree as it would look with this qdisc in place.
	 */
	mlxsw_sp_qdisc->num_classes = ops->num_classes;
	mlxsw_sp_qdisc->ops = ops;
	mlxsw_sp_qdisc->handle = handle;
	err = mlxsw_sp_qdisc_tree_validate(mlxsw_sp_port);
	if (err)
		goto err_replace;

	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
	if (err)
		goto err_replace;

	return 0;

err_replace:
	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	mlxsw_sp_qdisc->num_classes = 0;
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
err_hdroom_configure:
	kfree(mlxsw_sp_qdisc->qdiscs);
	mlxsw_sp_qdisc->qdiscs = NULL;
	return err;
}
414 
/* Reconfigure an already-offloaded qdisc in place with new parameters. On
 * failure the qdisc stops being offloaded: it is unoffloaded (backlog
 * handed back to software) and destroyed.
 */
static int
mlxsw_sp_qdisc_change(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params)
{
	struct mlxsw_sp_qdisc_ops *ops = mlxsw_sp_qdisc->ops;
	int err;

	err = ops->check_params(mlxsw_sp_port, params);
	if (err)
		goto unoffload;

	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
	if (err)
		goto unoffload;

	/* Check if the Qdisc changed. That includes a situation where an
	 * invisible Qdisc replaces another one, or is being added for the
	 * first time.
	 */
	if (mlxsw_sp_qdisc->handle != handle) {
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

unoffload:
	if (ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}
449 
450 static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port * mlxsw_sp_port,u32 handle,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,struct mlxsw_sp_qdisc_ops * ops,void * params)451 mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
452 		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
453 		       struct mlxsw_sp_qdisc_ops *ops, void *params)
454 {
455 	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
456 		/* In case this location contained a different qdisc of the
457 		 * same type we can override the old qdisc configuration.
458 		 * Otherwise, we need to remove the old qdisc before setting the
459 		 * new one.
460 		 */
461 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
462 
463 	if (!mlxsw_sp_qdisc->ops)
464 		return mlxsw_sp_qdisc_create(mlxsw_sp_port, handle,
465 					     mlxsw_sp_qdisc, ops, params);
466 	else
467 		return mlxsw_sp_qdisc_change(mlxsw_sp_port, handle,
468 					     mlxsw_sp_qdisc, params);
469 }
470 
471 static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,struct tc_qopt_offload_stats * stats_ptr)472 mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
473 			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
474 			 struct tc_qopt_offload_stats *stats_ptr)
475 {
476 	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
477 	    mlxsw_sp_qdisc->ops->get_stats)
478 		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
479 						      mlxsw_sp_qdisc,
480 						      stats_ptr);
481 
482 	return -EOPNOTSUPP;
483 }
484 
485 static int
mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,void * xstats_ptr)486 mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
487 			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
488 			  void *xstats_ptr)
489 {
490 	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
491 	    mlxsw_sp_qdisc->ops->get_xstats)
492 		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
493 						      mlxsw_sp_qdisc,
494 						      xstats_ptr);
495 
496 	return -EOPNOTSUPP;
497 }
498 
/* Total backlog of a traffic class, in cells. NOTE(review): the entry at
 * tclass_num + 8 is the second bank of 8 counters in the xstats array —
 * presumably the counterpart queue set for the same tclass; confirm
 * against the xstats layout in spectrum.h.
 */
static u64
mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
	return xstats->backlog[tclass_num] +
	       xstats->backlog[tclass_num + 8];
}
505 
/* Total tail drops of a traffic class. As with the backlog counters, the
 * entry at tclass_num + 8 is the second bank of the xstats array for the
 * same tclass.
 */
static u64
mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
	return xstats->tail_drop[tclass_num] +
	       xstats->tail_drop[tclass_num + 8];
}
512 
513 static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats * xstats,u8 prio_bitmap,u64 * tx_packets,u64 * tx_bytes)514 mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
515 				       u8 prio_bitmap, u64 *tx_packets,
516 				       u64 *tx_bytes)
517 {
518 	int i;
519 
520 	*tx_packets = 0;
521 	*tx_bytes = 0;
522 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
523 		if (prio_bitmap & BIT(i)) {
524 			*tx_packets += xstats->tx_packets[i];
525 			*tx_bytes += xstats->tx_bytes[i];
526 		}
527 	}
528 }
529 
/* Accumulate the absolute HW counters relevant to @mlxsw_sp_qdisc into the
 * caller's running totals: TX bytes/packets for its priorities, WRED and
 * tail drops, and the backlog (in cells) of its traffic class.
 */
static void
mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u64 *p_tx_bytes, u64 *p_tx_packets,
				u64 *p_drops, u64 *p_backlog)
{
	struct mlxsw_sp_port_xstats *xstats;
	u64 tx_bytes, tx_packets;
	u8 prio_bitmap;
	int tclass_num;

	prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
						     mlxsw_sp_qdisc);
	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
					       &tx_packets, &tx_bytes);

	*p_tx_packets += tx_packets;
	*p_tx_bytes += tx_bytes;
	*p_drops += xstats->wred_drop[tclass_num] +
		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
}
555 
/* Turn absolute HW counters into deltas against the stored baseline,
 * publish the deltas into @stats_ptr, and advance the baseline so the next
 * call reports only new activity. @backlog is in cells and is converted to
 * bytes for qstats.
 */
static void
mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    u64 tx_bytes, u64 tx_packets,
			    u64 drops, u64 backlog,
			    struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;

	/* Reduce the absolute counters to deltas since the last report. */
	tx_bytes -= stats_base->tx_bytes;
	tx_packets -= stats_base->tx_packets;
	drops -= stats_base->drops;
	backlog -= stats_base->backlog;

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);

	/* Advance the baseline to the values just reported. */
	stats_base->backlog += backlog;
	stats_base->drops += drops;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
}
579 
580 static void
mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,struct tc_qopt_offload_stats * stats_ptr)581 mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
582 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
583 			    struct tc_qopt_offload_stats *stats_ptr)
584 {
585 	u64 tx_packets = 0;
586 	u64 tx_bytes = 0;
587 	u64 backlog = 0;
588 	u64 drops = 0;
589 
590 	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
591 					&tx_bytes, &tx_packets,
592 					&drops, &backlog);
593 	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
594 				    tx_bytes, tx_packets, drops, backlog,
595 				    stats_ptr);
596 }
597 
/* Enable WRED/ECN congestion handling on @tclass_num. The CWTP register
 * programs the profile thresholds (min/max, in cells, rounded up to the
 * minimum supported value) and drop probability; CWTPM then binds the
 * profile to the traffic class and selects whether to drop (WRED) and/or
 * mark (ECN).
 */
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_wred, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
623 
/* Disable WRED/ECN on @tclass_num by binding it back to the reset profile
 * with both dropping and marking turned off.
 */
static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   int tclass_num)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
635 
/* Snapshot the current HW counters as the new statistics baseline for a
 * RED qdisc, so that subsequently reported stats and xstats are deltas
 * from this point. The backlog baseline intentionally starts at zero.
 */
static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;
	u8 prio_bitmap;
	int tclass_num;

	prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
						     mlxsw_sp_qdisc);
	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	/* RED xstats map: marks <- ECN counter, early drops <- WRED
	 * counter, pdrop <- tail drops.
	 */
	red_base->prob_mark = xstats->tc_ecn[tclass_num];
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}
666 
/* Tear down RED offload: turn congestion handling back off on the traffic
 * class this RED governed.
 */
static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int tclass_num;

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
}
676 
677 static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port * mlxsw_sp_port,void * params)678 mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
679 				void *params)
680 {
681 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
682 	struct tc_red_qopt_offload_params *p = params;
683 
684 	if (p->min > p->max) {
685 		dev_err(mlxsw_sp->bus_info->dev,
686 			"spectrum: RED: min %u is bigger then max %u\n", p->min,
687 			p->max);
688 		return -EINVAL;
689 	}
690 	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
691 					GUARANTEED_SHARED_BUFFER)) {
692 		dev_err(mlxsw_sp->bus_info->dev,
693 			"spectrum: RED: max value %u is too big\n", p->max);
694 		return -EINVAL;
695 	}
696 	if (p->min == 0 || p->max == 0) {
697 		dev_err(mlxsw_sp->bus_info->dev,
698 			"spectrum: RED: 0 value is illegal for min and max\n");
699 		return -EINVAL;
700 	}
701 	return 0;
702 }
703 
704 static int
705 mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
706 				   u32 handle, unsigned int band,
707 				   struct mlxsw_sp_qdisc *child_qdisc);
708 static void
709 mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
710 				 u32 handle);
711 
/* Offload a RED replace: first retroactively offload any invisible child
 * FIFO seen before this RED's handle was known, then program the WRED/ECN
 * thresholds for the governed traffic class.
 */
static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	int tclass_num;
	u32 min, max;
	u64 prob;
	int err;

	err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
						 &mlxsw_sp_qdisc->qdiscs[0]);
	if (err)
		return err;
	/* The future-FIFO bookkeeping is consumed; reset it. */
	mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	/* calculate probability in percentage. The two divisions by 1 << 16
	 * together divide by 2^32 (done in steps, rounding up each time) —
	 * p->probability is presumably a fraction scaled by 2^32; confirm
	 * against the TC RED offload definition.
	 */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	/* Thresholds are configured in cells, not bytes. */
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
						 min, max, prob,
						 !p->is_nodrop, p->is_ecn);
}
744 
745 static void
mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,struct gnet_stats_queue * qstats)746 mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
747 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
748 			      struct gnet_stats_queue *qstats)
749 {
750 	u64 backlog;
751 
752 	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
753 				       mlxsw_sp_qdisc->stats_base.backlog);
754 	qstats->backlog -= backlog;
755 	mlxsw_sp_qdisc->stats_base.backlog = 0;
756 }
757 
758 static void
mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,void * params)759 mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
760 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
761 			     void *params)
762 {
763 	struct tc_red_qopt_offload_params *p = params;
764 
765 	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
766 }
767 
/* Report RED extended stats as deltas against the stored baselines and
 * advance the baselines: early drops from the WRED counter, marks from the
 * ECN counter, pdrop from tail drops.
 */
static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *xstats_ptr)
{
	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *res = xstats_ptr;
	int early_drops, marks, pdrops;
	int tclass_num;

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
	marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark;
	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
		 xstats_base->pdrop;

	res->pdrop += pdrops;
	res->prob_drop += early_drops;
	res->prob_mark += marks;

	/* Advance the baselines past the deltas just reported. */
	xstats_base->pdrop += pdrops;
	xstats_base->prob_drop += early_drops;
	xstats_base->prob_mark += marks;
	return 0;
}
797 
/* Fill generic offload stats for RED. On top of the common TC stats, RED
 * reports "overlimits" as the sum of WRED drops and ECN marks, again as a
 * delta against the stored baseline.
 */
static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	u64 overlimits;
	int tclass_num;

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	overlimits = xstats->wred_drop[tclass_num] +
		     xstats->tc_ecn[tclass_num] - stats_base->overlimits;

	stats_ptr->qstats->overlimits += overlimits;
	stats_base->overlimits += overlimits;

	return 0;
}
822 
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       u32 parent)
{
	/* RED and TBF are formally classful qdiscs, but all class references,
	 * including X:0, just refer to the same one class. Hence the single
	 * child slot is returned regardless of @parent.
	 */
	return &mlxsw_sp_qdisc->qdiscs[0];
}
832 
/* Offload callbacks for RED. RED has a single implicit class (its child
 * qdisc), hence num_classes = 1.
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
	.find_class = mlxsw_sp_qdisc_leaf_find_class,
	.num_classes = 1,
};
845 
846 static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
847 				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
848 				u8 band, u32 child_handle);
849 
__mlxsw_sp_setup_tc_red(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_red_qopt_offload * p)850 static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
851 				   struct tc_red_qopt_offload *p)
852 {
853 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
854 
855 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
856 	if (!mlxsw_sp_qdisc)
857 		return -EOPNOTSUPP;
858 
859 	if (p->command == TC_RED_REPLACE)
860 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
861 					      mlxsw_sp_qdisc,
862 					      &mlxsw_sp_qdisc_ops_red,
863 					      &p->set);
864 
865 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
866 		return -EOPNOTSUPP;
867 
868 	switch (p->command) {
869 	case TC_RED_DESTROY:
870 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
871 	case TC_RED_XSTATS:
872 		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
873 						 p->xstats);
874 	case TC_RED_STATS:
875 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
876 						&p->stats);
877 	case TC_RED_GRAFT:
878 		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
879 					    p->child_handle);
880 	default:
881 		return -EOPNOTSUPP;
882 	}
883 }
884 
mlxsw_sp_setup_tc_red(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_red_qopt_offload * p)885 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
886 			  struct tc_red_qopt_offload *p)
887 {
888 	int err;
889 
890 	mutex_lock(&mlxsw_sp_port->qdisc->lock);
891 	err = __mlxsw_sp_setup_tc_red(mlxsw_sp_port, p);
892 	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
893 
894 	return err;
895 }
896 
897 static void
mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc)898 mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
899 					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
900 {
901 	u64 backlog_cells = 0;
902 	u64 tx_packets = 0;
903 	u64 tx_bytes = 0;
904 	u64 drops = 0;
905 
906 	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
907 					&tx_bytes, &tx_packets,
908 					&drops, &backlog_cells);
909 
910 	mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
911 	mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
912 	mlxsw_sp_qdisc->stats_base.drops = drops;
913 	mlxsw_sp_qdisc->stats_base.backlog = 0;
914 }
915 
916 static enum mlxsw_reg_qeec_hr
mlxsw_sp_qdisc_tbf_hr(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc)917 mlxsw_sp_qdisc_tbf_hr(struct mlxsw_sp_port *mlxsw_sp_port,
918 		      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
919 {
920 	if (mlxsw_sp_qdisc == &mlxsw_sp_port->qdisc->root_qdisc)
921 		return MLXSW_REG_QEEC_HR_PORT;
922 
923 	/* Configure subgroup shaper, so that both UC and MC traffic is subject
924 	 * to shaping. That is unlike RED, however UC queue lengths are going to
925 	 * be different than MC ones due to different pool and quota
926 	 * configurations, so the configuration is not applicable. For shaper on
927 	 * the other hand, subjecting the overall stream to the configured
928 	 * shaper makes sense. Also note that that is what we do for
929 	 * ieee_setmaxrate().
930 	 */
931 	return MLXSW_REG_QEEC_HR_SUBGROUP;
932 }
933 
934 static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc)935 mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
936 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
937 {
938 	enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
939 							  mlxsw_sp_qdisc);
940 	int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
941 						       mlxsw_sp_qdisc);
942 
943 	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
944 					     MLXSW_REG_QEEC_MAS_DIS, 0);
945 }
946 
947 static int
mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port * mlxsw_sp_port,u32 max_size,u8 * p_burst_size)948 mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
949 		      u32 max_size, u8 *p_burst_size)
950 {
951 	/* TBF burst size is configured in bytes. The ASIC burst size value is
952 	 * ((2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
953 	 */
954 	u32 bs512 = max_size / 64;
955 	u8 bs = fls(bs512);
956 
957 	if (!bs)
958 		return -EINVAL;
959 	--bs;
960 
961 	/* Demand a power of two. */
962 	if ((1 << bs) != bs512)
963 		return -EINVAL;
964 
965 	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
966 	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
967 		return -EINVAL;
968 
969 	*p_burst_size = bs;
970 	return 0;
971 }
972 
973 static u32
mlxsw_sp_qdisc_tbf_max_size(u8 bs)974 mlxsw_sp_qdisc_tbf_max_size(u8 bs)
975 {
976 	return (1U << bs) * 64;
977 }
978 
979 static u64
mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params * p)980 mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
981 {
982 	/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
983 	 * Kbits/s.
984 	 */
985 	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
986 }
987 
988 static int
mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port * mlxsw_sp_port,void * params)989 mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
990 				void *params)
991 {
992 	struct tc_tbf_qopt_offload_replace_params *p = params;
993 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
994 	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
995 	u8 burst_size;
996 	int err;
997 
998 	if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
999 		dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
1000 			"spectrum: TBF: rate of %lluKbps must be below %u\n",
1001 			rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
1002 		return -EINVAL;
1003 	}
1004 
1005 	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
1006 	if (err) {
1007 		u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
1008 
1009 		dev_err(mlxsw_sp->bus_info->dev,
1010 			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
1011 			p->max_size,
1012 			mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
1013 			mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
1014 		return -EINVAL;
1015 	}
1016 
1017 	return 0;
1018 }
1019 
/* Offload a TBF qdisc: pick up any FIFO child cached for it, then program the
 * max-rate shaper on the port/subgroup element (see mlxsw_sp_qdisc_tbf_hr()).
 */
static int
mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
							  mlxsw_sp_qdisc);
	struct tc_tbf_qopt_offload_replace_params *p = params;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	int tclass_num;
	u8 burst_size;
	int err;

	/* A FIFO child notification may have arrived before this TBF replace;
	 * if one was cached for @handle, offload it into the single child
	 * slot, then reset the cache.
	 */
	err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
						 &mlxsw_sp_qdisc->qdiscs[0]);
	if (err)
		return err;
	mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (WARN_ON_ONCE(err))
		/* check_params above was supposed to reject this value. */
		return -EINVAL;

	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
					     rate_kbps, burst_size);
}
1050 
1051 static void
mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,void * params)1052 mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1053 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1054 			     void *params)
1055 {
1056 	struct tc_tbf_qopt_offload_replace_params *p = params;
1057 
1058 	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
1059 }
1060 
static int
mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	/* TBF has no qdisc-specific counters; generic TC stats suffice. */
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	return 0;
}
1070 
1071 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
1072 	.type = MLXSW_SP_QDISC_TBF,
1073 	.check_params = mlxsw_sp_qdisc_tbf_check_params,
1074 	.replace = mlxsw_sp_qdisc_tbf_replace,
1075 	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
1076 	.destroy = mlxsw_sp_qdisc_tbf_destroy,
1077 	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
1078 	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
1079 	.find_class = mlxsw_sp_qdisc_leaf_find_class,
1080 	.num_classes = 1,
1081 };
1082 
__mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_tbf_qopt_offload * p)1083 static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
1084 				   struct tc_tbf_qopt_offload *p)
1085 {
1086 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1087 
1088 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
1089 	if (!mlxsw_sp_qdisc)
1090 		return -EOPNOTSUPP;
1091 
1092 	if (p->command == TC_TBF_REPLACE)
1093 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1094 					      mlxsw_sp_qdisc,
1095 					      &mlxsw_sp_qdisc_ops_tbf,
1096 					      &p->replace_params);
1097 
1098 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
1099 		return -EOPNOTSUPP;
1100 
1101 	switch (p->command) {
1102 	case TC_TBF_DESTROY:
1103 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1104 	case TC_TBF_STATS:
1105 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1106 						&p->stats);
1107 	case TC_TBF_GRAFT:
1108 		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
1109 					    p->child_handle);
1110 	default:
1111 		return -EOPNOTSUPP;
1112 	}
1113 }
1114 
mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_tbf_qopt_offload * p)1115 int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
1116 			  struct tc_tbf_qopt_offload *p)
1117 {
1118 	int err;
1119 
1120 	mutex_lock(&mlxsw_sp_port->qdisc->lock);
1121 	err = __mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, p);
1122 	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1123 
1124 	return err;
1125 }
1126 
static int
mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 void *params)
{
	/* FIFO carries no parameters the offload could reject. */
	return 0;
}
1133 
static int
mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	/* Nothing to configure in HW for a FIFO; only its presence and stats
	 * are tracked.
	 */
	return 0;
}
1141 
static int
mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	/* FIFO has no qdisc-specific counters; generic TC stats suffice. */
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	return 0;
}
1151 
/* FIFO offload callbacks. No destroy/unoffload/find_class are provided --
 * the replace callback programs nothing, so there is nothing to undo, and
 * FIFO is a leaf with no addressable classes.
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
	.type = MLXSW_SP_QDISC_FIFO,
	.check_params = mlxsw_sp_qdisc_fifo_check_params,
	.replace = mlxsw_sp_qdisc_fifo_replace,
	.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
1159 
1160 static int
mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port * mlxsw_sp_port,u32 handle,unsigned int band,struct mlxsw_sp_qdisc * child_qdisc)1161 mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
1162 				   u32 handle, unsigned int band,
1163 				   struct mlxsw_sp_qdisc *child_qdisc)
1164 {
1165 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
1166 
1167 	if (handle == qdisc_state->future_handle &&
1168 	    qdisc_state->future_fifos[band])
1169 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
1170 					      child_qdisc,
1171 					      &mlxsw_sp_qdisc_ops_fifo,
1172 					      NULL);
1173 	return 0;
1174 }
1175 
1176 static void
mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port * mlxsw_sp_port,u32 handle)1177 mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
1178 				 u32 handle)
1179 {
1180 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
1181 
1182 	qdisc_state->future_handle = handle;
1183 	memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
1184 }
1185 
/* Dispatch a FIFO offload notification. FIFO children may be notified before
 * their parent qdisc exists; such "future" FIFOs are cached per band and
 * offloaded later by mlxsw_sp_qdisc_future_fifo_replace(). Caller holds the
 * port qdisc lock.
 */
static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tc_fifo_qopt_offload *p)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
	unsigned int band;
	u32 parent_handle;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
	if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
		/* Parent not offloaded yet and the FIFO is invisible (no
		 * handle): remember it for when the parent gets offloaded.
		 */
		parent_handle = TC_H_MAJ(p->parent);
		if (parent_handle != qdisc_state->future_handle) {
			/* This notification is for a different Qdisc than
			 * previously. Wipe the future cache.
			 */
			mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port,
							 parent_handle);
		}

		/* Class minor numbers are 1-based band indices. */
		band = TC_H_MIN(p->parent) - 1;
		if (band < IEEE_8021QAZ_MAX_TCS) {
			if (p->command == TC_FIFO_REPLACE)
				qdisc_state->future_fifos[band] = true;
			else if (p->command == TC_FIFO_DESTROY)
				qdisc_state->future_fifos[band] = false;
		}
	}
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_FIFO_REPLACE) {
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_fifo, NULL);
	}

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_FIFO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_FIFO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_FIFO_REPLACE: /* Handled above. */
		break;
	}

	return -EOPNOTSUPP;
}
1237 
mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_fifo_qopt_offload * p)1238 int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
1239 			   struct tc_fifo_qopt_offload *p)
1240 {
1241 	int err;
1242 
1243 	mutex_lock(&mlxsw_sp_port->qdisc->lock);
1244 	err = __mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, p);
1245 	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1246 
1247 	return err;
1248 }
1249 
__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc)1250 static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
1251 					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1252 {
1253 	int i;
1254 
1255 	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
1256 		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
1257 					  MLXSW_SP_PORT_DEFAULT_TCLASS);
1258 		mlxsw_sp_port_ets_set(mlxsw_sp_port,
1259 				      MLXSW_REG_QEEC_HR_SUBGROUP,
1260 				      i, 0, false, 0);
1261 	}
1262 
1263 	kfree(mlxsw_sp_qdisc->ets_data);
1264 	mlxsw_sp_qdisc->ets_data = NULL;
1265 	return 0;
1266 }
1267 
static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	/* PRIO shares the ETS teardown path. */
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
1274 
1275 static int
__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)1276 __mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
1277 {
1278 	if (nbands > IEEE_8021QAZ_MAX_TCS)
1279 		return -EOPNOTSUPP;
1280 
1281 	return 0;
1282 }
1283 
1284 static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port * mlxsw_sp_port,void * params)1285 mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
1286 				 void *params)
1287 {
1288 	struct tc_prio_qopt_offload_params *p = params;
1289 
1290 	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
1291 }
1292 
1293 static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,void * mlxsw_sp_port)1294 mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1295 				   void *mlxsw_sp_port)
1296 {
1297 	u64 backlog;
1298 
1299 	if (mlxsw_sp_qdisc->ops) {
1300 		backlog = mlxsw_sp_qdisc->stats_base.backlog;
1301 		if (mlxsw_sp_qdisc->ops->clean_stats)
1302 			mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port,
1303 							 mlxsw_sp_qdisc);
1304 		mlxsw_sp_qdisc->stats_base.backlog = backlog;
1305 	}
1306 
1307 	return NULL;
1308 }
1309 
/* Re-baseline the stats of @mlxsw_sp_qdisc and every qdisc below it. */
static void
mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats,
			    mlxsw_sp_port);
}
1317 
/* Common PRIO/ETS offload. Programs per-band DWRR configuration and the
 * priority-to-TC mapping for the first @nbands bands, and clears bands that
 * fell out of use. For PRIO, @quanta and @weights are all-zero (strict
 * priority).
 *
 * NOTE(review): an error mid-way leaves earlier bands configured; presumably
 * the core then destroys the qdisc, which runs the full teardown -- verify.
 */
static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     u32 handle, unsigned int nbands,
			     const unsigned int *quanta,
			     const unsigned int *weights,
			     const u8 *priomap)
{
	struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data;
	struct mlxsw_sp_qdisc_ets_band *ets_band;
	struct mlxsw_sp_qdisc *child_qdisc;
	u8 old_priomap, new_priomap;
	int i, band;
	int err;

	if (!ets_data) {
		/* First offload of this qdisc: allocate the band bookkeeping
		 * and fix each band's traffic class. Bands map to TCs in
		 * reverse order, per MLXSW_SP_PRIO_BAND_TO_TCLASS().
		 */
		ets_data = kzalloc_obj(*ets_data);
		if (!ets_data)
			return -ENOMEM;
		mlxsw_sp_qdisc->ets_data = ets_data;

		for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) {
			int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);

			ets_band = &ets_data->bands[band];
			ets_band->tclass_num = tclass_num;
		}
	}

	for (band = 0; band < nbands; band++) {
		int tclass_num;

		child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
		ets_band = &ets_data->bands[band];

		tclass_num = ets_band->tclass_num;
		old_priomap = ets_band->prio_bitmap;
		new_priomap = 0;

		/* Configure the band's DWRR mode and weight. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP,
					    tclass_num, 0, !!quanta[band],
					    weights[band]);
		if (err)
			return err;

		/* Point at this band's TC every priority that the new priomap
		 * assigns to it, skipping priorities that already were there.
		 */
		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (priomap[i] == band) {
				new_priomap |= BIT(i);
				if (BIT(i) & old_priomap)
					continue;
				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
								i, tclass_num);
				if (err)
					return err;
			}
		}

		ets_band->prio_bitmap = new_priomap;

		/* A changed priority set means the band now carries different
		 * traffic; its stats baselines are no longer meaningful.
		 */
		if (old_priomap != new_priomap)
			mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port,
							child_qdisc);

		/* Offload any FIFO child cached for this band. */
		err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle,
							 band, child_qdisc);
		if (err)
			return err;
	}
	/* Clear bands beyond the new band count. */
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		ets_band = &ets_data->bands[band];
		ets_band->prio_bitmap = 0;

		child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);

		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      ets_band->tclass_num, 0, false, 0);
	}

	/* All cached future FIFOs were consumed above; reset the cache. */
	mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
	return 0;
}
1402 
1403 static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port * mlxsw_sp_port,u32 handle,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,void * params)1404 mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
1405 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1406 			    void *params)
1407 {
1408 	struct tc_prio_qopt_offload_params *p = params;
1409 	unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
1410 
1411 	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
1412 					    handle, p->bands, zeroes,
1413 					    zeroes, p->priomap);
1414 }
1415 
1416 static void
__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,struct gnet_stats_queue * qstats)1417 __mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1418 			       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1419 			       struct gnet_stats_queue *qstats)
1420 {
1421 	u64 backlog;
1422 
1423 	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
1424 				       mlxsw_sp_qdisc->stats_base.backlog);
1425 	qstats->backlog -= backlog;
1426 }
1427 
1428 static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,void * params)1429 mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1430 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1431 			      void *params)
1432 {
1433 	struct tc_prio_qopt_offload_params *p = params;
1434 
1435 	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
1436 				       p->qstats);
1437 }
1438 
1439 static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,struct tc_qopt_offload_stats * stats_ptr)1440 mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1441 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1442 			      struct tc_qopt_offload_stats *stats_ptr)
1443 {
1444 	struct mlxsw_sp_qdisc *tc_qdisc;
1445 	u64 tx_packets = 0;
1446 	u64 tx_bytes = 0;
1447 	u64 backlog = 0;
1448 	u64 drops = 0;
1449 	int i;
1450 
1451 	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
1452 		tc_qdisc = &mlxsw_sp_qdisc->qdiscs[i];
1453 		mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
1454 						&tx_bytes, &tx_packets,
1455 						&drops, &backlog);
1456 	}
1457 
1458 	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
1459 				    tx_bytes, tx_packets, drops, backlog,
1460 				    stats_ptr);
1461 	return 0;
1462 }
1463 
1464 static void
mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc)1465 mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1466 					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1467 {
1468 	struct mlxsw_sp_qdisc_stats *stats_base;
1469 	struct mlxsw_sp_port_xstats *xstats;
1470 	struct rtnl_link_stats64 *stats;
1471 	int i;
1472 
1473 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
1474 	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
1475 	stats_base = &mlxsw_sp_qdisc->stats_base;
1476 
1477 	stats_base->tx_packets = stats->tx_packets;
1478 	stats_base->tx_bytes = stats->tx_bytes;
1479 
1480 	stats_base->drops = 0;
1481 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1482 		stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
1483 		stats_base->drops += xstats->wred_drop[i];
1484 	}
1485 
1486 	mlxsw_sp_qdisc->stats_base.backlog = 0;
1487 }
1488 
1489 static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,u32 parent)1490 mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1491 			       u32 parent)
1492 {
1493 	int child_index = TC_H_MIN(parent);
1494 	int band = child_index - 1;
1495 
1496 	if (band < 0 || band >= mlxsw_sp_qdisc->num_classes)
1497 		return NULL;
1498 	return &mlxsw_sp_qdisc->qdiscs[band];
1499 }
1500 
static struct mlxsw_sp_qdisc_ets_band *
mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    struct mlxsw_sp_qdisc *child)
{
	/* The band index is the child's position in the qdiscs array. */
	unsigned int band = child - mlxsw_sp_qdisc->qdiscs;

	/* Out-of-range means @child is not actually a child of this qdisc;
	 * fall back to band 0 rather than indexing out of bounds.
	 */
	if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS))
		band = 0;
	return &mlxsw_sp_qdisc->ets_data->bands[band];
}
1511 
1512 static u8
mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,struct mlxsw_sp_qdisc * child)1513 mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1514 				   struct mlxsw_sp_qdisc *child)
1515 {
1516 	return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap;
1517 }
1518 
1519 static int
mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,struct mlxsw_sp_qdisc * child)1520 mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1521 				  struct mlxsw_sp_qdisc *child)
1522 {
1523 	return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num;
1524 }
1525 
/* PRIO offload callbacks. PRIO is offloaded as a degenerate ETS (all quanta
 * zero, i.e. strict priority) and shares the ETS replace/stats/band helpers.
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
	.find_class = mlxsw_sp_qdisc_prio_find_class,
	.num_classes = IEEE_8021QAZ_MAX_TCS,
	.get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
	.get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
1539 
1540 static int
mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port * mlxsw_sp_port,void * params)1541 mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
1542 				void *params)
1543 {
1544 	struct tc_ets_qopt_offload_replace_params *p = params;
1545 
1546 	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
1547 }
1548 
static int
mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	/* Pass the ETS quanta and weights straight through, unlike PRIO
	 * which zeroes them.
	 */
	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
					    handle, p->bands, p->quanta,
					    p->weights, p->priomap);
}
1560 
1561 static void
mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_qdisc * mlxsw_sp_qdisc,void * params)1562 mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1563 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1564 			     void *params)
1565 {
1566 	struct tc_ets_qopt_offload_replace_params *p = params;
1567 
1568 	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
1569 				       p->qstats);
1570 }
1571 
static int
mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	/* Same teardown as PRIO. */
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
1578 
/* ETS offload callbacks. Shares the stats, clean-stats, find-class and band
 * helpers with PRIO; only the replace/check paths differ (real quanta).
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
	.type = MLXSW_SP_QDISC_ETS,
	.check_params = mlxsw_sp_qdisc_ets_check_params,
	.replace = mlxsw_sp_qdisc_ets_replace,
	.unoffload = mlxsw_sp_qdisc_ets_unoffload,
	.destroy = mlxsw_sp_qdisc_ets_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
	.find_class = mlxsw_sp_qdisc_prio_find_class,
	.num_classes = IEEE_8021QAZ_MAX_TCS,
	.get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
	.get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
1592 
1593 /* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
1594  * graph is free of cycles). These operations do not change the parent handle
1595  * though, which means it can be incomplete (if there is more than one class
1596  * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
1597  * linked to a different class and then removed from the original class).
1598  *
1599  * E.g. consider this sequence of operations:
1600  *
1601  *  # tc qdisc add dev swp1 root handle 1: prio
1602  *  # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
1603  *  RED: set bandwidth to 10Mbit
1604  *  # tc qdisc link dev swp1 handle 13: parent 1:2
1605  *
1606  * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
1607  * child. But RED will still only claim that 1:3 is its parent. If it's removed
1608  * from that band, its only parent will be 1:2, but it will continue to claim
1609  * that it is in fact 1:3.
1610  *
1611  * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
1612  * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
1613  * notification to offload the child Qdisc, based on its parent handle, and use
1614  * the graft operation to validate that the class where the child is actually
1615  * grafted corresponds to the parent handle. If the two don't match, we
1616  * unoffload the child.
1617  */
/* Validate a graft of @child_handle into @band of @mlxsw_sp_qdisc; see the
 * block comment above for the rationale. Returns 0 when the graft matches
 * the offloaded state, -EOPNOTSUPP after unoffloading a mismatched child.
 */
static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u8 band, u32 child_handle)
{
	struct mlxsw_sp_qdisc *old_qdisc;
	u32 parent;

	/* The grafted child is exactly the one offloaded in this band. */
	if (band < mlxsw_sp_qdisc->num_classes &&
	    mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
		return 0;

	if (!child_handle) {
		/* This is an invisible FIFO replacing the original Qdisc.
		 * Ignore it--the original Qdisc's destroy will follow.
		 */
		return 0;
	}

	/* See if the grafted qdisc is already offloaded on any tclass. If so,
	 * unoffload it.
	 */
	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
						  child_handle);
	if (old_qdisc)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);

	/* Also unoffload whatever currently occupies the target class. */
	parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
	mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
							 parent);
	if (!WARN_ON(!mlxsw_sp_qdisc))
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);

	return -EOPNOTSUPP;
}
1652 
__mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_prio_qopt_offload * p)1653 static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1654 				    struct tc_prio_qopt_offload *p)
1655 {
1656 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1657 
1658 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
1659 	if (!mlxsw_sp_qdisc)
1660 		return -EOPNOTSUPP;
1661 
1662 	if (p->command == TC_PRIO_REPLACE)
1663 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1664 					      mlxsw_sp_qdisc,
1665 					      &mlxsw_sp_qdisc_ops_prio,
1666 					      &p->replace_params);
1667 
1668 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
1669 		return -EOPNOTSUPP;
1670 
1671 	switch (p->command) {
1672 	case TC_PRIO_DESTROY:
1673 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1674 	case TC_PRIO_STATS:
1675 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1676 						&p->stats);
1677 	case TC_PRIO_GRAFT:
1678 		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1679 					    p->graft_params.band,
1680 					    p->graft_params.child_handle);
1681 	default:
1682 		return -EOPNOTSUPP;
1683 	}
1684 }
1685 
mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_prio_qopt_offload * p)1686 int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1687 			   struct tc_prio_qopt_offload *p)
1688 {
1689 	int err;
1690 
1691 	mutex_lock(&mlxsw_sp_port->qdisc->lock);
1692 	err = __mlxsw_sp_setup_tc_prio(mlxsw_sp_port, p);
1693 	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1694 
1695 	return err;
1696 }
1697 
__mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_ets_qopt_offload * p)1698 static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1699 				   struct tc_ets_qopt_offload *p)
1700 {
1701 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1702 
1703 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
1704 	if (!mlxsw_sp_qdisc)
1705 		return -EOPNOTSUPP;
1706 
1707 	if (p->command == TC_ETS_REPLACE)
1708 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1709 					      mlxsw_sp_qdisc,
1710 					      &mlxsw_sp_qdisc_ops_ets,
1711 					      &p->replace_params);
1712 
1713 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
1714 		return -EOPNOTSUPP;
1715 
1716 	switch (p->command) {
1717 	case TC_ETS_DESTROY:
1718 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1719 	case TC_ETS_STATS:
1720 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1721 						&p->stats);
1722 	case TC_ETS_GRAFT:
1723 		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1724 					    p->graft_params.band,
1725 					    p->graft_params.child_handle);
1726 	default:
1727 		return -EOPNOTSUPP;
1728 	}
1729 }
1730 
mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_ets_qopt_offload * p)1731 int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1732 			  struct tc_ets_qopt_offload *p)
1733 {
1734 	int err;
1735 
1736 	mutex_lock(&mlxsw_sp_port->qdisc->lock);
1737 	err = __mlxsw_sp_setup_tc_ets(mlxsw_sp_port, p);
1738 	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1739 
1740 	return err;
1741 }
1742 
/* State of one qevent flow block: the matchall rules installed on it and the
 * port bindings it is attached to.
 */
struct mlxsw_sp_qevent_block {
	struct list_head binding_list;		/* of mlxsw_sp_qevent_binding.list */
	struct list_head mall_entry_list;	/* of mlxsw_sp_mall_entry.list */
	struct mlxsw_sp *mlxsw_sp;
};
1748 
/* One attachment of a qevent block to a specific qdisc qevent on a port. */
struct mlxsw_sp_qevent_binding {
	struct list_head list;			/* member of qevent_block->binding_list */
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 handle;				/* handle of the qdisc owning the qevent */
	int tclass_num;				/* tclass on which to enable the SPAN trigger */
	enum mlxsw_sp_span_trigger span_trigger;
	unsigned int action_mask;		/* BIT(MLXSW_SP_MALL_ACTION_TYPE_*) allowed here */
};
1757 
1758 static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
1759 
/* Set up mirroring for one qevent binding: allocate a SPAN agent per
 * @agent_parms, mark the port as analyzed, bind the agent to the binding's
 * trigger and enable that trigger on the binding's traffic class. On success
 * the agent ID is returned through @p_span_id for later teardown; on failure
 * all completed steps are unwound in reverse order.
 *
 * NOTE(review): @mall_entry is not referenced in this function; presumably
 * kept so the configure helpers share a signature — confirm before removing.
 */
static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_mall_entry *mall_entry,
					  struct mlxsw_sp_qevent_binding *qevent_binding,
					  const struct mlxsw_sp_span_agent_parms *agent_parms,
					  int *p_span_id)
{
	enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	bool ingress;
	int span_id;
	int err;

	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
	if (err)
		return err;

	ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
	if (err)
		goto err_analyzed_port_get;

	trigger_parms.span_id = span_id;
	/* NOTE(review): rate 1 appears to mean "mirror every triggering
	 * packet" — confirm against the SPAN agent API.
	 */
	trigger_parms.probability_rate = 1;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				       &trigger_parms);
	if (err)
		goto err_agent_bind;

	err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger,
					   qevent_binding->tclass_num);
	if (err)
		goto err_trigger_enable;

	*p_span_id = span_id;
	return 0;

err_trigger_enable:
	mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				   &trigger_parms);
err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
	return err;
}
1806 
/* Tear down the SPAN state set up by mlxsw_sp_qevent_span_configure() for
 * @span_id, releasing each resource in the reverse order of acquisition.
 */
static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_qevent_binding *qevent_binding,
					     int span_id)
{
	enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {
		.span_id = span_id,
	};
	bool ingress;

	ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);

	mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger,
				      qevent_binding->tclass_num);
	mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				   &trigger_parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
1827 
/* Offload a mirror (mirred) qevent action: set up a SPAN agent towards the
 * action's destination netdevice and record the agent ID in the mall entry
 * for later teardown.
 */
static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_mall_entry *mall_entry,
					    struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.to_dev = mall_entry->mirror.to_dev,
	};

	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
					      &agent_parms, &mall_entry->mirror.span_id);
}
1839 
/* Undo mlxsw_sp_qevent_mirror_configure() using the recorded SPAN agent ID. */
static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_mall_entry *mall_entry,
					       struct mlxsw_sp_qevent_binding *qevent_binding)
{
	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
}
1846 
mlxsw_sp_qevent_trap_configure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mall_entry * mall_entry,struct mlxsw_sp_qevent_binding * qevent_binding)1847 static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
1848 					  struct mlxsw_sp_mall_entry *mall_entry,
1849 					  struct mlxsw_sp_qevent_binding *qevent_binding)
1850 {
1851 	struct mlxsw_sp_span_agent_parms agent_parms = {
1852 		.session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER,
1853 	};
1854 	int err;
1855 
1856 	err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
1857 						    DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
1858 						    &agent_parms.policer_enable,
1859 						    &agent_parms.policer_id);
1860 	if (err)
1861 		return err;
1862 
1863 	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
1864 					      &agent_parms, &mall_entry->trap.span_id);
1865 }
1866 
/* Undo mlxsw_sp_qevent_trap_configure() using the recorded SPAN agent ID. */
static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_mall_entry *mall_entry,
					     struct mlxsw_sp_qevent_binding *qevent_binding)
{
	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}
1873 
1874 static int
mlxsw_sp_qevent_entry_configure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mall_entry * mall_entry,struct mlxsw_sp_qevent_binding * qevent_binding,struct netlink_ext_ack * extack)1875 mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
1876 				struct mlxsw_sp_mall_entry *mall_entry,
1877 				struct mlxsw_sp_qevent_binding *qevent_binding,
1878 				struct netlink_ext_ack *extack)
1879 {
1880 	if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) {
1881 		NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
1882 		return -EOPNOTSUPP;
1883 	}
1884 
1885 	switch (mall_entry->type) {
1886 	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1887 		return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
1888 	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1889 		return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
1890 	default:
1891 		/* This should have been validated away. */
1892 		WARN_ON(1);
1893 		return -EOPNOTSUPP;
1894 	}
1895 }
1896 
mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mall_entry * mall_entry,struct mlxsw_sp_qevent_binding * qevent_binding)1897 static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
1898 					      struct mlxsw_sp_mall_entry *mall_entry,
1899 					      struct mlxsw_sp_qevent_binding *qevent_binding)
1900 {
1901 	switch (mall_entry->type) {
1902 	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1903 		return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1904 	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1905 		return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1906 	default:
1907 		WARN_ON(1);
1908 		return;
1909 	}
1910 }
1911 
/* Configure every rule of @qevent_block on @qevent_binding. On failure, the
 * rules already configured are rolled back in reverse order, leaving the
 * binding untouched.
 */
static int
mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
				  struct mlxsw_sp_qevent_binding *qevent_binding,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;

	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
		err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
						      qevent_binding, extack);
		if (err)
			goto err_entry_configure;
	}

	return 0;

err_entry_configure:
	/* Walk back over the entries configured so far, skipping the one
	 * that failed.
	 */
	list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
						  qevent_binding);
	return err;
}
1935 
mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block * qevent_block,struct mlxsw_sp_qevent_binding * qevent_binding)1936 static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
1937 						struct mlxsw_sp_qevent_binding *qevent_binding)
1938 {
1939 	struct mlxsw_sp_mall_entry *mall_entry;
1940 
1941 	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
1942 		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
1943 						  qevent_binding);
1944 }
1945 
/* Configure the block's rules on all of its bindings. On failure, the
 * bindings already configured are rolled back in reverse order.
 */
static int
mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;
	int err;

	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
		err = mlxsw_sp_qevent_binding_configure(qevent_block,
							qevent_binding,
							extack);
		if (err)
			goto err_binding_configure;
	}

	return 0;

err_binding_configure:
	/* Walk back over the bindings configured so far, skipping the one
	 * that failed.
	 */
	list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	return err;
}
1968 
mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block * qevent_block)1969 static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
1970 {
1971 	struct mlxsw_sp_qevent_binding *qevent_binding;
1972 
1973 	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
1974 		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
1975 }
1976 
1977 static struct mlxsw_sp_mall_entry *
mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block * block,unsigned long cookie)1978 mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
1979 {
1980 	struct mlxsw_sp_mall_entry *mall_entry;
1981 
1982 	list_for_each_entry(mall_entry, &block->mall_entry_list, list)
1983 		if (mall_entry->cookie == cookie)
1984 			return mall_entry;
1985 
1986 	return NULL;
1987 }
1988 
/* Install a matchall classifier on a qevent block. Only a single rule with
 * one mirror or trap action, on chain 0, matching all protocols, is
 * supported. The new entry is added to the block and then configured on all
 * existing bindings; on failure the entry is unlinked and freed again.
 */
static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_qevent_block *qevent_block,
					struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	struct flow_action_entry *act;
	int err;

	/* It should not currently be possible to replace a matchall rule. So
	 * this must be a new rule.
	 */
	if (!list_empty(&qevent_block->mall_entry_list)) {
		NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
		return -EOPNOTSUPP;
	}
	if (f->rule->action.num_entries != 1) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
		return -EOPNOTSUPP;
	}
	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}
	if (f->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
		return -EOPNOTSUPP;
	}

	act = &f->rule->action.entries[0];
	/* Qevents do not support HW counters, so the action is only accepted
	 * when HW stats collection was explicitly disabled for it.
	 */
	if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
		NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc_obj(*mall_entry);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;

	if (act->id == FLOW_ACTION_MIRRED) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else if (act->id == FLOW_ACTION_TRAP) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
	} else {
		NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
		err = -EOPNOTSUPP;
		goto err_unsupported_action;
	}

	list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);

	/* Program the new rule on every binding of this block. */
	err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack);
	if (err)
		goto err_block_configure;

	return 0;

err_block_configure:
	list_del(&mall_entry->list);
err_unsupported_action:
	kfree(mall_entry);
	return err;
}
2053 
mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block * qevent_block,struct tc_cls_matchall_offload * f)2054 static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
2055 					 struct tc_cls_matchall_offload *f)
2056 {
2057 	struct mlxsw_sp_mall_entry *mall_entry;
2058 
2059 	mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
2060 	if (!mall_entry)
2061 		return;
2062 
2063 	mlxsw_sp_qevent_block_deconfigure(qevent_block);
2064 
2065 	list_del(&mall_entry->list);
2066 	kfree(mall_entry);
2067 }
2068 
mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block * qevent_block,struct tc_cls_matchall_offload * f)2069 static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
2070 					 struct tc_cls_matchall_offload *f)
2071 {
2072 	struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;
2073 
2074 	switch (f->command) {
2075 	case TC_CLSMATCHALL_REPLACE:
2076 		return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
2077 	case TC_CLSMATCHALL_DESTROY:
2078 		mlxsw_sp_qevent_mall_destroy(qevent_block, f);
2079 		return 0;
2080 	default:
2081 		return -EOPNOTSUPP;
2082 	}
2083 }
2084 
mlxsw_sp_qevent_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)2085 static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
2086 {
2087 	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
2088 
2089 	switch (type) {
2090 	case TC_SETUP_CLSMATCHALL:
2091 		return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
2092 	default:
2093 		return -EOPNOTSUPP;
2094 	}
2095 }
2096 
mlxsw_sp_qevent_block_create(struct mlxsw_sp * mlxsw_sp,struct net * net)2097 static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
2098 								  struct net *net)
2099 {
2100 	struct mlxsw_sp_qevent_block *qevent_block;
2101 
2102 	qevent_block = kzalloc_obj(*qevent_block);
2103 	if (!qevent_block)
2104 		return NULL;
2105 
2106 	INIT_LIST_HEAD(&qevent_block->binding_list);
2107 	INIT_LIST_HEAD(&qevent_block->mall_entry_list);
2108 	qevent_block->mlxsw_sp = mlxsw_sp;
2109 	return qevent_block;
2110 }
2111 
/* Free a qevent block; all bindings and rules must already be gone. */
static void
mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
{
	WARN_ON(!list_empty(&qevent_block->binding_list));
	WARN_ON(!list_empty(&qevent_block->mall_entry_list));
	kfree(qevent_block);
}
2119 
/* flow_block_cb release hook; cb_priv is the qevent block itself. */
static void mlxsw_sp_qevent_block_release(void *cb_priv)
{
	mlxsw_sp_qevent_block_destroy(cb_priv);
}
2126 
2127 static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port * mlxsw_sp_port,u32 handle,int tclass_num,enum mlxsw_sp_span_trigger span_trigger,unsigned int action_mask)2128 mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
2129 			       enum mlxsw_sp_span_trigger span_trigger,
2130 			       unsigned int action_mask)
2131 {
2132 	struct mlxsw_sp_qevent_binding *binding;
2133 
2134 	binding = kzalloc_obj(*binding);
2135 	if (!binding)
2136 		return ERR_PTR(-ENOMEM);
2137 
2138 	binding->mlxsw_sp_port = mlxsw_sp_port;
2139 	binding->handle = handle;
2140 	binding->tclass_num = tclass_num;
2141 	binding->span_trigger = span_trigger;
2142 	binding->action_mask = action_mask;
2143 	return binding;
2144 }
2145 
/* Free a binding; it must already be deconfigured and unlinked. */
static void
mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
{
	kfree(binding);
}
2151 
2152 static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block * block,struct mlxsw_sp_port * mlxsw_sp_port,u32 handle,enum mlxsw_sp_span_trigger span_trigger)2153 mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
2154 			       struct mlxsw_sp_port *mlxsw_sp_port,
2155 			       u32 handle,
2156 			       enum mlxsw_sp_span_trigger span_trigger)
2157 {
2158 	struct mlxsw_sp_qevent_binding *qevent_binding;
2159 
2160 	list_for_each_entry(qevent_binding, &block->binding_list, list)
2161 		if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
2162 		    qevent_binding->handle == handle &&
2163 		    qevent_binding->span_trigger == span_trigger)
2164 			return qevent_binding;
2165 	return NULL;
2166 }
2167 
/* Bind a qevent flow block to a port qevent. The block may be shared: if a
 * flow_block_cb for it already exists it is reused and reference counted,
 * otherwise a new callback with a fresh qevent block as its priv is
 * allocated. The new binding is then configured with all rules already on
 * the block. The callback is registered with the core only when the first
 * binding succeeds.
 */
static int
mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f,
				    enum mlxsw_sp_span_trigger span_trigger,
				    unsigned int action_mask)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;
	struct mlxsw_sp_qdisc *qdisc;
	bool register_block = false;
	int tclass_num;
	int err;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb) {
		qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
		if (!qevent_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
					       mlxsw_sp_qevent_block_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_qevent_block_destroy(qevent_block);
			return PTR_ERR(block_cb);
		}
		register_block = true;
	} else {
		qevent_block = flow_block_cb_priv(block_cb);
	}
	/* Taken early so the error path can unconditionally decref; dropping
	 * to zero there frees a callback allocated above.
	 */
	flow_block_cb_incref(block_cb);

	/* The qevent hangs off an offloaded qdisc; resolve it to find the
	 * traffic class the SPAN trigger should be enabled on.
	 */
	qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
	if (!qdisc) {
		NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
		err = -ENOENT;
		goto err_find_qdisc;
	}

	if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
						   span_trigger))) {
		err = -EEXIST;
		goto err_binding_exists;
	}

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc);
	qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port,
							f->sch->handle,
							tclass_num,
							span_trigger,
							action_mask);
	if (IS_ERR(qevent_binding)) {
		err = PTR_ERR(qevent_binding);
		goto err_binding_create;
	}

	err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding,
						f->extack);
	if (err)
		goto err_binding_configure;

	list_add(&qevent_binding->list, &qevent_block->binding_list);

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
	}

	return 0;

err_binding_configure:
	mlxsw_sp_qevent_binding_destroy(qevent_binding);
err_binding_create:
err_binding_exists:
err_find_qdisc:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}
2247 
/* Unbind a qevent flow block from a port qevent: find the block by its
 * callback, remove and deconfigure the matching binding, and release the
 * flow_block_cb when its last reference is dropped.
 */
static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
						  struct flow_block_offload *f,
						  enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb)
		return;
	qevent_block = flow_block_cb_priv(block_cb);

	qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
							span_trigger);
	if (!qevent_binding)
		return;

	list_del(&qevent_binding->list);
	mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	mlxsw_sp_qevent_binding_destroy(qevent_binding);

	/* Last reference gone: remove the callback from the block and from
	 * the driver list; the priv is freed via
	 * mlxsw_sp_qevent_block_release().
	 */
	if (!flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}
2276 
2277 static int
mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port * mlxsw_sp_port,struct flow_block_offload * f,enum mlxsw_sp_span_trigger span_trigger,unsigned int action_mask)2278 mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
2279 			       struct flow_block_offload *f,
2280 			       enum mlxsw_sp_span_trigger span_trigger,
2281 			       unsigned int action_mask)
2282 {
2283 	f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
2284 
2285 	switch (f->command) {
2286 	case FLOW_BLOCK_BIND:
2287 		return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
2288 							   span_trigger,
2289 							   action_mask);
2290 	case FLOW_BLOCK_UNBIND:
2291 		mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
2292 		return 0;
2293 	default:
2294 		return -EOPNOTSUPP;
2295 	}
2296 }
2297 
mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port * mlxsw_sp_port,struct flow_block_offload * f)2298 int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
2299 					      struct flow_block_offload *f)
2300 {
2301 	unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) |
2302 				   BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP);
2303 
2304 	return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
2305 					      MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
2306 					      action_mask);
2307 }
2308 
mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port * mlxsw_sp_port,struct flow_block_offload * f)2309 int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
2310 					struct flow_block_offload *f)
2311 {
2312 	unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR);
2313 
2314 	return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
2315 					      MLXSW_SP_SPAN_TRIGGER_ECN,
2316 					      action_mask);
2317 }
2318 
mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port * mlxsw_sp_port)2319 int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
2320 {
2321 	struct mlxsw_sp_qdisc_state *qdisc_state;
2322 
2323 	qdisc_state = kzalloc_obj(*qdisc_state);
2324 	if (!qdisc_state)
2325 		return -ENOMEM;
2326 
2327 	mutex_init(&qdisc_state->lock);
2328 	mlxsw_sp_port->qdisc = qdisc_state;
2329 	return 0;
2330 }
2331 
mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port * mlxsw_sp_port)2332 void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
2333 {
2334 	mutex_destroy(&mlxsw_sp_port->qdisc->lock);
2335 	kfree(mlxsw_sp_port->qdisc);
2336 }
2337