// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright 2020-2021 NXP
 */
#include <net/devlink.h>
#include "ocelot.h"

/* The queue system tracks four resource consumptions:
 * Resource 0: Memory tracked per source port
 * Resource 1: Frame references tracked per source port
 * Resource 2: Memory tracked per destination port
 * Resource 3: Frame references tracked per destination port
 */
#define OCELOT_RESOURCE_SZ		256
#define OCELOT_NUM_RESOURCES		4

#define BUF_xxxx_I			(0 * OCELOT_RESOURCE_SZ)
#define REF_xxxx_I			(1 * OCELOT_RESOURCE_SZ)
#define BUF_xxxx_E			(2 * OCELOT_RESOURCE_SZ)
#define REF_xxxx_E			(3 * OCELOT_RESOURCE_SZ)

/* For each resource type there are 4 types of watermarks:
 * Q_RSRV: reservation per QoS class per port
 * PRIO_SHR: sharing watermark per QoS class across all ports
 * P_RSRV: reservation per port
 * COL_SHR: sharing watermark per color (drop precedence) across all ports
 */
#define xxx_Q_RSRV_x			0
#define xxx_PRIO_SHR_x			216
#define xxx_P_RSRV_x			224
#define xxx_COL_SHR_x			254

/* Reservation Watermarks
 * ----------------------
 *
 * For setting up the reserved areas, watermarks exist per port and per
 * QoS class, for both ingress and egress.
 */

/*  Amount of packet buffer
 *  |  per QoS class
 *  |  |  reserved
 *  |  |  |   per egress port
 *  |  |  |   |
 *  V  V  v   v
 * BUF_Q_RSRV_E
 */
#define BUF_Q_RSRV_E(port, prio) \
	(BUF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))

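/* A worked example of the indexing scheme, assuming OCELOT_NUM_TC is 8:
 * BUF_Q_RSRV_E(2, 3) = 2 * 256 + 0 + 8 * 2 + 3 = 531, i.e. the watermark
 * holding the egress packet memory reservation of QoS class 3 on port 2.
 */
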
/*  Amount of packet buffer
 *  |  for all port's traffic classes
 *  |  |  reserved
 *  |  |  |   per egress port
 *  |  |  |   |
 *  V  V  v   v
 * BUF_P_RSRV_E
 */
#define BUF_P_RSRV_E(port) \
	(BUF_xxxx_E + xxx_P_RSRV_x + (port))

/*  Amount of packet buffer
 *  |  per QoS class
 *  |  |  reserved
 *  |  |  |   per ingress port
 *  |  |  |   |
 *  V  V  v   v
 * BUF_Q_RSRV_I
 */
#define BUF_Q_RSRV_I(port, prio) \
	(BUF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))

/*  Amount of packet buffer
 *  |  for all port's traffic classes
 *  |  |  reserved
 *  |  |  |   per ingress port
 *  |  |  |   |
 *  V  V  v   v
 * BUF_P_RSRV_I
 */
#define BUF_P_RSRV_I(port) \
	(BUF_xxxx_I + xxx_P_RSRV_x + (port))

/*  Amount of frame references
 *  |  per QoS class
 *  |  |  reserved
 *  |  |  |   per egress port
 *  |  |  |   |
 *  V  V  v   v
 * REF_Q_RSRV_E
 */
#define REF_Q_RSRV_E(port, prio) \
	(REF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))

/*  Amount of frame references
 *  |  for all port's traffic classes
 *  |  |  reserved
 *  |  |  |   per egress port
 *  |  |  |   |
 *  V  V  v   v
 * REF_P_RSRV_E
 */
#define REF_P_RSRV_E(port) \
	(REF_xxxx_E + xxx_P_RSRV_x + (port))

/*  Amount of frame references
 *  |  per QoS class
 *  |  |  reserved
 *  |  |  |   per ingress port
 *  |  |  |   |
 *  V  V  v   v
 * REF_Q_RSRV_I
 */
#define REF_Q_RSRV_I(port, prio) \
	(REF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))

/*  Amount of frame references
 *  |  for all port's traffic classes
 *  |  |  reserved
 *  |  |  |   per ingress port
 *  |  |  |   |
 *  V  V  v   v
 * REF_P_RSRV_I
 */
#define REF_P_RSRV_I(port) \
	(REF_xxxx_I + xxx_P_RSRV_x + (port))

/* Sharing Watermarks
 * ------------------
 *
 * The shared memory area is shared between all ports.
 */

/* Amount of buffer
 *  |   per QoS class
 *  |   |    from the shared memory area
 *  |   |    |  for egress traffic
 *  |   |    |  |
 *  V   V    v  v
 * BUF_PRIO_SHR_E
 */
#define BUF_PRIO_SHR_E(prio) \
	(BUF_xxxx_E + xxx_PRIO_SHR_x + (prio))

/* Amount of buffer
 *  |   per color (drop precedence level)
 *  |   |   from the shared memory area
 *  |   |   |  for egress traffic
 *  |   |   |  |
 *  V   V   v  v
 * BUF_COL_SHR_E
 */
#define BUF_COL_SHR_E(dp) \
	(BUF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))

/* Amount of buffer
 *  |   per QoS class
 *  |   |    from the shared memory area
 *  |   |    |  for ingress traffic
 *  |   |    |  |
 *  V   V    v  v
 * BUF_PRIO_SHR_I
 */
#define BUF_PRIO_SHR_I(prio) \
	(BUF_xxxx_I + xxx_PRIO_SHR_x + (prio))

/* Amount of buffer
 *  |   per color (drop precedence level)
 *  |   |   from the shared memory area
 *  |   |   |  for ingress traffic
 *  |   |   |  |
 *  V   V   v  v
 * BUF_COL_SHR_I
 */
#define BUF_COL_SHR_I(dp) \
	(BUF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))

/* Amount of frame references
 *  |   per QoS class
 *  |   |    from the shared area
 *  |   |    |  for egress traffic
 *  |   |    |  |
 *  V   V    v  v
 * REF_PRIO_SHR_E
 */
#define REF_PRIO_SHR_E(prio) \
	(REF_xxxx_E + xxx_PRIO_SHR_x + (prio))

/* Amount of frame references
 *  |   per color (drop precedence level)
 *  |   |   from the shared area
 *  |   |   |  for egress traffic
 *  |   |   |  |
 *  V   V   v  v
 * REF_COL_SHR_E
 */
#define REF_COL_SHR_E(dp) \
	(REF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))

/* Amount of frame references
 *  |   per QoS class
 *  |   |    from the shared area
 *  |   |    |  for ingress traffic
 *  |   |    |  |
 *  V   V    v  v
 * REF_PRIO_SHR_I
 */
#define REF_PRIO_SHR_I(prio) \
	(REF_xxxx_I + xxx_PRIO_SHR_x + (prio))

/* Amount of frame references
 *  |   per color (drop precedence level)
 *  |   |   from the shared area
 *  |   |   |  for ingress traffic
 *  |   |   |  |
 *  V   V   v  v
 * REF_COL_SHR_I
 */
#define REF_COL_SHR_I(dp) \
	(REF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))

static u32 ocelot_wm_read(struct ocelot *ocelot, int index)
{
	int wm = ocelot_read_gix(ocelot, QSYS_RES_CFG, index);

	return ocelot->ops->wm_dec(wm);
}

static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val)
{
	u32 wm = ocelot->ops->wm_enc(val);

	ocelot_write_gix(ocelot, wm, QSYS_RES_CFG, index);
}

static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse,
			     u32 *maxuse)
{
	int res_stat = ocelot_read_gix(ocelot, QSYS_RES_STAT, index);

	ocelot->ops->wm_stat(res_stat, inuse, maxuse);
}

/* The hardware comes out of reset with strange defaults: the sum of all
 * reservations for frame memory is larger than the total buffer size.
 * One has to wonder how the reservation watermarks can still guarantee
 * anything under congestion.
 * Bring some sense into the hardware by changing the defaults to disable all
 * reservations and rely only on the sharing watermark for frames with drop
 * precedence 0. The user can still explicitly request reservations per port
 * and per port-tc through devlink-sb.
 */
static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot,
						  int port)
{
	int prio;

	for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
		ocelot_wm_write(ocelot, BUF_Q_RSRV_I(port, prio), 0);
		ocelot_wm_write(ocelot, BUF_Q_RSRV_E(port, prio), 0);
		ocelot_wm_write(ocelot, REF_Q_RSRV_I(port, prio), 0);
		ocelot_wm_write(ocelot, REF_Q_RSRV_E(port, prio), 0);
	}

	ocelot_wm_write(ocelot, BUF_P_RSRV_I(port), 0);
	ocelot_wm_write(ocelot, BUF_P_RSRV_E(port), 0);
	ocelot_wm_write(ocelot, REF_P_RSRV_I(port), 0);
	ocelot_wm_write(ocelot, REF_P_RSRV_E(port), 0);
}

/* We want the sharing watermarks to consume all nonreserved resources, for
 * efficient resource utilization (a single traffic flow should be able to use
 * up the entire buffer space and frame resources as long as there's no
 * interference).
 * The switch has 10 sharing watermarks per lookup: 8 per traffic class and 2
 * per color (drop precedence).
 * The trouble with configuring these sharing watermarks is that:
 * (1) There's a risk that we overcommit the resources if we configure
 *     (a) all 8 per-TC sharing watermarks to the max
 *     (b) all 2 per-color sharing watermarks to the max
 * (2) There's a risk that we undercommit the resources if we configure
 *     (a) all 8 per-TC sharing watermarks to "max / 8"
 *     (b) all 2 per-color sharing watermarks to "max / 2"
 * So for Linux, let's just disable the sharing watermarks per traffic class
 * (setting them to 0 will make them always exceeded), and rely only on the
 * sharing watermark for drop priority 0. So frames with drop priority set to 1
 * by QoS classification or policing will still be allowed, but only as long as
 * the port and port-TC reservations are not exceeded.
 */
static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot)
{
	int prio;

	for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
		ocelot_wm_write(ocelot, BUF_PRIO_SHR_I(prio), 0);
		ocelot_wm_write(ocelot, BUF_PRIO_SHR_E(prio), 0);
		ocelot_wm_write(ocelot, REF_PRIO_SHR_I(prio), 0);
		ocelot_wm_write(ocelot, REF_PRIO_SHR_E(prio), 0);
	}
}

static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i,
				u32 *buf_rsrv_e)
{
	int port, prio;

	*buf_rsrv_i = 0;
	*buf_rsrv_e = 0;

	for (port = 0; port <= ocelot->num_phys_ports; port++) {
		for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
			*buf_rsrv_i += ocelot_wm_read(ocelot,
						      BUF_Q_RSRV_I(port, prio));
			*buf_rsrv_e += ocelot_wm_read(ocelot,
						      BUF_Q_RSRV_E(port, prio));
		}

		*buf_rsrv_i += ocelot_wm_read(ocelot, BUF_P_RSRV_I(port));
		*buf_rsrv_e += ocelot_wm_read(ocelot, BUF_P_RSRV_E(port));
	}

	*buf_rsrv_i *= OCELOT_BUFFER_CELL_SZ;
	*buf_rsrv_e *= OCELOT_BUFFER_CELL_SZ;
}

static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i,
				u32 *ref_rsrv_e)
{
	int port, prio;

	*ref_rsrv_i = 0;
	*ref_rsrv_e = 0;

	for (port = 0; port <= ocelot->num_phys_ports; port++) {
		for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
			*ref_rsrv_i += ocelot_wm_read(ocelot,
						      REF_Q_RSRV_I(port, prio));
			*ref_rsrv_e += ocelot_wm_read(ocelot,
						      REF_Q_RSRV_E(port, prio));
		}

		*ref_rsrv_i += ocelot_wm_read(ocelot, REF_P_RSRV_I(port));
		*ref_rsrv_e += ocelot_wm_read(ocelot, REF_P_RSRV_E(port));
	}
}

/* Calculate all reservations, then set up the sharing watermark for DP=0 to
 * consume the remaining resources up to the pool's configured size.
 */
static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot)
{
	u32 buf_rsrv_i, buf_rsrv_e;
	u32 ref_rsrv_i, ref_rsrv_e;
	u32 buf_shr_i, buf_shr_e;
	u32 ref_shr_i, ref_shr_e;

	ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
	ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);

	buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] -
		    buf_rsrv_i;
	buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] -
		    buf_rsrv_e;
	ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] -
		    ref_rsrv_i;
	ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] -
		    ref_rsrv_e;

	buf_shr_i /= OCELOT_BUFFER_CELL_SZ;
	buf_shr_e /= OCELOT_BUFFER_CELL_SZ;

	ocelot_wm_write(ocelot, BUF_COL_SHR_I(0), buf_shr_i);
	ocelot_wm_write(ocelot, BUF_COL_SHR_E(0), buf_shr_e);
	ocelot_wm_write(ocelot, REF_COL_SHR_E(0), ref_shr_e);
	ocelot_wm_write(ocelot, REF_COL_SHR_I(0), ref_shr_i);
	ocelot_wm_write(ocelot, BUF_COL_SHR_I(1), 0);
	ocelot_wm_write(ocelot, BUF_COL_SHR_E(1), 0);
	ocelot_wm_write(ocelot, REF_COL_SHR_E(1), 0);
	ocelot_wm_write(ocelot, REF_COL_SHR_I(1), 0);
}

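/* A worked example of the computation above, with illustrative numbers and
 * assuming a 60-byte OCELOT_BUFFER_CELL_SZ: if the ingress buffer pool is
 * 120000 bytes and the ingress reservations sum to 12000 bytes, the remaining
 * 108000 bytes are granted to the DP=0 sharing watermark, i.e.
 * BUF_COL_SHR_I(0) is written with 108000 / 60 = 1800 cells. Frame reference
 * pools are counted in frames, so REF_COL_SHR_I/E take the difference
 * directly.
 */
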
/* Ensure that all reservations can be enforced */
static int ocelot_watermark_validate(struct ocelot *ocelot,
				     struct netlink_ext_ack *extack)
{
	u32 buf_rsrv_i, buf_rsrv_e;
	u32 ref_rsrv_i, ref_rsrv_e;

	ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
	ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);

	if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress frame reservations exceed pool size");
		return -ERANGE;
	}
	if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress frame reservations exceed pool size");
		return -ERANGE;
	}
	if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress reference reservations exceed pool size");
		return -ERANGE;
	}
	if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress reference reservations exceed pool size");
		return -ERANGE;
	}

	return 0;
}

/* The hardware works like this:
 *
 *                         Frame forwarding decision taken
 *                                       |
 *                                       v
 *       +--------------------+--------------------+--------------------+
 *       |                    |                    |                    |
 *       v                    v                    v                    v
 * Ingress memory       Egress memory        Ingress frame        Egress frame
 *     check                check           reference check      reference check
 *       |                    |                    |                    |
 *       v                    v                    v                    v
 *  BUF_Q_RSRV_I   ok    BUF_Q_RSRV_E   ok    REF_Q_RSRV_I   ok     REF_Q_RSRV_E   ok
 *(src port, prio) -+  (dst port, prio) -+  (src port, prio) -+   (dst port, prio) -+
 *       |          |         |          |         |          |         |           |
 *       |exceeded  |         |exceeded  |         |exceeded  |         |exceeded   |
 *       v          |         v          |         v          |         v           |
 *  BUF_P_RSRV_I  ok|    BUF_P_RSRV_E  ok|    REF_P_RSRV_I  ok|    REF_P_RSRV_E   ok|
 *   (src port) ----+     (dst port) ----+     (src port) ----+     (dst port) -----+
 *       |          |         |          |         |          |         |           |
 *       |exceeded  |         |exceeded  |         |exceeded  |         |exceeded   |
 *       v          |         v          |         v          |         v           |
 * BUF_PRIO_SHR_I ok|   BUF_PRIO_SHR_E ok|   REF_PRIO_SHR_I ok|   REF_PRIO_SHR_E  ok|
 *     (prio) ------+       (prio) ------+       (prio) ------+       (prio) -------+
 *       |          |         |          |         |          |         |           |
 *       |exceeded  |         |exceeded  |         |exceeded  |         |exceeded   |
 *       v          |         v          |         v          |         v           |
 * BUF_COL_SHR_I  ok|   BUF_COL_SHR_E  ok|   REF_COL_SHR_I  ok|   REF_COL_SHR_E   ok|
 *      (dp) -------+        (dp) -------+        (dp) -------+        (dp) --------+
 *       |          |         |          |         |          |         |           |
 *       |exceeded  |         |exceeded  |         |exceeded  |         |exceeded   |
 *       v          v         v          v         v          v         v           v
 *      fail     success     fail     success     fail     success     fail      success
 *       |          |         |          |         |          |         |           |
 *       v          v         v          v         v          v         v           v
 *       +-----+----+         +-----+----+         +-----+----+         +-----+-----+
 *             |                    |                    |                    |
 *             +-------> OR <-------+                    +-------> OR <-------+
 *                        |                                        |
 *                        v                                        v
 *                        +----------------> AND <-----------------+
 *                                            |
 *                                            v
 *                                    FIFO drop / accept
 *
 * We are modeling each of the 4 parallel lookups as a devlink-sb pool.
 * At least one (ingress or egress) memory pool and one (ingress or egress)
 * frame reference pool need to have resources for frame acceptance to succeed.
 *
 * The following watermarks are controlled explicitly through devlink-sb:
 * BUF_Q_RSRV_I, BUF_Q_RSRV_E, REF_Q_RSRV_I, REF_Q_RSRV_E
 * BUF_P_RSRV_I, BUF_P_RSRV_E, REF_P_RSRV_I, REF_P_RSRV_E
 * The following watermarks are controlled implicitly through devlink-sb:
 * BUF_COL_SHR_I, BUF_COL_SHR_E, REF_COL_SHR_I, REF_COL_SHR_E
 * The following watermarks are unused and disabled:
 * BUF_PRIO_SHR_I, BUF_PRIO_SHR_E, REF_PRIO_SHR_I, REF_PRIO_SHR_E
 *
 * This function overrides the hardware defaults with more sane ones (no
 * reservations by default, let sharing use all resources) and disables the
 * unused watermarks.
 */
static void ocelot_watermark_init(struct ocelot *ocelot)
{
	int all_tcs = GENMASK(OCELOT_NUM_TC - 1, 0);
	int port;

	ocelot_write(ocelot, all_tcs, QSYS_RES_QOS_MODE);

	for (port = 0; port <= ocelot->num_phys_ports; port++)
		ocelot_disable_reservation_watermarks(ocelot, port);

	ocelot_disable_tc_sharing_watermarks(ocelot);
	ocelot_setup_sharing_watermarks(ocelot);
}

/* Watermark encode
 * Bit 8:   Unit; 0:1, 1:16
 * Bit 7-0: Value to be multiplied with unit
 */
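/* For example (values chosen for illustration): ocelot_wm_enc(300) returns
 * BIT(8) | (300 / 16) = 0x112, and ocelot_wm_dec(0x112) gives 18 * 16 = 288,
 * so watermarks above 255 are quantized down to a multiple of 16. Values up
 * to 255 are encoded and decoded exactly.
 */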
u16 ocelot_wm_enc(u16 value)
{
	WARN_ON(value >= 16 * BIT(8));

	if (value >= BIT(8))
		return BIT(8) | (value / 16);

	return value;
}
EXPORT_SYMBOL(ocelot_wm_enc);

u16 ocelot_wm_dec(u16 wm)
{
	if (wm & BIT(8))
		return (wm & GENMASK(7, 0)) * 16;

	return wm;
}
EXPORT_SYMBOL(ocelot_wm_dec);

void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
{
	*inuse = (val & GENMASK(23, 12)) >> 12;
	*maxuse = val & GENMASK(11, 0);
}
EXPORT_SYMBOL(ocelot_wm_stat);

/* Pool size and type are fixed up at runtime. Keeping this structure to
 * look up the cell size multipliers.
 */
static const struct devlink_sb_pool_info ocelot_sb_pool[] = {
	[OCELOT_SB_BUF] = {
		.cell_size = OCELOT_BUFFER_CELL_SZ,
		.threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
	},
	[OCELOT_SB_REF] = {
		.cell_size = 1,
		.threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
	},
};

/* Returns the pool size configured through ocelot_sb_pool_set */
int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
		       u16 pool_index,
		       struct devlink_sb_pool_info *pool_info)
{
	if (sb_index >= OCELOT_SB_NUM)
		return -ENODEV;
	if (pool_index >= OCELOT_SB_POOL_NUM)
		return -ENODEV;

	*pool_info = ocelot_sb_pool[sb_index];
	pool_info->size = ocelot->pool_size[sb_index][pool_index];
	if (pool_index)
		pool_info->pool_type = DEVLINK_SB_POOL_TYPE_EGRESS;
	else
		pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS;

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_pool_get);

/* The pool size received here configures the total amount of resources used on
 * ingress (or on egress, depending upon the pool index). The pool size, minus
 * the values for the port and port-tc reservations, is written into the
 * COL_SHR(dp=0) sharing watermark.
 */
int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
		       u16 pool_index, u32 size,
		       enum devlink_sb_threshold_type threshold_type,
		       struct netlink_ext_ack *extack)
{
	u32 old_pool_size;
	int err;

	if (sb_index >= OCELOT_SB_NUM) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid sb, use 0 for buffers and 1 for frame references");
		return -ENODEV;
	}
	if (pool_index >= OCELOT_SB_POOL_NUM) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid pool, use 0 for ingress and 1 for egress");
		return -ENODEV;
	}
	if (threshold_type != DEVLINK_SB_THRESHOLD_TYPE_STATIC) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only static threshold supported");
		return -EOPNOTSUPP;
	}

	old_pool_size = ocelot->pool_size[sb_index][pool_index];
	ocelot->pool_size[sb_index][pool_index] = size;

	err = ocelot_watermark_validate(ocelot, extack);
	if (err) {
		ocelot->pool_size[sb_index][pool_index] = old_pool_size;
		return err;
	}

	ocelot_setup_sharing_watermarks(ocelot);

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_pool_set);

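/* An illustrative invocation from user space (the devlink handle below is
 * made up for the example): shrinking the ingress packet buffer pool of
 * shared buffer 0 to 60000 bytes would be requested as
 *   devlink sb pool set pci/0000:00:00.5 sb 0 pool 0 size 60000 thtype static
 * and lands here with sb_index = 0, pool_index = 0 and size = 60000.
 */
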
/* This retrieves the configuration made with ocelot_sb_port_pool_set */
int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
			    unsigned int sb_index, u16 pool_index,
			    u32 *p_threshold)
{
	int wm_index;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = BUF_P_RSRV_I(port);
		else
			wm_index = BUF_P_RSRV_E(port);
		break;
	case OCELOT_SB_REF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = REF_P_RSRV_I(port);
		else
			wm_index = REF_P_RSRV_E(port);
		break;
	default:
		return -ENODEV;
	}

	*p_threshold = ocelot_wm_read(ocelot, wm_index);
	*p_threshold *= ocelot_sb_pool[sb_index].cell_size;

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_port_pool_get);

/* This configures the P_RSRV per-port reserved resource watermark */
int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
			    unsigned int sb_index, u16 pool_index,
			    u32 threshold, struct netlink_ext_ack *extack)
{
	int wm_index, err;
	u32 old_thr;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = BUF_P_RSRV_I(port);
		else
			wm_index = BUF_P_RSRV_E(port);
		break;
	case OCELOT_SB_REF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = REF_P_RSRV_I(port);
		else
			wm_index = REF_P_RSRV_E(port);
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
		return -ENODEV;
	}

	threshold /= ocelot_sb_pool[sb_index].cell_size;

	old_thr = ocelot_wm_read(ocelot, wm_index);
	ocelot_wm_write(ocelot, wm_index, threshold);

	err = ocelot_watermark_validate(ocelot, extack);
	if (err) {
		ocelot_wm_write(ocelot, wm_index, old_thr);
		return err;
	}

	ocelot_setup_sharing_watermarks(ocelot);

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_port_pool_set);

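/* Illustrative usage (hypothetical devlink port handle): reserving 6000
 * bytes of egress packet memory for port 2 could be requested as
 *   devlink sb port pool set pci/0000:00:00.5/2 sb 0 pool 1 th 6000
 * The threshold arrives here in bytes and is converted to buffer cells
 * (OCELOT_BUFFER_CELL_SZ bytes each) before being written to BUF_P_RSRV_E.
 */
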
/* This retrieves the configuration done by ocelot_sb_tc_pool_bind_set */
int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
			       unsigned int sb_index, u16 tc_index,
			       enum devlink_sb_pool_type pool_type,
			       u16 *p_pool_index, u32 *p_threshold)
{
	int wm_index;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = BUF_Q_RSRV_I(port, tc_index);
		else
			wm_index = BUF_Q_RSRV_E(port, tc_index);
		break;
	case OCELOT_SB_REF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = REF_Q_RSRV_I(port, tc_index);
		else
			wm_index = REF_Q_RSRV_E(port, tc_index);
		break;
	default:
		return -ENODEV;
	}

	*p_threshold = ocelot_wm_read(ocelot, wm_index);
	*p_threshold *= ocelot_sb_pool[sb_index].cell_size;

	if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
		*p_pool_index = 0;
	else
		*p_pool_index = 1;

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_get);

/* This configures the Q_RSRV per-port-tc reserved resource watermark */
int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
			       unsigned int sb_index, u16 tc_index,
			       enum devlink_sb_pool_type pool_type,
			       u16 pool_index, u32 threshold,
			       struct netlink_ext_ack *extack)
{
	int wm_index, err;
	u32 old_thr;

	/* Paranoid check: the pool index must match the requested direction */
	if (pool_index == OCELOT_SB_POOL_ING &&
	    pool_type != DEVLINK_SB_POOL_TYPE_INGRESS)
		return -EINVAL;
	if (pool_index == OCELOT_SB_POOL_EGR &&
	    pool_type != DEVLINK_SB_POOL_TYPE_EGRESS)
		return -EINVAL;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = BUF_Q_RSRV_I(port, tc_index);
		else
			wm_index = BUF_Q_RSRV_E(port, tc_index);
		break;
	case OCELOT_SB_REF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = REF_Q_RSRV_I(port, tc_index);
		else
			wm_index = REF_Q_RSRV_E(port, tc_index);
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
		return -ENODEV;
	}

	threshold /= ocelot_sb_pool[sb_index].cell_size;

	old_thr = ocelot_wm_read(ocelot, wm_index);
	ocelot_wm_write(ocelot, wm_index, threshold);
	err = ocelot_watermark_validate(ocelot, extack);
	if (err) {
		ocelot_wm_write(ocelot, wm_index, old_thr);
		return err;
	}

	ocelot_setup_sharing_watermarks(ocelot);

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_set);

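/* Illustrative usage (hypothetical devlink port handle): reserving 10 frame
 * references for traffic class 7 on egress of port 1 could be requested as
 *   devlink sb tc bind set pci/0000:00:00.5/1 sb 1 tc 7 type egress pool 1 th 10
 * which programs REF_Q_RSRV_E(1, 7); the frame reference pool has a cell
 * size of 1, so the threshold is used as-is.
 */
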
/* The hardware does not support atomic snapshots; we read out the occupancy
 * registers individually and keep this as just a stub.
 */
int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index)
{
	return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_snapshot);

/* The watermark occupancy registers are cleared upon read,
 * so let's read them.
 */
int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index)
{
	u32 inuse, maxuse;
	int port, prio;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		for (port = 0; port <= ocelot->num_phys_ports; port++) {
			for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
				ocelot_wm_status(ocelot, BUF_Q_RSRV_I(port, prio),
						 &inuse, &maxuse);
				ocelot_wm_status(ocelot, BUF_Q_RSRV_E(port, prio),
						 &inuse, &maxuse);
			}
			ocelot_wm_status(ocelot, BUF_P_RSRV_I(port),
					 &inuse, &maxuse);
			ocelot_wm_status(ocelot, BUF_P_RSRV_E(port),
					 &inuse, &maxuse);
		}
		break;
	case OCELOT_SB_REF:
		for (port = 0; port <= ocelot->num_phys_ports; port++) {
			for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
				ocelot_wm_status(ocelot, REF_Q_RSRV_I(port, prio),
						 &inuse, &maxuse);
				ocelot_wm_status(ocelot, REF_Q_RSRV_E(port, prio),
						 &inuse, &maxuse);
			}
			ocelot_wm_status(ocelot, REF_P_RSRV_I(port),
					 &inuse, &maxuse);
			ocelot_wm_status(ocelot, REF_P_RSRV_E(port),
					 &inuse, &maxuse);
		}
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_max_clear);

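/* Typical usage from user space (device handles are illustrative):
 *   devlink sb occupancy snapshot pci/0000:00:00.5 sb 0
 *   devlink sb occupancy show pci/0000:00:00.5/1 sb 0
 *   devlink sb occupancy clearmax pci/0000:00:00.5 sb 0
 * The show command is served by the two occupancy getters below, which
 * report current and maximum usage scaled back to bytes (or frames).
 */
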
/* This retrieves the watermark occupancy for per-port P_RSRV watermarks */
int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
				unsigned int sb_index, u16 pool_index,
				u32 *p_cur, u32 *p_max)
{
	int wm_index;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = BUF_P_RSRV_I(port);
		else
			wm_index = BUF_P_RSRV_E(port);
		break;
	case OCELOT_SB_REF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = REF_P_RSRV_I(port);
		else
			wm_index = REF_P_RSRV_E(port);
		break;
	default:
		return -ENODEV;
	}

	ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
	*p_cur *= ocelot_sb_pool[sb_index].cell_size;
	*p_max *= ocelot_sb_pool[sb_index].cell_size;

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_port_pool_get);

/* This retrieves the watermark occupancy for per-port-tc Q_RSRV watermarks */
int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
				   unsigned int sb_index, u16 tc_index,
				   enum devlink_sb_pool_type pool_type,
				   u32 *p_cur, u32 *p_max)
{
	int wm_index;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = BUF_Q_RSRV_I(port, tc_index);
		else
			wm_index = BUF_Q_RSRV_E(port, tc_index);
		break;
	case OCELOT_SB_REF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = REF_Q_RSRV_I(port, tc_index);
		else
			wm_index = REF_Q_RSRV_E(port, tc_index);
		break;
	default:
		return -ENODEV;
	}

	ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
	*p_cur *= ocelot_sb_pool[sb_index].cell_size;
	*p_max *= ocelot_sb_pool[sb_index].cell_size;

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_tc_port_bind_get);

int ocelot_devlink_sb_register(struct ocelot *ocelot)
{
	int err;

	err = devlink_sb_register(ocelot->devlink, OCELOT_SB_BUF,
				  ocelot->packet_buffer_size, 1, 1,
				  OCELOT_NUM_TC, OCELOT_NUM_TC);
	if (err)
		return err;

	err = devlink_sb_register(ocelot->devlink, OCELOT_SB_REF,
				  ocelot->num_frame_refs, 1, 1,
				  OCELOT_NUM_TC, OCELOT_NUM_TC);
	if (err) {
		devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
		return err;
	}

	ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] = ocelot->packet_buffer_size;
	ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] = ocelot->packet_buffer_size;
	ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] = ocelot->num_frame_refs;
	ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] = ocelot->num_frame_refs;

	ocelot_watermark_init(ocelot);

	return 0;
}
EXPORT_SYMBOL(ocelot_devlink_sb_register);

void ocelot_devlink_sb_unregister(struct ocelot *ocelot)
{
	devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
	devlink_sb_unregister(ocelot->devlink, OCELOT_SB_REF);
}
EXPORT_SYMBOL(ocelot_devlink_sb_unregister);
917