xref: /linux/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c (revision ae22a94997b8a03dcb3c922857c203246711f9d4)
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/module.h>
#include <linux/device.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

/* QSYS calendar information */
#define SPX5_PORTS_PER_CALREG          10  /* Ports mapped in a calendar register */
#define SPX5_CALBITS_PER_PORT          3   /* Bits per port in calendar register */

/* DSM calendar information */
#define SPX5_DSM_CAL_LEN               64
#define SPX5_DSM_CAL_EMPTY             0xFFFF
#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
#define SPX5_DSM_CAL_TAXIS             8
#define SPX5_DSM_CAL_BW_LOSS           553

#define SPX5_TAXI_PORT_MAX             70

#define SPEED_12500                    12500

/* Maps from taxis to port numbers */
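/* Entries of 99 are above SPX5_TAXI_PORT_MAX and mark unused positions on a
 * taxi bus; in sparx5_dsm_calendar_calc() they therefore get a speed of zero.
 */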
static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = {
	{57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23},
	{58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31},
	{59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39},
	{60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47},
	{61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
};

struct sparx5_calendar_data {
	u32 schedule[SPX5_DSM_CAL_LEN];
	u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	u32 new_slots[SPX5_DSM_CAL_LEN];
	u32 temp_sched[SPX5_DSM_CAL_LEN];
	u32 indices[SPX5_DSM_CAL_LEN];
	u32 short_list[SPX5_DSM_CAL_LEN];
	u32 long_list[SPX5_DSM_CAL_LEN];
};

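/* Upper bound on the summed front-port bandwidth, in Mbps, for each supported
 * device variant (used below to validate the port configuration).
 */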
static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
{
	switch (sparx5->target_ct) {
	case SPX5_TARGET_CT_7546:
	case SPX5_TARGET_CT_7546TSN:
		return 65000;
	case SPX5_TARGET_CT_7549:
	case SPX5_TARGET_CT_7549TSN:
		return 91000;
	case SPX5_TARGET_CT_7552:
	case SPX5_TARGET_CT_7552TSN:
		return 129000;
	case SPX5_TARGET_CT_7556:
	case SPX5_TARGET_CT_7556TSN:
		return 161000;
	case SPX5_TARGET_CT_7558:
	case SPX5_TARGET_CT_7558TSN:
		return 201000;
	default:
		return 0;
	}
}

/* This is used in calendar configuration */
enum sparx5_cal_bw {
	SPX5_CAL_SPEED_NONE = 0,
	SPX5_CAL_SPEED_1G   = 1,
	SPX5_CAL_SPEED_2G5  = 2,
	SPX5_CAL_SPEED_5G   = 3,
	SPX5_CAL_SPEED_10G  = 4,
	SPX5_CAL_SPEED_25G  = 5,
	SPX5_CAL_SPEED_0G5  = 6,
	SPX5_CAL_SPEED_12G5 = 7
};

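/* Maximum switch-core bandwidth in Mbps for a given core clock; the values
 * are the clock frequency in kHz divided by three, rounded down to whole
 * thousands.
 */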
static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
{
	switch (cclock) {
	case SPX5_CORE_CLOCK_250MHZ: return 83000; /* 250000 / 3 */
	case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
	case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
	default: return 0;
	}
	return 0;
}

static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
{
	switch (speed) {
	case SPX5_CAL_SPEED_1G:   return 1000;
	case SPX5_CAL_SPEED_2G5:  return 2500;
	case SPX5_CAL_SPEED_5G:   return 5000;
	case SPX5_CAL_SPEED_10G:  return 10000;
	case SPX5_CAL_SPEED_25G:  return 25000;
	case SPX5_CAL_SPEED_0G5:  return 500;
	case SPX5_CAL_SPEED_12G5: return 12500;
	default: return 0;
	}
}

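/* Map an ethtool SPEED_* constant (plus the local SPEED_12500) to a calendar
 * speed; SPEED_UNKNOWN falls back to a 1G slot rather than none.
 */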
static u32 sparx5_bandwidth_to_calendar(u32 bw)
{
	switch (bw) {
	case SPEED_10:      return SPX5_CAL_SPEED_0G5;
	case SPEED_100:     return SPX5_CAL_SPEED_0G5;
	case SPEED_1000:    return SPX5_CAL_SPEED_1G;
	case SPEED_2500:    return SPX5_CAL_SPEED_2G5;
	case SPEED_5000:    return SPX5_CAL_SPEED_5G;
	case SPEED_10000:   return SPX5_CAL_SPEED_10G;
	case SPEED_12500:   return SPX5_CAL_SPEED_12G5;
	case SPEED_25000:   return SPX5_CAL_SPEED_25G;
	case SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G;
	default:            return SPX5_CAL_SPEED_NONE;
	}
}

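/* Calendar speed for a port number: the two CPU ports get a 2G5 slot, the
 * virtual devices (VD0-VD2) and unmapped ports get no dedicated bandwidth,
 * and front ports use their configured bandwidth.
 */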
static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
						    u32 portno)
{
	struct sparx5_port *port;

	if (portno >= SPX5_PORTS) {
		/* Internal ports */
		if (portno == SPX5_PORT_CPU_0 || portno == SPX5_PORT_CPU_1) {
			/* Equals 1.25G */
			return SPX5_CAL_SPEED_2G5;
		} else if (portno == SPX5_PORT_VD0) {
			/* IPMC only idle BW */
			return SPX5_CAL_SPEED_NONE;
		} else if (portno == SPX5_PORT_VD1) {
			/* OAM only idle BW */
			return SPX5_CAL_SPEED_NONE;
		} else if (portno == SPX5_PORT_VD2) {
			/* IPinIP gets only idle BW */
			return SPX5_CAL_SPEED_NONE;
		}
		/* not in port map */
		return SPX5_CAL_SPEED_NONE;
	}
	/* Front ports - may be used */
	port = sparx5->ports[portno];
	if (!port)
		return SPX5_CAL_SPEED_NONE;
	return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
}

/* Auto configure the QSYS calendar based on port configuration */
int sparx5_config_auto_calendar(struct sparx5 *sparx5)
{
	u32 cal[7], value, idx, portno;
	u32 max_core_bw;
	u32 total_bw = 0, used_port_bw = 0;
	int err = 0;
	enum sparx5_cal_bw spd;

	memset(cal, 0, sizeof(cal));

	max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock);
	if (max_core_bw == 0) {
		dev_err(sparx5->dev, "Core clock not supported");
		return -EINVAL;
	}

	/* Setup the calendar with the bandwidth to each port */
	for (portno = 0; portno < SPX5_PORTS_ALL; portno++) {
		u64 reg, offset, this_bw;

		spd = sparx5_get_port_cal_speed(sparx5, portno);
		if (spd == SPX5_CAL_SPEED_NONE)
			continue;

		this_bw = sparx5_cal_speed_to_value(spd);
		if (portno < SPX5_PORTS)
			used_port_bw += this_bw;
		else
			/* Internal ports are granted half the value */
			this_bw = this_bw / 2;
		total_bw += this_bw;
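		/* Each QSYS_CAL_AUTO register packs SPX5_PORTS_PER_CALREG
		 * ports with SPX5_CALBITS_PER_PORT bits per port, so the
		 * register index is portno / 10 and the field offset is
		 * (portno % 10) * 3.
		 */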
		reg = portno;
		offset = do_div(reg, SPX5_PORTS_PER_CALREG);
		cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT);
	}

	if (used_port_bw > sparx5_target_bandwidth(sparx5)) {
		dev_err(sparx5->dev,
			"Port BW %u above target BW %u\n",
			used_port_bw, sparx5_target_bandwidth(sparx5));
		return -EINVAL;
	}

	if (total_bw > max_core_bw) {
		dev_err(sparx5->dev,
			"Total BW %u above switch core BW %u\n",
			total_bw, max_core_bw);
		return -EINVAL;
	}

	/* Halt the calendar while changing it */
	spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
		 QSYS_CAL_CTRL_CAL_MODE,
		 sparx5, QSYS_CAL_CTRL);

	/* Assign port bandwidth to auto calendar */
	for (idx = 0; idx < ARRAY_SIZE(cal); idx++)
		spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));

	/* Increase grant rate of all ports to account for
	 * core clock ppm deviations
	 */
	spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671), /* 672->671 */
		 QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE,
		 sparx5,
		 QSYS_CAL_CTRL);

	/* Grant idle usage to VD 0-2 */
	for (idx = 2; idx < 5; idx++)
		spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12),
			sparx5,
			HSCH_OUTB_SHARE_ENA(idx));

	/* Enable Auto mode */
	spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8),
		 QSYS_CAL_CTRL_CAL_MODE,
		 sparx5, QSYS_CAL_CTRL);

	/* Verify successful calendar config */
	value = spx5_rd(sparx5, QSYS_CAL_CTRL);
	if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) {
		dev_err(sparx5->dev, "QSYS calendar error\n");
		err = -EINVAL;
	}
	return err;
}

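/* Greatest common divisor of two speeds (Euclid's algorithm) */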
static u32 sparx5_dsm_exb_gcd(u32 a, u32 b)
{
	if (b == 0)
		return a;
	return sparx5_dsm_exb_gcd(b, a % b);
}

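/* Number of occupied (non-empty) entries in a calendar schedule */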
static u32 sparx5_dsm_cal_len(u32 *cal)
{
	u32 idx = 0, len = 0;

	while (idx < SPX5_DSM_CAL_LEN) {
		if (cal[idx] != SPX5_DSM_CAL_EMPTY)
			len++;
		idx++;
	}
	return len;
}

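/* Pop the first occupied entry from a schedule: return its value and mark the
 * slot empty.
 */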
static u32 sparx5_dsm_cp_cal(u32 *sched)
{
	u32 idx = 0, tmp;

	while (idx < SPX5_DSM_CAL_LEN) {
		if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
			tmp = sched[idx];
			sched[idx] = SPX5_DSM_CAL_EMPTY;
			return tmp;
		}
		idx++;
	}
	return SPX5_DSM_CAL_EMPTY;
}

static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
				    struct sparx5_calendar_data *data)
{
	bool slow_mode;
	u32 gcd, idx, sum, min, factor;
	u32 num_of_slots, slot_spd, empty_slots;
	u32 taxi_bw, clk_period_ps;

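	/* The taxi bus presumably moves 128 bits per core clock cycle, so
	 * 128 * 10^6 / clk_period_ps is the bus bandwidth in Mbps; slow mode
	 * is used when the core clock period exceeds 2 ns (below 500 MHz).
	 */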
	clk_period_ps = sparx5_clk_period(sparx5->coreclock);
	taxi_bw = 128 * 1000000 / clk_period_ps;
	slow_mode = !!(clk_period_ps > 2000);
	memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi],
	       sizeof(data->taxi_ports));

	for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
		data->new_slots[idx] = SPX5_DSM_CAL_EMPTY;
		data->schedule[idx] = SPX5_DSM_CAL_EMPTY;
		data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY;
	}
	/* Default empty calendar */
	data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;

	/* Map ports to taxi positions */
	for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
		u32 portno = data->taxi_ports[idx];

		if (portno < SPX5_TAXI_PORT_MAX) {
			data->taxi_speeds[idx] = sparx5_cal_speed_to_value
				(sparx5_get_port_cal_speed(sparx5, portno));
		} else {
			data->taxi_speeds[idx] = 0;
		}
	}

	sum = 0;
	min = 25000;
	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
		u32 jdx;

		sum += data->taxi_speeds[idx];
		if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min)
			min = data->taxi_speeds[idx];
		gcd = min;
		for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++)
			gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]);
	}
	if (sum == 0) /* Empty calendar */
		return 0;
	/* Make room for overhead traffic */
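	/* factor is a x1000 fixed-point scale: with SPX5_DSM_CAL_BW_LOSS at
	 * 553 (5.53%), factor becomes ~1058, so the check below effectively
	 * requires the summed speeds to stay below ~94.5% of the taxi
	 * bandwidth.
	 */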
	factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS);

	if (sum * factor > (taxi_bw * 1000)) {
		dev_err(sparx5->dev,
			"Taxi %u, Requested BW %u above available BW %u\n",
			taxi, sum, taxi_bw);
		return -EINVAL;
	}
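	/* Pick a slot granularity: try progressively coarser slot speeds
	 * (gcd / 5, gcd / 2, gcd, then the smallest active speed) until the
	 * calendar fits within 64 slots.
	 */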
	for (idx = 0; idx < 4; idx++) {
		u32 raw_spd;

		if (idx == 0)
			raw_spd = gcd / 5;
		else if (idx == 1)
			raw_spd = gcd / 2;
		else if (idx == 2)
			raw_spd = gcd;
		else
			raw_spd = min;
		slot_spd = raw_spd * factor / 1000;
		num_of_slots = taxi_bw / slot_spd;
		if (num_of_slots <= 64)
			break;
	}

	num_of_slots = num_of_slots > 64 ? 64 : num_of_slots;
	slot_spd = taxi_bw / num_of_slots;

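	/* Per device: dev_slots is the number of calendar slots needed for
	 * the overhead-adjusted speed, and avg_dist is the ideal spacing
	 * between two of its slots in tenths of a slot, used later to verify
	 * the generated schedule.
	 */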
	sum = 0;
	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
		u32 spd = data->taxi_speeds[idx];
		u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000;

		if (adjusted_speed > 0) {
			data->avg_dist[idx] = (128 * 1000000 * 10) /
				(adjusted_speed * clk_period_ps);
		} else {
			data->avg_dist[idx] = -1;
		}
		data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000;
		if (spd != 25000 && (spd != 10000 || !slow_mode)) {
			if (num_of_slots < (5 * data->dev_slots[idx])) {
				dev_err(sparx5->dev,
					"Taxi %u, speed %u, Low slot sep.\n",
					taxi, spd);
				return -EINVAL;
			}
		}
		sum += data->dev_slots[idx];
		if (sum > num_of_slots) {
			dev_err(sparx5->dev,
				"Taxi %u with overhead factor %u\n",
				taxi, factor);
			return -EINVAL;
		}
	}

	empty_slots = num_of_slots - sum;

	for (idx = 0; idx < empty_slots; idx++)
		data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;

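	/* Distribute the devices into the schedule: for each required slot
	 * count, interleave the devices needing that many slots with the
	 * schedule built so far.  The shorter of the two lists is merged into
	 * the longer one in proportion to their lengths (tgt_score), which
	 * spreads each device's slots roughly evenly across the calendar.
	 */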
	for (idx = 1; idx < num_of_slots; idx++) {
		u32 indices_len = 0;
		u32 slot, jdx, kdx, ts;
		s32 cnt;
		u32 num_of_old_slots, num_of_new_slots, tgt_score;

		for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) {
			if (data->dev_slots[slot] == idx) {
				data->indices[indices_len] = slot;
				indices_len++;
			}
		}
		if (indices_len == 0)
			continue;
		kdx = 0;
		for (slot = 0; slot < idx; slot++) {
			for (jdx = 0; jdx < indices_len; jdx++, kdx++)
				data->new_slots[kdx] = data->indices[jdx];
		}

		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY)
				break;
		}

		num_of_old_slots = slot;
		num_of_new_slots = kdx;
		cnt = 0;
		ts = 0;

		if (num_of_new_slots > num_of_old_slots) {
			memcpy(data->short_list, data->schedule,
			       sizeof(data->short_list));
			memcpy(data->long_list, data->new_slots,
			       sizeof(data->long_list));
			tgt_score = 100000 * num_of_old_slots /
				num_of_new_slots;
		} else {
			memcpy(data->short_list, data->new_slots,
			       sizeof(data->short_list));
			memcpy(data->long_list, data->schedule,
			       sizeof(data->long_list));
			tgt_score = 100000 * num_of_new_slots /
				num_of_old_slots;
		}

		while (sparx5_dsm_cal_len(data->short_list) > 0 ||
		       sparx5_dsm_cal_len(data->long_list) > 0) {
			u32 act = 0;

			if (sparx5_dsm_cal_len(data->short_list) > 0) {
				data->temp_sched[ts] =
					sparx5_dsm_cp_cal(data->short_list);
				ts++;
				cnt += 100000;
				act = 1;
			}
			while (sparx5_dsm_cal_len(data->long_list) > 0 &&
			       cnt > 0) {
				data->temp_sched[ts] =
					sparx5_dsm_cp_cal(data->long_list);
				ts++;
				cnt -= tgt_score;
				act = 1;
			}
			if (act == 0) {
				dev_err(sparx5->dev,
					"Error in DSM calendar calculation\n");
				return -EINVAL;
			}
		}

		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY)
				break;
		}
		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			data->schedule[slot] = data->temp_sched[slot];
			data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY;
			data->new_slots[slot] = SPX5_DSM_CAL_EMPTY;
		}
	}
	return 0;
}

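/* Verify the generated schedule: for every device, walk the gaps between its
 * consecutive slots (in tenths of a slot, wrapping around the calendar) and
 * check that the accumulated deviation from the ideal spacing (avg_dist)
 * never exceeds one ideal spacing.
 */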
static int sparx5_dsm_calendar_check(struct sparx5 *sparx5,
				     struct sparx5_calendar_data *data)
{
	u32 num_of_slots, idx, port;
	int cnt, max_dist;
	u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN];
	u32 cal_length = sparx5_dsm_cal_len(data->schedule);

	for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) {
		num_of_slots = 0;
		max_dist = data->avg_dist[port];
		for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
			slot_indices[idx] = SPX5_DSM_CAL_EMPTY;
			distances[idx] = SPX5_DSM_CAL_EMPTY;
		}

		for (idx = 0; idx < cal_length; idx++) {
			if (data->schedule[idx] == port) {
				slot_indices[num_of_slots] = idx;
				num_of_slots++;
			}
		}

		slot_indices[num_of_slots] = slot_indices[0] + cal_length;

		for (idx = 0; idx < num_of_slots; idx++) {
			distances[idx] = (slot_indices[idx + 1] -
					  slot_indices[idx]) * 10;
		}

		for (idx = 0; idx < num_of_slots; idx++) {
			u32 jdx, kdx;

			cnt = distances[idx] - max_dist;
			if (cnt < 0)
				cnt = -cnt;
			kdx = 0;
			for (jdx = (idx + 1) % num_of_slots;
			     jdx != idx;
			     jdx = (jdx + 1) % num_of_slots, kdx++) {
				cnt = cnt + distances[jdx] - max_dist;
				if (cnt < 0)
					cnt = -cnt;
				if (cnt > max_dist)
					goto check_err;
			}
		}
	}
	return 0;
check_err:
	dev_err(sparx5->dev,
		"Port %u: distance %u above limit %d\n",
		port, cnt, max_dist);
	return -EINVAL;
}

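/* Program the schedule into the DSM taxi calendar registers: enable
 * programming mode, write one (index, value) pair per slot, disable
 * programming again, and verify the result (CAL_CUR_LEN is expected to read
 * back as the calendar length minus one).
 */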
static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
				      struct sparx5_calendar_data *data)
{
	u32 idx;
	u32 cal_len = sparx5_dsm_cal_len(data->schedule), len;

	spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
		sparx5,
		DSM_TAXI_CAL_CFG(taxi));
	for (idx = 0; idx < cal_len; idx++) {
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
			 DSM_TAXI_CAL_CFG_CAL_IDX,
			 sparx5,
			 DSM_TAXI_CAL_CFG(taxi));
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]),
			 DSM_TAXI_CAL_CFG_CAL_PGM_VAL,
			 sparx5,
			 DSM_TAXI_CAL_CFG(taxi));
	}
	spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
		sparx5,
		DSM_TAXI_CAL_CFG(taxi));
	len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
						       DSM_TAXI_CAL_CFG(taxi)));
	if (len != cal_len - 1)
		goto update_err;
	return 0;
update_err:
	dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
	return -EINVAL;
}

/* Configure the DSM calendar based on port configuration */
int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
{
	int taxi;
	struct sparx5_calendar_data *data;
	int err = 0;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (taxi = 0; taxi < SPX5_DSM_CAL_TAXIS; ++taxi) {
		err = sparx5_dsm_calendar_calc(sparx5, taxi, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar calculation failed\n");
			goto cal_out;
		}
		err = sparx5_dsm_calendar_check(sparx5, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar check failed\n");
			goto cal_out;
		}
		err = sparx5_dsm_calendar_update(sparx5, taxi, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar update failed\n");
			goto cal_out;
		}
	}
cal_out:
	kfree(data);
	return err;
}