// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/module.h>
#include <linux/device.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

/* QSYS calendar information */
#define SPX5_PORTS_PER_CALREG          10  /* Ports mapped in a calendar register */
#define SPX5_CALBITS_PER_PORT          3   /* Bits per port in calendar register */

/* DSM calendar information */
#define SPX5_DSM_CAL_EMPTY             0xFFFF
#define SPX5_DSM_CAL_TAXIS             8
#define SPX5_DSM_CAL_BW_LOSS           553

#define SPX5_TAXI_PORT_MAX             70

#define SPEED_12500                    12500

/* Maps from taxis to port numbers */
static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = {
	{57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23},
	{58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31},
	{59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39},
	{60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47},
	{61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
};

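/* Return the target device bandwidth in Mbps for the detected chip
 * variant, or 0 for an unknown variant.
 */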
static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
{
	switch (sparx5->target_ct) {
	case SPX5_TARGET_CT_7546:
	case SPX5_TARGET_CT_7546TSN:
		return 65000;
	case SPX5_TARGET_CT_7549:
	case SPX5_TARGET_CT_7549TSN:
		return 91000;
	case SPX5_TARGET_CT_7552:
	case SPX5_TARGET_CT_7552TSN:
		return 129000;
	case SPX5_TARGET_CT_7556:
	case SPX5_TARGET_CT_7556TSN:
		return 161000;
	case SPX5_TARGET_CT_7558:
	case SPX5_TARGET_CT_7558TSN:
		return 201000;
	default:
		return 0;
	}
}

/* This is used in calendar configuration */
enum sparx5_cal_bw {
	SPX5_CAL_SPEED_NONE = 0,
	SPX5_CAL_SPEED_1G   = 1,
	SPX5_CAL_SPEED_2G5  = 2,
	SPX5_CAL_SPEED_5G   = 3,
	SPX5_CAL_SPEED_10G  = 4,
	SPX5_CAL_SPEED_25G  = 5,
	SPX5_CAL_SPEED_0G5  = 6,
	SPX5_CAL_SPEED_12G5 = 7
};

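/* Map the core clock frequency to the switch core bandwidth in Mbps
 * (one third of the clock in kHz, per the in-line comments); returns 0
 * for an unsupported clock.
 */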
static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
{
	switch (cclock) {
	case SPX5_CORE_CLOCK_250MHZ: return 83000; /* 250000 / 3 */
	case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
	case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
	default: return 0;
	}
}

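/* Convert a calendar speed selector to the corresponding bandwidth in Mbps */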
static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
{
	switch (speed) {
	case SPX5_CAL_SPEED_1G:   return 1000;
	case SPX5_CAL_SPEED_2G5:  return 2500;
	case SPX5_CAL_SPEED_5G:   return 5000;
	case SPX5_CAL_SPEED_10G:  return 10000;
	case SPX5_CAL_SPEED_25G:  return 25000;
	case SPX5_CAL_SPEED_0G5:  return 500;
	case SPX5_CAL_SPEED_12G5: return 12500;
	default: return 0;
	}
}

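/* Map a port speed in Mbps to a calendar speed selector; SPEED_UNKNOWN
 * defaults to 1G and unsupported speeds map to none.
 */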
static u32 sparx5_bandwidth_to_calendar(u32 bw)
{
	switch (bw) {
	case SPEED_10:      return SPX5_CAL_SPEED_0G5;
	case SPEED_100:     return SPX5_CAL_SPEED_0G5;
	case SPEED_1000:    return SPX5_CAL_SPEED_1G;
	case SPEED_2500:    return SPX5_CAL_SPEED_2G5;
	case SPEED_5000:    return SPX5_CAL_SPEED_5G;
	case SPEED_10000:   return SPX5_CAL_SPEED_10G;
	case SPEED_12500:   return SPX5_CAL_SPEED_12G5;
	case SPEED_25000:   return SPX5_CAL_SPEED_25G;
	case SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G;
	default:            return SPX5_CAL_SPEED_NONE;
	}
}

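/* Return the calendar bandwidth to reserve for a port: front ports use
 * their configured bandwidth, the internal CPU ports get a 2G5 entry and
 * the virtual devices (VD0-VD2) get only idle bandwidth.
 */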
static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
						    u32 portno)
{
	struct sparx5_port *port;

	if (portno >= sparx5->data->consts->n_ports) {
		/* Internal ports */
		if (portno ==
			    sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0) ||
		    portno ==
			    sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1)) {
			/* Equals 1.25G */
			return SPX5_CAL_SPEED_2G5;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD0)) {
			/* IPMC gets only idle BW */
			return SPX5_CAL_SPEED_NONE;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD1)) {
			/* OAM gets only idle BW */
			return SPX5_CAL_SPEED_NONE;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD2)) {
			/* IPinIP gets only idle BW */
			return SPX5_CAL_SPEED_NONE;
		}
		/* Not in port map */
		return SPX5_CAL_SPEED_NONE;
	}
	/* Front ports - may be used */
	port = sparx5->ports[portno];
	if (!port)
		return SPX5_CAL_SPEED_NONE;
	return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
}

/* Auto configure the QSYS calendar based on port configuration */
int sparx5_config_auto_calendar(struct sparx5 *sparx5)
{
	const struct sparx5_consts *consts = sparx5->data->consts;
	u32 cal[7], value, idx, portno;
	u32 max_core_bw;
	u32 total_bw = 0, used_port_bw = 0;
	int err = 0;
	enum sparx5_cal_bw spd;

	memset(cal, 0, sizeof(cal));

	max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock);
	if (max_core_bw == 0) {
		dev_err(sparx5->dev, "Core clock not supported\n");
		return -EINVAL;
	}

	/* Setup the calendar with the bandwidth to each port */
	for (portno = 0; portno < consts->n_ports_all; portno++) {
		u64 reg, offset, this_bw;

		spd = sparx5_get_port_cal_speed(sparx5, portno);
		if (spd == SPX5_CAL_SPEED_NONE)
			continue;

		this_bw = sparx5_cal_speed_to_value(spd);
		if (portno < consts->n_ports)
			used_port_bw += this_bw;
		else
			/* Internal ports are granted half the value */
			this_bw = this_bw / 2;
		total_bw += this_bw;
		reg = portno;
		offset = do_div(reg, SPX5_PORTS_PER_CALREG);
		cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT);
	}

	if (used_port_bw > sparx5_target_bandwidth(sparx5)) {
		dev_err(sparx5->dev,
			"Port BW %u above target BW %u\n",
			used_port_bw, sparx5_target_bandwidth(sparx5));
		return -EINVAL;
	}

	if (total_bw > max_core_bw) {
		dev_err(sparx5->dev,
			"Total BW %u above switch core BW %u\n",
			total_bw, max_core_bw);
		return -EINVAL;
	}

	/* Halt the calendar while changing it */
	if (is_sparx5(sparx5))
		spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
			 QSYS_CAL_CTRL_CAL_MODE,
			 sparx5, QSYS_CAL_CTRL);

	/* Assign port bandwidth to auto calendar */
	for (idx = 0; idx < consts->n_auto_cals; idx++)
		spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));

	/* Increase grant rate of all ports to account for
	 * core clock ppm deviations
	 */
	spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671), /* 672->671 */
		 QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE,
		 sparx5,
		 QSYS_CAL_CTRL);

	/* Grant idle usage to VD 0-2 */
	for (idx = 2; idx < 5; idx++)
		spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12),
			sparx5,
			HSCH_OUTB_SHARE_ENA(idx));

	/* Enable Auto mode */
	spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8),
		 QSYS_CAL_CTRL_CAL_MODE,
		 sparx5, QSYS_CAL_CTRL);

	/* Verify successful calendar config */
	value = spx5_rd(sparx5, QSYS_CAL_CTRL);
	if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) {
		dev_err(sparx5->dev, "QSYS calendar error\n");
		err = -EINVAL;
	}
	return err;
}

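/* Greatest common divisor (recursive Euclid), used to find the common
 * speed granularity of the devices on a taxi bus.
 */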
static u32 sparx5_dsm_exb_gcd(u32 a, u32 b)
{
	if (b == 0)
		return a;
	return sparx5_dsm_exb_gcd(b, a % b);
}

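/* Count the number of occupied (non-empty) slots in a DSM calendar */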
static u32 sparx5_dsm_cal_len(u32 *cal)
{
	u32 idx = 0, len = 0;

	while (idx < SPX5_DSM_CAL_LEN) {
		if (cal[idx] != SPX5_DSM_CAL_EMPTY)
			len++;
		idx++;
	}
	return len;
}

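/* Pop the first occupied slot from a schedule: return its value and mark
 * the slot empty, or return SPX5_DSM_CAL_EMPTY when the schedule is empty.
 */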
static u32 sparx5_dsm_cp_cal(u32 *sched)
{
	u32 idx = 0, tmp;

	while (idx < SPX5_DSM_CAL_LEN) {
		if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
			tmp = sched[idx];
			sched[idx] = SPX5_DSM_CAL_EMPTY;
			return tmp;
		}
		idx++;
	}
	return SPX5_DSM_CAL_EMPTY;
}

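/* Calculate the DSM calendar for one taxi bus: find the bandwidth of each
 * device on the taxi, choose a number of calendar slots based on the
 * common speed granularity (GCD), and interleave the per-device slots so
 * that each device is scheduled as evenly as possible across the calendar.
 * Returns 0 on success (also for an empty calendar) or -EINVAL when the
 * requested bandwidth or slot separation cannot be accommodated.
 */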
int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
			     struct sparx5_calendar_data *data)
{
	bool slow_mode;
	u32 gcd, idx, sum, min, factor;
	u32 num_of_slots, slot_spd, empty_slots;
	u32 taxi_bw, clk_period_ps;

	clk_period_ps = sparx5_clk_period(sparx5->coreclock);
	taxi_bw = 128 * 1000000 / clk_period_ps;
	slow_mode = !!(clk_period_ps > 2000);
	memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi],
	       sizeof(data->taxi_ports));

	for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
		data->new_slots[idx] = SPX5_DSM_CAL_EMPTY;
		data->schedule[idx] = SPX5_DSM_CAL_EMPTY;
		data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY;
	}
	/* Default empty calendar */
	data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;

	/* Map ports to taxi positions */
	for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
		u32 portno = data->taxi_ports[idx];

		if (portno < sparx5->data->consts->n_ports_all) {
			data->taxi_speeds[idx] = sparx5_cal_speed_to_value
				(sparx5_get_port_cal_speed(sparx5, portno));
		} else {
			data->taxi_speeds[idx] = 0;
		}
	}

	sum = 0;
	min = 25000;
	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
		u32 jdx;

		sum += data->taxi_speeds[idx];
		if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min)
			min = data->taxi_speeds[idx];
		gcd = min;
		for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++)
			gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]);
	}
	if (sum == 0) /* Empty calendar */
		return 0;
	/* Make room for overhead traffic */
	factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS);

	if (sum * factor > (taxi_bw * 1000)) {
		dev_err(sparx5->dev,
			"Taxi %u, Requested BW %u above available BW %u\n",
			taxi, sum, taxi_bw);
		return -EINVAL;
	}
	for (idx = 0; idx < 4; idx++) {
		u32 raw_spd;

		if (idx == 0)
			raw_spd = gcd / 5;
		else if (idx == 1)
			raw_spd = gcd / 2;
		else if (idx == 2)
			raw_spd = gcd;
		else
			raw_spd = min;
		slot_spd = raw_spd * factor / 1000;
		num_of_slots = taxi_bw / slot_spd;
		if (num_of_slots <= 64)
			break;
	}

	num_of_slots = num_of_slots > 64 ? 64 : num_of_slots;
	slot_spd = taxi_bw / num_of_slots;

	sum = 0;
	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
		u32 spd = data->taxi_speeds[idx];
		u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000;

		if (adjusted_speed > 0) {
			data->avg_dist[idx] = (128 * 1000000 * 10) /
				(adjusted_speed * clk_period_ps);
		} else {
			data->avg_dist[idx] = -1;
		}
		data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000;
		if (spd != 25000 && (spd != 10000 || !slow_mode)) {
			if (num_of_slots < (5 * data->dev_slots[idx])) {
				dev_err(sparx5->dev,
					"Taxi %u, speed %u, Low slot sep.\n",
					taxi, spd);
				return -EINVAL;
			}
		}
		sum += data->dev_slots[idx];
		if (sum > num_of_slots) {
			dev_err(sparx5->dev,
				"Taxi %u with overhead factor %u\n",
				taxi, factor);
			return -EINVAL;
		}
	}

	empty_slots = num_of_slots - sum;

	for (idx = 0; idx < empty_slots; idx++)
		data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;

	for (idx = 1; idx < num_of_slots; idx++) {
		u32 indices_len = 0;
		u32 slot, jdx, kdx, ts;
		s32 cnt;
		u32 num_of_old_slots, num_of_new_slots, tgt_score;

		for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) {
			if (data->dev_slots[slot] == idx) {
				data->indices[indices_len] = slot;
				indices_len++;
			}
		}
		if (indices_len == 0)
			continue;
		kdx = 0;
		for (slot = 0; slot < idx; slot++) {
			for (jdx = 0; jdx < indices_len; jdx++, kdx++)
				data->new_slots[kdx] = data->indices[jdx];
		}

		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY)
				break;
		}

		num_of_old_slots = slot;
		num_of_new_slots = kdx;
		cnt = 0;
		ts = 0;

		if (num_of_new_slots > num_of_old_slots) {
			memcpy(data->short_list, data->schedule,
			       sizeof(data->short_list));
			memcpy(data->long_list, data->new_slots,
			       sizeof(data->long_list));
			tgt_score = 100000 * num_of_old_slots /
				num_of_new_slots;
		} else {
			memcpy(data->short_list, data->new_slots,
			       sizeof(data->short_list));
			memcpy(data->long_list, data->schedule,
			       sizeof(data->long_list));
			tgt_score = 100000 * num_of_new_slots /
				num_of_old_slots;
		}

		while (sparx5_dsm_cal_len(data->short_list) > 0 ||
		       sparx5_dsm_cal_len(data->long_list) > 0) {
			u32 act = 0;

			if (sparx5_dsm_cal_len(data->short_list) > 0) {
				data->temp_sched[ts] =
					sparx5_dsm_cp_cal(data->short_list);
				ts++;
				cnt += 100000;
				act = 1;
			}
			while (sparx5_dsm_cal_len(data->long_list) > 0 &&
			       cnt > 0) {
				data->temp_sched[ts] =
					sparx5_dsm_cp_cal(data->long_list);
				ts++;
				cnt -= tgt_score;
				act = 1;
			}
			if (act == 0) {
				dev_err(sparx5->dev,
					"Error in DSM calendar calculation\n");
				return -EINVAL;
			}
		}

		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY)
				break;
		}
		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			data->schedule[slot] = data->temp_sched[slot];
			data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY;
			data->new_slots[slot] = SPX5_DSM_CAL_EMPTY;
		}
	}
	return 0;
}

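/* Check a calculated calendar: for each device on the taxi, walk the
 * distances between its calendar slots and verify that the accumulated
 * deviation from the expected average distance never exceeds that
 * average distance.
 */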
static int sparx5_dsm_calendar_check(struct sparx5 *sparx5,
				     struct sparx5_calendar_data *data)
{
	u32 num_of_slots, idx, port;
	int cnt, max_dist;
	u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN];
	u32 cal_length = sparx5_dsm_cal_len(data->schedule);

	for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) {
		num_of_slots = 0;
		max_dist = data->avg_dist[port];
		for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
			slot_indices[idx] = SPX5_DSM_CAL_EMPTY;
			distances[idx] = SPX5_DSM_CAL_EMPTY;
		}

		for (idx = 0; idx < cal_length; idx++) {
			if (data->schedule[idx] == port) {
				slot_indices[num_of_slots] = idx;
				num_of_slots++;
			}
		}

		slot_indices[num_of_slots] = slot_indices[0] + cal_length;

		for (idx = 0; idx < num_of_slots; idx++) {
			distances[idx] = (slot_indices[idx + 1] -
					  slot_indices[idx]) * 10;
		}

		for (idx = 0; idx < num_of_slots; idx++) {
			u32 jdx, kdx;

			cnt = distances[idx] - max_dist;
			if (cnt < 0)
				cnt = -cnt;
			kdx = 0;
			for (jdx = (idx + 1) % num_of_slots;
			     jdx != idx;
			     jdx = (jdx + 1) % num_of_slots, kdx++) {
				cnt = cnt + distances[jdx] - max_dist;
				if (cnt < 0)
					cnt = -cnt;
				if (cnt > max_dist)
					goto check_err;
			}
		}
	}
	return 0;
check_err:
	dev_err(sparx5->dev,
		"Port %u: distance %u above limit %d\n",
		port, cnt, max_dist);
	return -EINVAL;
}

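/* Write the calculated calendar to the DSM taxi calendar registers:
 * enable calendar programming, write one entry per calendar index,
 * disable programming again and verify the resulting calendar length.
 */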
static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
				      struct sparx5_calendar_data *data)
{
	u32 idx;
	u32 cal_len = sparx5_dsm_cal_len(data->schedule), len;

	spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
		sparx5,
		DSM_TAXI_CAL_CFG(taxi));
	for (idx = 0; idx < cal_len; idx++) {
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
			 DSM_TAXI_CAL_CFG_CAL_IDX,
			 sparx5,
			 DSM_TAXI_CAL_CFG(taxi));
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]),
			 DSM_TAXI_CAL_CFG_CAL_PGM_VAL,
			 sparx5,
			 DSM_TAXI_CAL_CFG(taxi));
	}
	spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
		sparx5,
		DSM_TAXI_CAL_CFG(taxi));
	len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
						       DSM_TAXI_CAL_CFG(taxi)));
	if (len != cal_len - 1)
		goto update_err;
	return 0;
update_err:
	dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
	return -EINVAL;
}

/* Configure the DSM calendar based on port configuration */
int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	int taxi;
	struct sparx5_calendar_data *data;
	int err = 0;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (taxi = 0; taxi < sparx5->data->consts->n_dsm_cal_taxis; ++taxi) {
		err = ops->dsm_calendar_calc(sparx5, taxi, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar calculation failed\n");
			goto cal_out;
		}
		err = sparx5_dsm_calendar_check(sparx5, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar check failed\n");
			goto cal_out;
		}
		err = sparx5_dsm_calendar_update(sparx5, taxi, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar update failed\n");
			goto cal_out;
		}
	}
cal_out:
	kfree(data);
	return err;
}