// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/module.h>
#include <linux/device.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

/* QSYS calendar information */
#define SPX5_PORTS_PER_CALREG          10  /* Ports mapped in a calendar register */
#define SPX5_CALBITS_PER_PORT          3   /* Bit per port in calendar register */

/* DSM calendar information */
#define SPX5_DSM_CAL_TAXIS             8
#define SPX5_DSM_CAL_BW_LOSS           553

#define SPX5_TAXI_PORT_MAX             70

#define SPEED_12500                    12500

/* Maps from taxis to port numbers */
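/* Entries of 99 (above SPX5_TAXI_PORT_MAX) do not map to any port and are
 * treated as unused taxi positions below.
 */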
static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = {
	{57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23},
	{58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31},
	{59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39},
	{60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47},
	{61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
};

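/* Aggregate front-port bandwidth budget (in Mbps) for each supported
 * switch variant; unknown targets return 0.
 */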
static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
{
	switch (sparx5->target_ct) {
	case SPX5_TARGET_CT_7546:
	case SPX5_TARGET_CT_7546TSN:
		return 65000;
	case SPX5_TARGET_CT_7549:
	case SPX5_TARGET_CT_7549TSN:
		return 91000;
	case SPX5_TARGET_CT_7552:
	case SPX5_TARGET_CT_7552TSN:
		return 129000;
	case SPX5_TARGET_CT_7556:
	case SPX5_TARGET_CT_7556TSN:
		return 161000;
	case SPX5_TARGET_CT_7558:
	case SPX5_TARGET_CT_7558TSN:
		return 201000;
	case SPX5_TARGET_CT_LAN9691VAO:
		return 46000;
	case SPX5_TARGET_CT_LAN9694RED:
	case SPX5_TARGET_CT_LAN9694TSN:
	case SPX5_TARGET_CT_LAN9694:
		return 68000;
	case SPX5_TARGET_CT_LAN9696RED:
	case SPX5_TARGET_CT_LAN9696TSN:
	case SPX5_TARGET_CT_LAN9692VAO:
	case SPX5_TARGET_CT_LAN9696:
		return 88000;
	case SPX5_TARGET_CT_LAN9698RED:
	case SPX5_TARGET_CT_LAN9698TSN:
	case SPX5_TARGET_CT_LAN9693VAO:
	case SPX5_TARGET_CT_LAN9698:
		return 101000;
	default:
		return 0;
	}
}

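/* Maximum switch-core bandwidth (in Mbps) available at a given core clock;
 * used as the ceiling for the total calendar bandwidth.
 */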
static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
{
	switch (cclock) {
	case SPX5_CORE_CLOCK_250MHZ: return 83000; /* 250000 / 3 */
	case SPX5_CORE_CLOCK_328MHZ: return 109375; /* 328000 / 3 */
	case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
	case SPX5_CORE_CLOCK_625MHZ: return  208000; /* 625000 / 3 */
	default: return 0;
	}
	return 0;
}

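/* Translate a calendar speed enum to its bandwidth in Mbps */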
u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
{
	switch (speed) {
	case SPX5_CAL_SPEED_1G:   return 1000;
	case SPX5_CAL_SPEED_2G5:  return 2500;
	case SPX5_CAL_SPEED_5G:   return 5000;
	case SPX5_CAL_SPEED_10G:  return 10000;
	case SPX5_CAL_SPEED_25G:  return 25000;
	case SPX5_CAL_SPEED_0G5:  return 500;
	case SPX5_CAL_SPEED_12G5: return 12500;
	default: return 0;
	}
}

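/* Map a configured port speed to a calendar bandwidth; 10 and 100 Mbit
 * ports share the 0.5G granularity and an unknown speed defaults to 1G.
 */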
static u32 sparx5_bandwidth_to_calendar(u32 bw)
{
	switch (bw) {
	case SPEED_10:      return SPX5_CAL_SPEED_0G5;
	case SPEED_100:     return SPX5_CAL_SPEED_0G5;
	case SPEED_1000:    return SPX5_CAL_SPEED_1G;
	case SPEED_2500:    return SPX5_CAL_SPEED_2G5;
	case SPEED_5000:    return SPX5_CAL_SPEED_5G;
	case SPEED_10000:   return SPX5_CAL_SPEED_10G;
	case SPEED_12500:   return SPX5_CAL_SPEED_12G5;
	case SPEED_25000:   return SPX5_CAL_SPEED_25G;
	case SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G;
	default:            return SPX5_CAL_SPEED_NONE;
	}
}

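/* Calendar bandwidth to reserve for a port: the internal CPU ports are
 * granted 2.5G, the virtual devices (VD0-2) only get idle bandwidth and
 * front ports use their configured bandwidth.
 */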
enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, u32 portno)
{
	struct sparx5_port *port;

	if (portno >= sparx5->data->consts->n_ports) {
		/* Internal ports */
		if (portno ==
			    sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0) ||
		    portno ==
			    sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1)) {
			/* Equals 1.25G */
			return SPX5_CAL_SPEED_2G5;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD0)) {
			/* IPMC only idle BW */
			return SPX5_CAL_SPEED_NONE;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD1)) {
			/* OAM only idle BW */
			return SPX5_CAL_SPEED_NONE;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD2)) {
			/* IPinIP gets only idle BW */
			return SPX5_CAL_SPEED_NONE;
		}
		/* not in port map */
		return SPX5_CAL_SPEED_NONE;
	}
	/* Front ports - may be used */
	port = sparx5->ports[portno];
	if (!port)
		return SPX5_CAL_SPEED_NONE;
	return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
}

/* Auto configure the QSYS calendar based on port configuration */
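/* Each port occupies SPX5_CALBITS_PER_PORT bits in a calendar register,
 * with SPX5_PORTS_PER_CALREG ports packed into each QSYS_CAL_AUTO
 * register. The requested bandwidth is validated against the target
 * (SKU) bandwidth and the core-clock bandwidth before the calendar is
 * written and automatic mode is enabled.
 */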
int sparx5_config_auto_calendar(struct sparx5 *sparx5)
{
	const struct sparx5_consts *consts = sparx5->data->consts;
	u32 cal[7], value, idx, portno;
	u32 max_core_bw;
	u32 total_bw = 0, used_port_bw = 0;
	int err = 0;
	enum sparx5_cal_bw spd;

	memset(cal, 0, sizeof(cal));

	max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock);
	if (max_core_bw == 0) {
		dev_err(sparx5->dev, "Core clock not supported");
		return -EINVAL;
	}

	/* Setup the calendar with the bandwidth to each port */
	for (portno = 0; portno < consts->n_ports_all; portno++) {
		u64 reg, offset, this_bw;

		spd = sparx5_get_port_cal_speed(sparx5, portno);
		if (spd == SPX5_CAL_SPEED_NONE)
			continue;

		this_bw = sparx5_cal_speed_to_value(spd);
		if (portno < consts->n_ports)
			used_port_bw += this_bw;
		else
			/* Internal ports are granted half the value */
			this_bw = this_bw / 2;
		total_bw += this_bw;
		reg = portno;
		offset = do_div(reg, SPX5_PORTS_PER_CALREG);
		cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT);
	}

	if (used_port_bw > sparx5_target_bandwidth(sparx5)) {
		dev_err(sparx5->dev,
			"Port BW %u above target BW %u\n",
			used_port_bw, sparx5_target_bandwidth(sparx5));
		return -EINVAL;
	}

	if (total_bw > max_core_bw) {
		dev_err(sparx5->dev,
			"Total BW %u above switch core BW %u\n",
			total_bw, max_core_bw);
		return -EINVAL;
	}

	/* Halt the calendar while changing it */
	if (is_sparx5(sparx5))
		spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
			 QSYS_CAL_CTRL_CAL_MODE,
			 sparx5, QSYS_CAL_CTRL);

	/* Assign port bandwidth to auto calendar */
	for (idx = 0; idx < consts->n_auto_cals; idx++)
		spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));

	/* Increase grant rate of all ports to account for
	 * core clock ppm deviations
	 */
	spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671), /* 672->671 */
		 QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE,
		 sparx5,
		 QSYS_CAL_CTRL);

	/* Grant idle usage to VD 0-2 */
	for (idx = 2; idx < 5; idx++)
		spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12),
			sparx5,
			HSCH_OUTB_SHARE_ENA(idx));

	/* Enable Auto mode */
	spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8),
		 QSYS_CAL_CTRL_CAL_MODE,
		 sparx5, QSYS_CAL_CTRL);

	/* Verify successful calendar config */
	value = spx5_rd(sparx5, QSYS_CAL_CTRL);
	if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) {
		dev_err(sparx5->dev, "QSYS calendar error\n");
		err = -EINVAL;
	}
	return err;
}

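/* Greatest common divisor (recursive Euclidean algorithm) */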
static u32 sparx5_dsm_exb_gcd(u32 a, u32 b)
{
	if (b == 0)
		return a;
	return sparx5_dsm_exb_gcd(b, a % b);
}

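/* Number of occupied (non-empty) entries in a DSM calendar */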
static u32 sparx5_dsm_cal_len(u32 *cal)
{
	u32 idx = 0, len = 0;

	while (idx < SPX5_DSM_CAL_LEN) {
		if (cal[idx] != SPX5_DSM_CAL_EMPTY)
			len++;
		idx++;
	}
	return len;
}

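/* Take the first occupied entry out of a calendar and return it */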
static u32 sparx5_dsm_cp_cal(u32 *sched)
{
	u32 idx = 0, tmp;

	while (idx < SPX5_DSM_CAL_LEN) {
		if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
			tmp = sched[idx];
			sched[idx] = SPX5_DSM_CAL_EMPTY;
			return tmp;
		}
		idx++;
	}
	return SPX5_DSM_CAL_EMPTY;
}

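/* Calculate the DSM calendar (slot schedule) for one taxi bus.
 *
 * The taxi bus bandwidth is derived from the core clock (128 bits per
 * clock cycle). Each device on the taxi gets a number of slots
 * proportional to its calendar speed, scaled by an overhead factor
 * (SPX5_DSM_CAL_BW_LOSS is in 1/100 of a percent). The slot bandwidth
 * is chosen from the GCD of the device speeds such that at most 64
 * slots are needed, and the per-device slots are then interleaved so
 * they are spread as evenly as possible across the schedule.
 */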
int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
			     struct sparx5_calendar_data *data)
{
	bool slow_mode;
	u32 gcd, idx, sum, min, factor;
	u32 num_of_slots, slot_spd, empty_slots;
	u32 taxi_bw, clk_period_ps;

	clk_period_ps = sparx5_clk_period(sparx5->coreclock);
	taxi_bw = 128 * 1000000 / clk_period_ps;
	slow_mode = !!(clk_period_ps > 2000);
	memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi],
	       sizeof(data->taxi_ports));

	for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
		data->new_slots[idx] = SPX5_DSM_CAL_EMPTY;
		data->schedule[idx] = SPX5_DSM_CAL_EMPTY;
		data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY;
	}
	/* Default empty calendar */
	data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;

	/* Map ports to taxi positions */
	for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
		u32 portno = data->taxi_ports[idx];

		if (portno < sparx5->data->consts->n_ports_all) {
			data->taxi_speeds[idx] = sparx5_cal_speed_to_value
				(sparx5_get_port_cal_speed(sparx5, portno));
		} else {
			data->taxi_speeds[idx] = 0;
		}
	}

	sum = 0;
	min = 25000;
	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
		u32 jdx;

		sum += data->taxi_speeds[idx];
		if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min)
			min = data->taxi_speeds[idx];
		gcd = min;
		for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++)
			gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]);
	}
	if (sum == 0) /* Empty calendar */
		return 0;
	/* Make room for overhead traffic */
	factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS);

	if (sum * factor > (taxi_bw * 1000)) {
		dev_err(sparx5->dev,
			"Taxi %u, Requested BW %u above available BW %u\n",
			taxi, sum, taxi_bw);
		return -EINVAL;
	}
	for (idx = 0; idx < 4; idx++) {
		u32 raw_spd;

		if (idx == 0)
			raw_spd = gcd / 5;
		else if (idx == 1)
			raw_spd = gcd / 2;
		else if (idx == 2)
			raw_spd = gcd;
		else
			raw_spd = min;
		slot_spd = raw_spd * factor / 1000;
		num_of_slots = taxi_bw / slot_spd;
		if (num_of_slots <= 64)
			break;
	}

	num_of_slots = num_of_slots > 64 ? 64 : num_of_slots;
	slot_spd = taxi_bw / num_of_slots;

	sum = 0;
	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
		u32 spd = data->taxi_speeds[idx];
		u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000;

		if (adjusted_speed > 0) {
			data->avg_dist[idx] = (128 * 1000000 * 10) /
				(adjusted_speed * clk_period_ps);
		} else {
			data->avg_dist[idx] = -1;
		}
		data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000;
		if (spd != 25000 && (spd != 10000 || !slow_mode)) {
			if (num_of_slots < (5 * data->dev_slots[idx])) {
				dev_err(sparx5->dev,
					"Taxi %u, speed %u, Low slot sep.\n",
					taxi, spd);
				return -EINVAL;
			}
		}
		sum += data->dev_slots[idx];
		if (sum > num_of_slots) {
			dev_err(sparx5->dev,
				"Taxi %u with overhead factor %u\n",
				taxi, factor);
			return -EINVAL;
		}
	}

	empty_slots = num_of_slots - sum;

	for (idx = 0; idx < empty_slots; idx++)
		data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;

	for (idx = 1; idx < num_of_slots; idx++) {
		u32 indices_len = 0;
		u32 slot, jdx, kdx, ts;
		s32 cnt;
		u32 num_of_old_slots, num_of_new_slots, tgt_score;

		for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) {
			if (data->dev_slots[slot] == idx) {
				data->indices[indices_len] = slot;
				indices_len++;
			}
		}
		if (indices_len == 0)
			continue;
		kdx = 0;
		for (slot = 0; slot < idx; slot++) {
			for (jdx = 0; jdx < indices_len; jdx++, kdx++)
				data->new_slots[kdx] = data->indices[jdx];
		}

		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY)
				break;
		}

		num_of_old_slots =  slot;
		num_of_new_slots =  kdx;
		cnt = 0;
		ts = 0;

		if (num_of_new_slots > num_of_old_slots) {
			memcpy(data->short_list, data->schedule,
			       sizeof(data->short_list));
			memcpy(data->long_list, data->new_slots,
			       sizeof(data->long_list));
			tgt_score = 100000 * num_of_old_slots /
				num_of_new_slots;
		} else {
			memcpy(data->short_list, data->new_slots,
			       sizeof(data->short_list));
			memcpy(data->long_list, data->schedule,
			       sizeof(data->long_list));
			tgt_score = 100000 * num_of_new_slots /
				num_of_old_slots;
		}

		while (sparx5_dsm_cal_len(data->short_list) > 0 ||
		       sparx5_dsm_cal_len(data->long_list) > 0) {
			u32 act = 0;

			if (sparx5_dsm_cal_len(data->short_list) > 0) {
				data->temp_sched[ts] =
					sparx5_dsm_cp_cal(data->short_list);
				ts++;
				cnt += 100000;
				act = 1;
			}
			while (sparx5_dsm_cal_len(data->long_list) > 0 &&
			       cnt > 0) {
				data->temp_sched[ts] =
					sparx5_dsm_cp_cal(data->long_list);
				ts++;
				cnt -= tgt_score;
				act = 1;
			}
			if (act == 0) {
				dev_err(sparx5->dev,
					"Error in DSM calendar calculation\n");
				return -EINVAL;
			}
		}

		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY)
				break;
		}
		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			data->schedule[slot] = data->temp_sched[slot];
			data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY;
			data->new_slots[slot] = SPX5_DSM_CAL_EMPTY;
		}
	}
	return 0;
}

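/* Verify the calculated schedule: for each device, check that the spacing
 * between its slots stays close to the expected average distance; too
 * large a deviation fails the check.
 */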
static int sparx5_dsm_calendar_check(struct sparx5 *sparx5,
				     struct sparx5_calendar_data *data)
{
	u32 num_of_slots, idx, port;
	int cnt, max_dist;
	u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN];
	u32 cal_length = sparx5_dsm_cal_len(data->schedule);

	for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) {
		num_of_slots = 0;
		max_dist = data->avg_dist[port];
		for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
			slot_indices[idx] = SPX5_DSM_CAL_EMPTY;
			distances[idx] = SPX5_DSM_CAL_EMPTY;
		}

		for (idx = 0; idx < cal_length; idx++) {
			if (data->schedule[idx] == port) {
				slot_indices[num_of_slots] = idx;
				num_of_slots++;
			}
		}

		slot_indices[num_of_slots] = slot_indices[0] + cal_length;

		for (idx = 0; idx < num_of_slots; idx++) {
			distances[idx] = (slot_indices[idx + 1] -
					  slot_indices[idx]) * 10;
		}

		for (idx = 0; idx < num_of_slots; idx++) {
			u32 jdx, kdx;

			cnt = distances[idx] - max_dist;
			if (cnt < 0)
				cnt = -cnt;
			kdx = 0;
			for (jdx = (idx + 1) % num_of_slots;
			     jdx != idx;
			     jdx = (jdx + 1) % num_of_slots, kdx++) {
				cnt =  cnt + distances[jdx] - max_dist;
				if (cnt < 0)
					cnt = -cnt;
				if (cnt > max_dist)
					goto check_err;
			}
		}
	}
	return 0;
check_err:
	dev_err(sparx5->dev,
		"Port %u: distance %u above limit %d\n",
		port, cnt, max_dist);
	return -EINVAL;
}

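/* Program the calculated schedule into the DSM taxi calendar registers.
 * On non-Sparx5 targets the currently inactive calendar bank is selected
 * for programming and switched to afterwards; the calendar length read
 * back from hardware is used to verify the update.
 */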
static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
				      struct sparx5_calendar_data *data)
{
	u32 cal_len = sparx5_dsm_cal_len(data->schedule), len, idx;

	if (!is_sparx5(sparx5)) {
		u32 val, act;

		val = spx5_rd(sparx5, DSM_TAXI_CAL_CFG(taxi));
		act = DSM_TAXI_CAL_CFG_CAL_SEL_STAT_GET(val);

		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_SEL_SET(!act),
			 DSM_TAXI_CAL_CFG_CAL_PGM_SEL,
			 sparx5, DSM_TAXI_CAL_CFG(taxi));
	}

	spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
		 DSM_TAXI_CAL_CFG_CAL_PGM_ENA,
		 sparx5,
		 DSM_TAXI_CAL_CFG(taxi));
	for (idx = 0; idx < cal_len; idx++) {
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
			 DSM_TAXI_CAL_CFG_CAL_IDX,
			 sparx5,
			 DSM_TAXI_CAL_CFG(taxi));
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]),
			 DSM_TAXI_CAL_CFG_CAL_PGM_VAL,
			 sparx5,
			 DSM_TAXI_CAL_CFG(taxi));
	}
	spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
		 DSM_TAXI_CAL_CFG_CAL_PGM_ENA,
		 sparx5,
		 DSM_TAXI_CAL_CFG(taxi));
	len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
						       DSM_TAXI_CAL_CFG(taxi)));
	if (len != cal_len - 1)
		goto update_err;

	if (!is_sparx5(sparx5)) {
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_SWITCH_SET(1),
			 DSM_TAXI_CAL_CFG_CAL_SWITCH,
			 sparx5, DSM_TAXI_CAL_CFG(taxi));
	}

	return 0;
update_err:
	dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
	return -EINVAL;
}

/* Configure the DSM calendar based on port configuration */
int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	int taxi;
	struct sparx5_calendar_data *data;
	int err = 0;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (taxi = 0; taxi < sparx5->data->consts->n_dsm_cal_taxis; ++taxi) {
		err = ops->dsm_calendar_calc(sparx5, taxi, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar calculation failed\n");
			goto cal_out;
		}
		err = sparx5_dsm_calendar_check(sparx5, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar check failed\n");
			goto cal_out;
		}
		err = sparx5_dsm_calendar_update(sparx5, taxi, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar update failed\n");
			goto cal_out;
		}
	}
cal_out:
	kfree(data);
	return err;
}
613