// SPDX-License-Identifier: (GPL-2.0 OR MIT)
// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
// stmmac Support for 5.xx Ethernet QoS cores

#include <linux/bitops.h>
#include <linux/iopoll.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"
#include "stmmac_ptp.h"

struct dwmac5_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)

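/*
 * Each safety module below (MAC, MTL, DMA) keeps one counter per interrupt
 * status bit in a 32-entry array inside struct stmmac_safety_stats.
 * STAT_OFF() resolves the byte offset of that array, and dwmac5_log_error()
 * walks the status word and bumps the matching counter.  A minimal sketch of
 * the intended use (status value made up for illustration):
 *
 *	u32 status = BIT(0) | BIT(2);	// e.g. ATPES and RDPES raised
 *	dwmac5_log_error(ndev, status, true, "MAC", dwmac5_mac_errors,
 *			 STAT_OFF(mac_errors), stats);
 *	// logs both errors and increments stats->mac_errors[0] and [2]
 */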
static void dwmac5_log_error(struct net_device *ndev, u32 value, bool corr,
		const char *module_name, const struct dwmac5_error_desc *desc,
		unsigned long field_offset, struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
				"correctable" : "uncorrectable", module_name,
				desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}

static const struct dwmac5_error_desc dwmac5_mac_errors[32] = {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "RDPES", "Read Descriptor Parity Check Error" },
	{ true, "MPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ true, "T125ES", "TX125 FSM Timeout Error" },
	{ true, "R125ES", "RX125 FSM Timeout Error" },
	{ true, "RVCTES", "REV MDC FSM Timeout Error" },
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwmac5_handle_mac_err(struct net_device *ndev,
		void __iomem *ioaddr, bool correctable,
		struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + MAC_DPP_FSM_INT_STATUS);
	writel(value, ioaddr + MAC_DPP_FSM_INT_STATUS);

	dwmac5_log_error(ndev, value, correctable, "MAC", dwmac5_mac_errors,
			STAT_OFF(mac_errors), stats);
}

static const struct dwmac5_error_desc dwmac5_mtl_errors[32] = {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwmac5_handle_mtl_err(struct net_device *ndev,
		void __iomem *ioaddr, bool correctable,
		struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + MTL_ECC_INT_STATUS);
	writel(value, ioaddr + MTL_ECC_INT_STATUS);

	dwmac5_log_error(ndev, value, correctable, "MTL", dwmac5_mtl_errors,
			STAT_OFF(mtl_errors), stats);
}

static const struct dwmac5_error_desc dwmac5_dma_errors[32] = {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 4 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 5 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 6 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwmac5_handle_dma_err(struct net_device *ndev,
		void __iomem *ioaddr, bool correctable,
		struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + DMA_ECC_INT_STATUS);
	writel(value, ioaddr + DMA_ECC_INT_STATUS);

	dwmac5_log_error(ndev, value, correctable, "DMA", dwmac5_dma_errors,
			STAT_OFF(dma_errors), stats);
}

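/*
 * Configure the safety features advertised by the "asp" (Automotive Safety
 * Package) capability.  The code below is staged by asp level: ECC protection
 * for the on-chip memories is always programmed, FSM parity/timeout checks
 * are only added for asp > 1, and the parity port for the external
 * application interface (EPSI) only for asp > 2.  When no explicit
 * safety_feat_cfg is passed in, every feature is enabled.
 */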
int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
			      struct stmmac_safety_feature_cfg *safety_feat_cfg)
{
	struct stmmac_safety_feature_cfg all_safety_feats = {
		.tsoee = 1,
		.mrxpee = 1,
		.mestee = 1,
		.mrxee = 1,
		.mtxee = 1,
		.epsi = 1,
		.edpp = 1,
		.prtyen = 1,
		.tmouten = 1,
	};
	u32 value;

	if (!asp)
		return -EINVAL;

	if (!safety_feat_cfg)
		safety_feat_cfg = &all_safety_feats;

	/* 1. Enable Safety Features */
	value = readl(ioaddr + MTL_ECC_CONTROL);
	value |= MEEAO; /* MTL ECC Error Addr Status Override */
	if (safety_feat_cfg->tsoee)
		value |= TSOEE; /* TSO ECC */
	if (safety_feat_cfg->mrxpee)
		value |= MRXPEE; /* MTL RX Parser ECC */
	if (safety_feat_cfg->mestee)
		value |= MESTEE; /* MTL EST ECC */
	if (safety_feat_cfg->mrxee)
		value |= MRXEE; /* MTL RX FIFO ECC */
	if (safety_feat_cfg->mtxee)
		value |= MTXEE; /* MTL TX FIFO ECC */
	writel(value, ioaddr + MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + MTL_ECC_INT_ENABLE);
	value |= RPCEIE; /* RX Parser Memory Correctable Error */
	value |= ECEIE; /* EST Memory Correctable Error */
	value |= RXCEIE; /* RX Memory Correctable Error */
	value |= TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + DMA_ECC_INT_ENABLE);
	value |= TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + DMA_ECC_INT_ENABLE);

	/* Only ECC Protection for External Memory feature is selected */
	if (asp <= 0x1)
		return 0;

	/* 5. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + MAC_FSM_CONTROL);
	if (safety_feat_cfg->prtyen)
		value |= PRTYEN; /* FSM Parity Feature */
	if (safety_feat_cfg->tmouten)
		value |= TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + MAC_FSM_CONTROL);

	/* 4. Enable Data Parity Protection */
	value = readl(ioaddr + MTL_DPP_CONTROL);
	if (safety_feat_cfg->edpp)
		value |= EDPP;
	writel(value, ioaddr + MTL_DPP_CONTROL);

	/*
	 * All the Automotive Safety features are selected without the "Parity
	 * Port Enable for external interface" feature.
	 */
	if (asp <= 0x2)
		return 0;

	if (safety_feat_cfg->epsi)
		value |= EPSI;
	writel(value, ioaddr + MTL_DPP_CONTROL);
	return 0;
}

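/*
 * Demultiplex the common safety interrupt.  MAC (DPP/FSM) errors are always
 * treated as uncorrectable here, while the MTL and DMA ECC paths report
 * correctable and uncorrectable events through separate status bits.  A
 * non-zero return value signals that at least one uncorrectable error was
 * handled, which the caller can use to decide whether a recovery action is
 * needed (an assumption about the caller, not something enforced here).
 */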
int dwmac5_safety_feat_irq_status(struct net_device *ndev,
		void __iomem *ioaddr, unsigned int asp,
		struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + DMA_SAFETY_INT_STATUS);

	err = (mtl & MCSIS) || (dma & MCSIS);
	corr = false;
	if (err) {
		dwmac5_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = (mtl & (MEUIS | MECIS)) || (dma & (MSUIS | MSCIS));
	corr = (mtl & MECIS) || (dma & MSCIS);
	if (err) {
		dwmac5_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = dma & (DEUIS | DECIS);
	corr = dma & DECIS;
	if (err) {
		dwmac5_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}

static const struct dwmac5_error {
	const struct dwmac5_error_desc *desc;
} dwmac5_all_errors[] = {
	{ dwmac5_mac_errors },
	{ dwmac5_mtl_errors },
	{ dwmac5_dma_errors },
};

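/*
 * ethtool-style dump helper: "index" addresses one bit of one module, with
 * 32 indices reserved per module in the dwmac5_all_errors[] order above
 * (MAC, then MTL, then DMA).  Fetching the counter by treating the stats
 * structure as a flat unsigned long array relies on the three 32-entry
 * counter arrays sitting back to back at the start of
 * struct stmmac_safety_stats.  Worked example (assuming that layout):
 * index 33 -> module 1, offset 1, i.e. the MTL "TXAMS" counter.
 */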
int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
			int index, unsigned long *count, const char **desc)
{
	int module = index / 32, offset = index % 32;
	unsigned long *ptr = (unsigned long *)stats;

	if (module >= ARRAY_SIZE(dwmac5_all_errors))
		return -EINVAL;
	if (!dwmac5_all_errors[module].desc[offset].valid)
		return -EINVAL;
	if (count)
		*count = *(ptr + index);
	if (desc)
		*desc = dwmac5_all_errors[module].desc[offset].desc;
	return 0;
}

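/*
 * The Flexible RX Parser has to be quiesced before its instruction table is
 * rewritten: dwmac5_rxp_disable() clears MTL_FRPE and then polls RXPI until
 * the parser reports idle, while dwmac5_rxp_enable() simply sets MTL_FRPE
 * again once the new table is in place.
 */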
static int dwmac5_rxp_disable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + MTL_OPERATION_MODE);
	val &= ~MTL_FRPE;
	writel(val, ioaddr + MTL_OPERATION_MODE);

	return readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val,
			val & RXPI, 1, 10000);
}

static void dwmac5_rxp_enable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + MTL_OPERATION_MODE);
	val |= MTL_FRPE;
	writel(val, ioaddr + MTL_OPERATION_MODE);
}

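/*
 * Parser instructions are programmed through an indirect access window:
 * each struct stmmac_tc_entry::val is split into 32-bit words, and every
 * word is written by (1) polling STARTBUSY clear, (2) writing the word to
 * MTL_RXP_IACC_DATA, (3) writing the word address, then the WRRDN write
 * opcode, then STARTBUSY to MTL_RXP_IACC_CTRL_STATUS in successive writes,
 * and (4) polling STARTBUSY again for completion.  As a worked example
 * (entry size assumed, check sizeof(entry->val)): with a 16-byte entry
 * value, table position 3 occupies word addresses 12..15.
 */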
static int dwmac5_rxp_update_single_entry(void __iomem *ioaddr,
					  struct stmmac_tc_entry *entry,
					  int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
				val, !(val & STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & ADDR;
		writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

		/* Write OP */
		val |= WRRDN;
		writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

		/* Start Write */
		val |= STARTBUSY;
		writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
				val, !(val & STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}

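/*
 * Pick the next entry to push to hardware: among the in-use entries that are
 * not yet in hardware, not fragments and not the catch-all "last" entry, the
 * one with the lowest priority value not below curr_prio is returned.
 * Called repeatedly by dwmac5_rxp_config(), this yields the table in
 * ascending priority order, with fragments and the final all-pass entry
 * handled separately by the caller.
 */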
static struct stmmac_tc_entry *
dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count,
			  u32 curr_prio)
{
	struct stmmac_tc_entry *entry;
	u32 min_prio = ~0x0;
	int i, min_prio_idx;
	bool found = false;

	for (i = count - 1; i >= 0; i--) {
		entry = &entries[i];

		/* Do not update unused entries */
		if (!entry->in_use)
			continue;
		/* Do not update already updated entries (i.e. fragments) */
		if (entry->in_hw)
			continue;
		/* Let last entry be updated last */
		if (entry->is_last)
			continue;
		/* Do not return fragments */
		if (entry->is_frag)
			continue;
		/* Check if we already checked this prio */
		if (entry->prio < curr_prio)
			continue;
		/* Check if this is the minimum prio */
		if (entry->prio < min_prio) {
			min_prio = entry->prio;
			min_prio_idx = i;
			found = true;
		}
	}

	if (found)
		return &entries[min_prio_idx];
	return NULL;
}

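/*
 * Full reprogramming sequence for the RX parser table.  RX is forced off and
 * the parser disabled while the table is rebuilt; entries are then written
 * in ascending priority order, each fragment immediately after its parent
 * entry.  Note how the parent of a fragmented rule is patched before being
 * written: with the parent at table position nve and the fragment at
 * nve + 1, setting ok_index = nve + 2 makes a match continue at the first
 * instruction after the fragment.  The catch-all "last" entries go in at the
 * very end, and the number of valid/parsable entries is finally programmed
 * into NVE/NPE before the parser and RX path are re-enabled.
 */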
int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
		      unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + GMAC_CONFIG);
	val = old_val & ~GMAC_CONFIG_RE;
	writel(val, ioaddr + GMAC_CONFIG);

	/* Disable RX Parser */
	ret = dwmac5_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			entry->val.ok_index = nve + 2;
		}

		ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		if (frag && !frag->in_hw) {
			ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & NPE;
	val |= nve & NVE;
	writel(val, ioaddr + MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwmac5_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + GMAC_CONFIG);
	return ret;
}

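/*
 * Program one flexible PPS output.  The interval and width registers count
 * in units of the PTP sub-second increment, so the requested period is first
 * converted from nanoseconds.  Worked example with assumed values (a 50 MHz
 * PTP clock, i.e. sub_second_inc = 20 ns, and a 1 s period):
 *
 *	period = 1000000000 / 20 = 50000000 ticks
 *	MAC_PPSx_INTERVAL = period - 1 = 49999999
 *	MAC_PPSx_WIDTH    = period / 2 - 1 = 24999999   (~50% duty cycle)
 *
 * When the timestamping block uses binary rollover (PTP_TCR_TSCTRLSSR
 * clear), the start time nanoseconds are rescaled by 1000/465 to match the
 * ~0.465 ns granularity of the sub-second register.
 */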
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
			   struct stmmac_pps_cfg *cfg, bool enable,
			   u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + MAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~PPSx_MASK(index);

	if (!enable) {
		val |= PPSCMDx(index, 0x5);
		val |= PPSEN0;
		writel(val, ioaddr + MAC_PPS_CONTROL);
		return 0;
	}

	val |= TRGTMODSELx(index, 0x2);
	val |= PPSEN0;
	writel(val, ioaddr + MAC_PPS_CONTROL);

	writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));

	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index));

	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index));

	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	val |= PPSCMDx(index, 0x2);
	writel(val, ioaddr + MAC_PPS_CONTROL);
	return 0;
}

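/*
 * Enable or disable frame preemption on the transmit side.  When TX
 * preemption is turned on, the index of the highest RX queue (num_rxq - 1)
 * is also written into the GMAC_RXQCTRL_FPRQ field, and the EFPE setting is
 * cached in cfg->fpe_csr so that dwmac5_fpe_send_mpacket() can preserve it
 * while issuing verify/response mPackets.  The pmac_enable flag only gates
 * the FPE interrupt source in GMAC_INT_EN; the dummy read below clears any
 * event that was latched while the interrupt was masked.
 */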
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
			  u32 num_txq, u32 num_rxq,
			  bool tx_enable, bool pmac_enable)
{
	u32 value;

	if (tx_enable) {
		cfg->fpe_csr = EFPE;
		value = readl(ioaddr + GMAC_RXQ_CTRL1);
		value &= ~GMAC_RXQCTRL_FPRQ;
		value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
		writel(value, ioaddr + GMAC_RXQ_CTRL1);
	} else {
		cfg->fpe_csr = 0;
	}
	writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);

	value = readl(ioaddr + GMAC_INT_EN);

	if (pmac_enable) {
		if (!(value & GMAC_INT_FPE_EN)) {
			/* Dummy read to clear any pending masked interrupts */
			readl(ioaddr + MAC_FPE_CTRL_STS);

			value |= GMAC_INT_FPE_EN;
		}
	} else {
		value &= ~GMAC_INT_FPE_EN;
	}

	writel(value, ioaddr + GMAC_INT_EN);
}

int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
{
	u32 value;
	int status;

	status = FPE_EVENT_UNKNOWN;

	/* Reads from the MAC_FPE_CTRL_STS register should only be performed
	 * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
	 */
	value = readl(ioaddr + MAC_FPE_CTRL_STS);

	if (value & TRSP) {
		status |= FPE_EVENT_TRSP;
		netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
	}

	if (value & TVER) {
		status |= FPE_EVENT_TVER;
		netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
	}

	if (value & RRSP) {
		status |= FPE_EVENT_RRSP;
		netdev_dbg(dev, "FPE: Respond mPacket is received\n");
	}

	if (value & RVER) {
		status |= FPE_EVENT_RVER;
		netdev_dbg(dev, "FPE: Verify mPacket is received\n");
	}

	return status;
}

void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
			     enum stmmac_mpacket_type type)
{
	u32 value = cfg->fpe_csr;

	if (type == MPACKET_VERIFY)
		value |= SVER;
	else if (type == MPACKET_RESPONSE)
		value |= SRSP;

	writel(value, ioaddr + MAC_FPE_CTRL_STS);
}

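/*
 * Accessors for the additional fragment size field of MTL_FPE_CTRL_STS.
 * Only the raw register field is exposed here; translating it to and from
 * the 64-byte granularity used by the ethtool MAC merge interface (e.g.
 * min-frag-size = 64 * (add_frag_size + 1)) is assumed to be done by the
 * caller, not by these helpers.
 */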
int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr)
{
	return FIELD_GET(DWMAC5_ADD_FRAG_SZ, readl(ioaddr + MTL_FPE_CTRL_STS));
}

void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size)
{
	u32 value;

	value = readl(ioaddr + MTL_FPE_CTRL_STS);
	writel(u32_replace_bits(value, add_frag_size, DWMAC5_ADD_FRAG_SZ),
	       ioaddr + MTL_FPE_CTRL_STS);
}

#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"

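/*
 * Translate a traffic-class preemption bitmap into the per-TX-queue
 * preemption classification expected by MTL_FPE_CTRL_STS.  Worked example
 * with a hypothetical setup of 4 TCs each mapped 1:1 onto TXQs 0-3:
 * pclass = BIT(2) | BIT(3) yields preemptible_txqs = GENMASK(3, 2) = 0xc,
 * i.e. queues 2 and 3 become preemptible while queues 0 and 1 stay express.
 * One-to-many TC:TXQ mappings are only accepted when the scheduler is not
 * strict priority and all queues within a TC share the same weight, matching
 * the checks below.
 */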
int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
				    struct netlink_ext_ack *extack, u32 pclass)
{
	u32 val, offset, count, queue_weight, preemptible_txqs = 0;
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 num_tc = ndev->num_tc;

	if (!pclass)
		goto update_mapping;

	/* DWMAC CORE4+ cannot program the TC:TXQ mapping to hardware.
	 *
	 * Synopsys Databook:
	 * "The number of Tx DMA channels is equal to the number of Tx queues,
	 * and is direct one-to-one mapping."
	 */
	for (u32 tc = 0; tc < num_tc; tc++) {
		count = ndev->tc_to_txq[tc].count;
		offset = ndev->tc_to_txq[tc].offset;

		if (pclass & BIT(tc))
			preemptible_txqs |= GENMASK(offset + count - 1, offset);

		/* This is 1:1 mapping, go to next TC */
		if (count == 1)
			continue;

		if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
			NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
			return -EINVAL;
		}

		queue_weight = priv->plat->tx_queues_cfg[offset].weight;

		for (u32 i = 1; i < count; i++) {
			if (priv->plat->tx_queues_cfg[offset + i].weight !=
			    queue_weight) {
				NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
						       queue_weight, tc);
				return -EINVAL;
			}
		}
	}

update_mapping:
	val = readl(priv->ioaddr + MTL_FPE_CTRL_STS);
	writel(u32_replace_bits(val, preemptible_txqs, DWMAC5_PREEMPTION_CLASS),
	       priv->ioaddr + MTL_FPE_CTRL_STS);

	return 0;
}
725