xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6 
7 #include <linux/bitrev.h>
8 #include <linux/crc32.h>
9 #include <linux/iopoll.h>
10 #include "stmmac.h"
11 #include "stmmac_ptp.h"
12 #include "dwxlgmac2.h"
13 #include "dwxgmac2.h"
14 
dwxgmac2_core_init(struct mac_device_info * hw,struct net_device * dev)15 static void dwxgmac2_core_init(struct mac_device_info *hw,
16 			       struct net_device *dev)
17 {
18 	void __iomem *ioaddr = hw->pcsr;
19 	u32 tx, rx;
20 
21 	tx = readl(ioaddr + XGMAC_TX_CONFIG);
22 	rx = readl(ioaddr + XGMAC_RX_CONFIG);
23 
24 	tx |= XGMAC_CORE_INIT_TX;
25 	rx |= XGMAC_CORE_INIT_RX;
26 
27 	if (hw->ps) {
28 		tx |= XGMAC_CONFIG_TE;
29 		tx &= ~hw->link.speed_mask;
30 
31 		switch (hw->ps) {
32 		case SPEED_10000:
33 			tx |= hw->link.xgmii.speed10000;
34 			break;
35 		case SPEED_2500:
36 			tx |= hw->link.speed2500;
37 			break;
38 		case SPEED_1000:
39 		default:
40 			tx |= hw->link.speed1000;
41 			break;
42 		}
43 	}
44 
45 	writel(tx, ioaddr + XGMAC_TX_CONFIG);
46 	writel(rx, ioaddr + XGMAC_RX_CONFIG);
47 	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
48 }
49 
/* Enable or disable both the MAC transmitter and receiver. */
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG) & ~XGMAC_CONFIG_TE;
	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG) & ~XGMAC_CONFIG_RE;

	if (enable) {
		tx |= XGMAC_CONFIG_TE;
		rx |= XGMAC_CONFIG_RE;
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
}
66 
dwxgmac2_rx_ipc(struct mac_device_info * hw)67 static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
68 {
69 	void __iomem *ioaddr = hw->pcsr;
70 	u32 value;
71 
72 	value = readl(ioaddr + XGMAC_RX_CONFIG);
73 	if (hw->rx_csum)
74 		value |= XGMAC_CONFIG_IPC;
75 	else
76 		value &= ~XGMAC_CONFIG_IPC;
77 	writel(value, ioaddr + XGMAC_RX_CONFIG);
78 
79 	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
80 }
81 
/* Enable an RX queue in AVB (0x1) or DCB/generic (0x2) mode; any other
 * mode leaves the queue disabled.
 */
static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				     u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);

	switch (mode) {
	case MTL_QUEUE_AVB:
		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
		break;
	case MTL_QUEUE_DCB:
		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
		break;
	default:
		break;
	}

	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
}
95 
/* Map a priority bitmap to one RX queue. A priority must not remain
 * mapped to more than one queue, so the bits are first cleared from all
 * queues and then assigned to the target queue. The register holding the
 * target queue is written first so the new mapping is never lost.
 */
static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 clear_mask = 0;
	u32 ctrl2, ctrl3;
	int i;

	ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
	ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);

	/* Build a mask covering this priority in every PSRQ field */
	for (i = 0; i < 4; i++)
		clear_mask |= (prio << XGMAC_PSRQ_SHIFT(i)) & XGMAC_PSRQ(i);

	ctrl2 &= ~clear_mask;
	ctrl3 &= ~clear_mask;

	/* CTRL2 holds queues 0-3, CTRL3 holds queues 4-7; assign first,
	 * then clear from the other register.
	 */
	if (queue < 4) {
		ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
				XGMAC_PSRQ(queue);

		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
	} else {
		queue -= 4;

		ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
				XGMAC_PSRQ(queue);

		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
	}
}
136 
/* Map a priority bitmap to one TX traffic class. MAP0 covers classes
 * 0-3, MAP1 covers classes 4-7.
 */
static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 reg = XGMAC_TC_PRTY_MAP0;
	u32 value;

	if (queue >= 4) {
		reg = XGMAC_TC_PRTY_MAP1;
		queue -= 4;
	}

	value = readl(ioaddr + reg) & ~XGMAC_PSTC(queue);
	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);

	writel(value, ioaddr + reg);
}
153 
/* Route a special packet type (AV control, PTP, DCB control, untagged,
 * multicast/broadcast) to a given RX queue via XGMAC_RXQ_CTRL1. The
 * routing table is indexed by (packet - 1).
 */
static void dwxgmac2_rx_queue_routing(struct mac_device_info *hw,
				      u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	const struct stmmac_rx_routing *route;
	u32 value;

	static const struct stmmac_rx_routing dwxgmac2_route_possibilities[] = {
		{ XGMAC_AVCPQ, XGMAC_AVCPQ_SHIFT },
		{ XGMAC_PTPQ, XGMAC_PTPQ_SHIFT },
		{ XGMAC_DCBCPQ, XGMAC_DCBCPQ_SHIFT },
		{ XGMAC_UPQ, XGMAC_UPQ_SHIFT },
		{ XGMAC_MCBCQ, XGMAC_MCBCQ_SHIFT },
	};

	/* Guard the (packet - 1) table index: packet == 0 or a value past
	 * the table would read out of bounds.
	 */
	if (!packet || packet > ARRAY_SIZE(dwxgmac2_route_possibilities))
		return;

	route = &dwxgmac2_route_possibilities[packet - 1];

	value = readl(ioaddr + XGMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route->reg_mask;
	value |= (queue << route->reg_shift) & route->reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ)
		value |= FIELD_PREP(XGMAC_TACPQE, 1);
	else if (packet == PACKET_MCBCQ)
		value |= FIELD_PREP(XGMAC_MCBCQEN, 1);

	writel(value, ioaddr + XGMAC_RXQ_CTRL1);
}
183 
/* Select the MTL RX arbitration algorithm: strict priority (bit clear)
 * or weighted strict priority (XGMAC_RAA set).
 */
static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					    u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_RAA;

	/* Only WSP sets RAA; SP and unknown values leave it cleared */
	if (rx_alg == MTL_RX_ALGORITHM_WSP)
		value |= XGMAC_RAA;

	writel(value, ioaddr + XGMAC_MTL_OPMODE);
}
205 
/* Select the MTL TX scheduling algorithm (WRR/WFQ/DWRR) and program the
 * per-traffic-class TSA field accordingly. Unknown algorithms fall back
 * to strict priority with ETS disabled on every class.
 */
static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					    u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	bool ets = true;
	u32 value;
	int i;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_ETSALG;

	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= XGMAC_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= XGMAC_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= XGMAC_DWRR;
		break;
	default:
		ets = false;
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);

	/* Mirror the choice into each traffic class' TSA field */
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		u32 ctrl = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));

		ctrl &= ~XGMAC_TSA;
		if (ets)
			ctrl |= XGMAC_ETS;
		writel(ctrl, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
	}
}
243 
/* Program the quantum/weight used by the TX scheduler for one queue. */
static void dwxgmac2_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
					     struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}
252 
/* Map an MTL RX queue to a DMA channel. MAP0 holds queues 0-3 and MAP1
 * holds queues 4-7.
 */
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
				    u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 reg = XGMAC_MTL_RXQ_DMA_MAP0;
	u32 value;

	if (queue >= 4) {
		reg = XGMAC_MTL_RXQ_DMA_MAP1;
		queue -= 4;
	}

	value = readl(ioaddr + reg) & ~XGMAC_QxMDMACH(queue);
	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);

	writel(value, ioaddr + reg);
}
269 
/* Configure Credit-Based Shaping for one TX queue: program the slopes
 * and credit limits, then switch the class to CBS with credit control.
 */
static void dwxgmac2_config_cbs(struct stmmac_priv *priv,
				struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	/* idle slope shares the quantum/weight register */
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	ctrl = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	ctrl &= ~XGMAC_TSA;
	ctrl |= XGMAC_CC | XGMAC_CBS;
	writel(ctrl, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
288 
/* Snapshot the MAC register space (XGMAC_MAC_REGSIZE words) for ethtool. */
static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int reg;

	for (reg = 0; reg < XGMAC_MAC_REGSIZE; reg++)
		reg_space[reg] = readl(ioaddr + reg * 4);
}
297 
dwxgmac2_host_irq_status(struct mac_device_info * hw,struct stmmac_extra_stats * x)298 static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
299 				    struct stmmac_extra_stats *x)
300 {
301 	void __iomem *ioaddr = hw->pcsr;
302 	u32 stat, en;
303 	int ret = 0;
304 
305 	en = readl(ioaddr + XGMAC_INT_EN);
306 	stat = readl(ioaddr + XGMAC_INT_STATUS);
307 
308 	stat &= en;
309 
310 	if (stat & XGMAC_PMTIS) {
311 		x->irq_receive_pmt_irq_n++;
312 		readl(ioaddr + XGMAC_PMT);
313 	}
314 
315 	if (stat & XGMAC_LPIIS) {
316 		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
317 
318 		if (lpi & XGMAC_TLPIEN) {
319 			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
320 			x->irq_tx_path_in_lpi_mode_n++;
321 		}
322 		if (lpi & XGMAC_TLPIEX) {
323 			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
324 			x->irq_tx_path_exit_lpi_mode_n++;
325 		}
326 		if (lpi & XGMAC_RLPIEN)
327 			x->irq_rx_path_in_lpi_mode_n++;
328 		if (lpi & XGMAC_RLPIEX)
329 			x->irq_rx_path_exit_lpi_mode_n++;
330 	}
331 
332 	return ret;
333 }
334 
/* Handle MTL interrupts for one queue; report RX FIFO overflow to the
 * caller and acknowledge the queue's interrupt status.
 */
static int dwxgmac2_host_mtl_irq_status(struct stmmac_priv *priv,
					struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 status, qstatus;
	int ret = 0;

	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
	if (!(status & BIT(chan)))
		return 0;

	qstatus = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));

	if (qstatus & XGMAC_RXOVFIS)
		ret |= CORE_IRQ_MTL_RX_OVERFLOW;

	/* Write-1-to-clear the whole queue interrupt status */
	writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));

	return ret;
}
354 
/* Program RX/TX flow control. TX pause is enabled per queue; the pause
 * time is only programmed in full duplex.
 */
static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 queue;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);

	if (!(fc & FLOW_TX))
		return;

	for (queue = 0; queue < tx_cnt; queue++) {
		u32 value = XGMAC_TFE;

		if (duplex)
			value |= pause_time << XGMAC_PT_SHIFT;

		writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(queue));
	}
}
375 
dwxgmac2_pmt(struct mac_device_info * hw,unsigned long mode)376 static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
377 {
378 	void __iomem *ioaddr = hw->pcsr;
379 	u32 val = 0x0;
380 
381 	if (mode & WAKE_MAGIC)
382 		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
383 	if (mode & WAKE_UCAST)
384 		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
385 	if (val) {
386 		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
387 		cfg |= XGMAC_CONFIG_RE;
388 		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
389 	}
390 
391 	writel(val, ioaddr + XGMAC_PMT);
392 }
393 
dwxgmac2_set_umac_addr(struct mac_device_info * hw,const unsigned char * addr,unsigned int reg_n)394 static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
395 				   const unsigned char *addr,
396 				   unsigned int reg_n)
397 {
398 	void __iomem *ioaddr = hw->pcsr;
399 	u32 value;
400 
401 	value = (addr[5] << 8) | addr[4];
402 	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
403 
404 	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
405 	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
406 }
407 
dwxgmac2_get_umac_addr(struct mac_device_info * hw,unsigned char * addr,unsigned int reg_n)408 static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
409 				   unsigned char *addr, unsigned int reg_n)
410 {
411 	void __iomem *ioaddr = hw->pcsr;
412 	u32 hi_addr, lo_addr;
413 
414 	/* Read the MAC address from the hardware */
415 	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
416 	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
417 
418 	/* Extract the MAC address from the high and low words */
419 	addr[0] = lo_addr & 0xff;
420 	addr[1] = (lo_addr >> 8) & 0xff;
421 	addr[2] = (lo_addr >> 16) & 0xff;
422 	addr[3] = (lo_addr >> 24) & 0xff;
423 	addr[4] = hi_addr & 0xff;
424 	addr[5] = (hi_addr >> 8) & 0xff;
425 }
426 
/* Enter EEE TX LPI mode, optionally gating the TX clock while idle. */
static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
				  bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 lpi_ctrl;

	lpi_ctrl = readl(ioaddr + XGMAC_LPI_CTRL);
	lpi_ctrl |= XGMAC_LPITXEN | XGMAC_LPITXA;
	if (en_tx_lpi_clockgating)
		lpi_ctrl |= XGMAC_TXCGE;

	writel(lpi_ctrl, ioaddr + XGMAC_LPI_CTRL);
}
441 
dwxgmac2_reset_eee_mode(struct mac_device_info * hw)442 static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
443 {
444 	void __iomem *ioaddr = hw->pcsr;
445 	u32 value;
446 
447 	value = readl(ioaddr + XGMAC_LPI_CTRL);
448 	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
449 	writel(value, ioaddr + XGMAC_LPI_CTRL);
450 }
451 
dwxgmac2_set_eee_pls(struct mac_device_info * hw,int link)452 static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
453 {
454 	void __iomem *ioaddr = hw->pcsr;
455 	u32 value;
456 
457 	value = readl(ioaddr + XGMAC_LPI_CTRL);
458 	if (link)
459 		value |= XGMAC_PLS;
460 	else
461 		value &= ~XGMAC_PLS;
462 	writel(value, ioaddr + XGMAC_LPI_CTRL);
463 }
464 
dwxgmac2_set_eee_timer(struct mac_device_info * hw,int ls,int tw)465 static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
466 {
467 	void __iomem *ioaddr = hw->pcsr;
468 	u32 value;
469 
470 	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
471 	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
472 }
473 
/* Write the multicast hash table. mcbitslog2 selects the table width:
 * 6 -> 64 bits (2 regs), 7 -> 128 bits (4 regs), 8 -> 256 bits (8 regs).
 * Any other width is rejected without touching the hardware.
 */
static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
				int mcbitslog2)
{
	int numhashregs, i;

	if (mcbitslog2 == 6)
		numhashregs = 2;
	else if (mcbitslog2 == 7)
		numhashregs = 4;
	else if (mcbitslog2 == 8)
		numhashregs = 8;
	else
		return;

	for (i = 0; i < numhashregs; i++)
		writel(mcfilterbits[i], ioaddr + XGMAC_HASH_TABLE(i));
}
496 
dwxgmac2_set_filter(struct mac_device_info * hw,struct net_device * dev)497 static void dwxgmac2_set_filter(struct mac_device_info *hw,
498 				struct net_device *dev)
499 {
500 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
501 	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
502 	int mcbitslog2 = hw->mcast_bits_log2;
503 	u32 mc_filter[8];
504 	int i;
505 
506 	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
507 	value |= XGMAC_FILTER_HPF;
508 
509 	memset(mc_filter, 0, sizeof(mc_filter));
510 
511 	if (dev->flags & IFF_PROMISC) {
512 		value |= XGMAC_FILTER_PR;
513 		value |= XGMAC_FILTER_PCF;
514 	} else if ((dev->flags & IFF_ALLMULTI) ||
515 		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
516 		value |= XGMAC_FILTER_PM;
517 
518 		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
519 			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
520 	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
521 		struct netdev_hw_addr *ha;
522 
523 		value |= XGMAC_FILTER_HMC;
524 
525 		netdev_for_each_mc_addr(ha, dev) {
526 			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
527 					(32 - mcbitslog2));
528 			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
529 		}
530 	}
531 
532 	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
533 
534 	/* Handle multiple unicast addresses */
535 	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
536 		value |= XGMAC_FILTER_PR;
537 	} else {
538 		struct netdev_hw_addr *ha;
539 		int reg = 1;
540 
541 		netdev_for_each_uc_addr(ha, dev) {
542 			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
543 			reg++;
544 		}
545 
546 		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
547 			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
548 			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
549 		}
550 	}
551 
552 	writel(value, ioaddr + XGMAC_PACKET_FILTER);
553 }
554 
/* Toggle MAC-level loopback via the LM bit in RX_CONFIG. */
static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 rx_cfg = readl(ioaddr + XGMAC_RX_CONFIG) & ~XGMAC_CONFIG_LM;

	if (enable)
		rx_cfg |= XGMAC_CONFIG_LM;

	writel(rx_cfg, ioaddr + XGMAC_RX_CONFIG);
}
566 
/* Write one RSS key word (is_key) or indirection-table entry through the
 * indirect DATA/ADDR register pair, then poll the OB (operation busy)
 * bit. Returns 0 on completion or a -ETIMEDOUT-style poll error.
 */
static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl;

	writel(val, ioaddr + XGMAC_RSS_DATA);

	ctrl = idx << XGMAC_RSSIA_SHIFT;
	if (is_key)
		ctrl |= XGMAC_ADDRT;
	ctrl |= XGMAC_OB;
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	/* Wait for the hardware to consume the write */
	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}
581 
/* Configure RSS: load the hash key and indirection table, point every RX
 * queue's DMA mapping at the DDMACH (RSS decides the channel), then turn
 * on hashing for UDP4/TCP4/IP. A NULL or disabled cfg turns RSS off.
 */
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, *key;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	/* The key is loaded one 32-bit word at a time */
	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}
616 
/* Program VLAN filtering: hash-based when a hash is given, perfect VID
 * match when perfect_match is set, otherwise fully disabled. is_double
 * selects double-VLAN (S-tag over C-tag) handling.
 */
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      u16 perfect_match, bool is_double)
{
	const u32 dvlan = XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL |
			  XGMAC_VLAN_DOVLTC;
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		/* Hash-based VLAN filtering */
		value = readl(ioaddr + XGMAC_PACKET_FILTER);
		value |= XGMAC_FILTER_VTFE;
		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);
		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double)
			value |= dvlan;
		else
			value &= ~dvlan;
		value &= ~XGMAC_VLAN_VID;
		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else if (perfect_match) {
		/* Perfect (single VID) filtering */
		value = readl(ioaddr + XGMAC_PACKET_FILTER);
		value |= XGMAC_FILTER_VTFE;
		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);
		value &= ~XGMAC_VLAN_VTHM;
		value |= XGMAC_VLAN_ETV;
		if (is_double)
			value |= dvlan;
		else
			value &= ~dvlan;
		value &= ~XGMAC_VLAN_VID;
		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
	} else {
		/* VLAN filtering fully disabled */
		value = readl(ioaddr + XGMAC_PACKET_FILTER);
		value &= ~XGMAC_FILTER_VTFE;
		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);
		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~dvlan;
		value &= ~XGMAC_VLAN_VID;
		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}
686 
/* One entry of a 32-slot safety-error description table; the table index
 * is the bit position in the corresponding interrupt status register.
 */
struct dwxgmac3_error_desc {
	bool valid;		/* bit position is a real, documented error */
	const char *desc;	/* short mnemonic, e.g. "TPES" */
	const char *detailed_desc;	/* human-readable description */
};

/* Byte offset of a counter array inside struct stmmac_safety_stats */
#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)
/* For each bit set in @value, log the matching entry of @desc (a 32-slot
 * table indexed by bit position) and bump the corresponding counter in
 * @stats at @field_offset.
 */
static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long bit, mask = value;
	unsigned long *counters;

	/* Counter array lives at field_offset bytes into the stats struct */
	counters = (unsigned long *)((u8 *)stats + field_offset);

	for_each_set_bit(bit, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
				"correctable" : "uncorrectable", module_name,
				desc[bit].desc, desc[bit].detailed_desc);

		/* Update counters */
		counters[bit]++;
	}
}
717 
/* MAC safety-error descriptions, indexed by bit position in
 * XGMAC_MAC_DPP_FSM_INT_STATUS (data-path parity and FSM timeout errors).
 */
static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};
752 
/* Read, acknowledge (write-1-to-clear) and log MAC safety errors. */
static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	writel(status, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}
765 
/* MTL safety-error descriptions, indexed by bit position in
 * XGMAC_MTL_ECC_INT_STATUS (TX/RX/EST/RX-parser memory ECC errors).
 */
static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
800 
/* Read, acknowledge (write-1-to-clear) and log MTL ECC errors. */
static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	writel(status, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}
813 
/* DMA safety-error descriptions, indexed by bit position in
 * XGMAC_DMA_ECC_INT_STATUS (TSO and descriptor-cache memory ECC errors).
 */
static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
848 
/* DMA descriptor data-path parity errors, indexed by bit position in
 * XGMAC_DMA_DPP_INT_STATUS: bits 0-15 are per-channel TX descriptor
 * parity errors, bits 16-31 the per-channel RX equivalents.
 */
static const char dpp_rx_err[] = "Read Rx Descriptor Parity checker Error";
static const char dpp_tx_err[] = "Read Tx Descriptor Parity checker Error";
static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
	{ true, "TDPES0", dpp_tx_err },
	{ true, "TDPES1", dpp_tx_err },
	{ true, "TDPES2", dpp_tx_err },
	{ true, "TDPES3", dpp_tx_err },
	{ true, "TDPES4", dpp_tx_err },
	{ true, "TDPES5", dpp_tx_err },
	{ true, "TDPES6", dpp_tx_err },
	{ true, "TDPES7", dpp_tx_err },
	{ true, "TDPES8", dpp_tx_err },
	{ true, "TDPES9", dpp_tx_err },
	{ true, "TDPES10", dpp_tx_err },
	{ true, "TDPES11", dpp_tx_err },
	{ true, "TDPES12", dpp_tx_err },
	{ true, "TDPES13", dpp_tx_err },
	{ true, "TDPES14", dpp_tx_err },
	{ true, "TDPES15", dpp_tx_err },
	{ true, "RDPES0", dpp_rx_err },
	{ true, "RDPES1", dpp_rx_err },
	{ true, "RDPES2", dpp_rx_err },
	{ true, "RDPES3", dpp_rx_err },
	{ true, "RDPES4", dpp_rx_err },
	{ true, "RDPES5", dpp_rx_err },
	{ true, "RDPES6", dpp_rx_err },
	{ true, "RDPES7", dpp_rx_err },
	{ true, "RDPES8", dpp_rx_err },
	{ true, "RDPES9", dpp_rx_err },
	{ true, "RDPES10", dpp_rx_err },
	{ true, "RDPES11", dpp_rx_err },
	{ true, "RDPES12", dpp_rx_err },
	{ true, "RDPES13", dpp_rx_err },
	{ true, "RDPES14", dpp_rx_err },
	{ true, "RDPES15", dpp_rx_err },
};
885 
/* Read, acknowledge and log both DMA ECC and DMA descriptor-parity
 * errors. DPP errors are always reported as uncorrectable (corr=false),
 * matching the original behavior.
 */
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status;

	status = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
	writel(status, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);

	status = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
	writel(status, ioaddr + XGMAC_DMA_DPP_INT_STATUS);

	dwxgmac3_log_error(ndev, status, false, "DMA_DPP",
			   dwxgmac3_dma_dpp_errors,
			   STAT_OFF(dma_dpp_errors), stats);
}
905 
/* Enable the MAC safety features (ECC, parity, FSM protection).
 *
 * @asp: Automotive Safety Package level from the HW feature registers;
 *	 0 means the core has no safety support, so -EINVAL is returned.
 * @safety_cfg: unused here; kept to match the stmmac_ops signature.
 *
 * Steps 1-3 always run. Steps 4-5 (FSM parity/timeout and data path
 * parity) are skipped for ASP levels 0x2 and 0x4, which lack the
 * corresponding parity ports (see the inline comment below).
 *
 * Returns 0 on success, -EINVAL when @asp is 0.
 */
static int
dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
			    struct stmmac_safety_feature_cfg *safety_cfg)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* 0x2: Without ECC or Parity Ports on External Application Interface
	 * 0x4: Only ECC Protection for External Memory feature is selected
	 */
	if (asp == 0x2 || asp == 0x4)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	/* 5. Enable Data Path Parity Protection */
	value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
	/* already enabled by default, explicit enable it again */
	value &= ~XGMAC_DPP_DISABLE;
	writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);

	return 0;
}
952 
/* Decode the pending safety interrupts and dispatch them to the MAC,
 * MTL and DMA error handlers.
 *
 * For each group, @corr marks whether the error is correctable; an
 * uncorrectable error sets a bit in the return value (ret |= !corr) so
 * the caller can tell that something serious happened.
 *
 * Returns a positive value when at least one uncorrectable error was
 * handled, 0 when only correctable errors (or none) were seen, and
 * -EINVAL when @asp reports no safety support.
 */
static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	/* MAC errors: always treated as uncorrectable (corr = false) */
	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* MTL errors: correctable only when solely the *CIS bits are set */
	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
	 * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
	 * Parity Errors here
	 */
	err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}
996 
/* Descriptor tables indexed by dwxgmac3_safety_feat_dump(): module i
 * maps to the i-th array below. The dump code assumes each module
 * occupies 32 consecutive unsigned longs in struct stmmac_safety_stats,
 * in this same order.
 */
static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
	{ dwxgmac3_dma_dpp_errors },
};
1005 
dwxgmac3_safety_feat_dump(struct stmmac_safety_stats * stats,int index,unsigned long * count,const char ** desc)1006 static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
1007 				     int index, unsigned long *count,
1008 				     const char **desc)
1009 {
1010 	int module = index / 32, offset = index % 32;
1011 	unsigned long *ptr = (unsigned long *)stats;
1012 
1013 	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
1014 		return -EINVAL;
1015 	if (!dwxgmac3_all_errors[module].desc[offset].valid)
1016 		return -EINVAL;
1017 	if (count)
1018 		*count = *(ptr + index);
1019 	if (desc)
1020 		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
1021 	return 0;
1022 }
1023 
/* Turn off the Flexible RX Parser by clearing FRPE in MTL_OPMODE.
 * Always returns 0; the int return matches the rxp_config error flow.
 */
static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
	u32 opmode;

	opmode = readl(ioaddr + XGMAC_MTL_OPMODE);
	writel(opmode & ~XGMAC_FRPE, ioaddr + XGMAC_MTL_OPMODE);

	return 0;
}
1033 
/* Turn on the Flexible RX Parser by setting FRPE in MTL_OPMODE. */
static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
	u32 opmode = readl(ioaddr + XGMAC_MTL_OPMODE);

	writel(opmode | XGMAC_FRPE, ioaddr + XGMAC_MTL_OPMODE);
}
1042 
/* Program one RX Parser entry at table slot @pos through the indirect
 * access registers.
 *
 * entry->val is written one 32-bit word at a time. For every word: wait
 * until STARTBUSY clears, write the data register, then build the
 * control word incrementally (address, then the write opcode, then the
 * start bit -- the intermediate writel()s preserve the original
 * programming sequence) and finally wait for completion.
 *
 * Returns 0 on success or the readl_poll_timeout() error code
 * (-ETIMEDOUT) when the hardware stays busy.
 */
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		/* Word address: slot base plus word offset within the entry */
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}
1084 
1085 static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry * entries,unsigned int count,u32 curr_prio)1086 dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
1087 			    unsigned int count, u32 curr_prio)
1088 {
1089 	struct stmmac_tc_entry *entry;
1090 	u32 min_prio = ~0x0;
1091 	int i, min_prio_idx;
1092 	bool found = false;
1093 
1094 	for (i = count - 1; i >= 0; i--) {
1095 		entry = &entries[i];
1096 
1097 		/* Do not update unused entries */
1098 		if (!entry->in_use)
1099 			continue;
1100 		/* Do not update already updated entries (i.e. fragments) */
1101 		if (entry->in_hw)
1102 			continue;
1103 		/* Let last entry be updated last */
1104 		if (entry->is_last)
1105 			continue;
1106 		/* Do not return fragments */
1107 		if (entry->is_frag)
1108 			continue;
1109 		/* Check if we already checked this prio */
1110 		if (entry->prio < curr_prio)
1111 			continue;
1112 		/* Check if this is the minimum prio */
1113 		if (entry->prio < min_prio) {
1114 			min_prio = entry->prio;
1115 			min_prio_idx = i;
1116 			found = true;
1117 		}
1118 	}
1119 
1120 	if (found)
1121 		return &entries[min_prio_idx];
1122 	return NULL;
1123 }
1124 
/* (Re)program the whole Flexible RX Parser table.
 *
 * RX is force-disabled and the parser switched off while the table is
 * rewritten. Regular entries are programmed in priority order via
 * dwxgmac3_rxp_get_next_entry(); an entry's fragment (if any) is
 * written immediately after it, and the entry's ok_index is set to
 * nve + 2, i.e. the slot right after the entry+fragment pair. The
 * terminating "all pass" entries (is_last) are written at the end,
 * NVE/NPE are programmed, and the parser is re-enabled.
 *
 * NOTE(review): when no entry is in use (nve == 0) this returns 0 with
 * the parser left disabled -- presumably the intended "nothing to
 * program" behavior; confirm before relying on it.
 *
 * Returns 0 on success or the first programming error; RX is re-enabled
 * (restored to its previous config) on all paths.
 */
static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		/* Program the fragment right after its parent entry */
		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}
1212 
/* Fetch the latest TX timestamp captured by the MAC.
 *
 * Polls TIMESTAMP_STATUS until TXTSC signals a captured timestamp (up
 * to 10ms, atomic poll), then combines the nanosecond and second
 * snapshot registers into a single nanosecond value in *ts.
 *
 * Returns 0 on success, -EBUSY when no timestamp becomes available.
 */
static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
				      value, value & XGMAC_TXTSC, 100, 10000))
		return -EBUSY;

	/* ns part first, then seconds scaled to ns */
	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
	return 0;
}
1226 
/* Configure one flexible PPS output.
 *
 * @index: PPS output index
 * @enable: false stops the output (PPSCMD_STOP) and returns early
 * @sub_second_inc: system-time sub-second increment, used as the tick
 *	size when converting the requested period
 * @systime_flags: PTP_TCR flags; must be non-zero. When
 *	PTP_TCR_TSCTRLSSR (digital rollover) is not set, the target
 *	nanoseconds are rescaled by x1000/465 for the binary rollover
 *	representation.
 *
 * The interval register gets the full period in ticks minus one; the
 * width register gets half the period minus one (roughly 50% duty
 * cycle).
 *
 * Returns 0 on success, -EINVAL for an unavailable output, bad flags or
 * a period too short to program, -EBUSY while the previous target time
 * is still pending.
 */
static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	/* Clear this output's command/mode field before rebuilding it */
	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);

	/* XGMAC Core has 4 PPS outputs at most.
	 *
	 * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for
	 * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default,
	 * and can not be switched to Fixed mode, since PPSEN{1,2,3} are
	 * read-only reserved to 0.
	 * But we always set PPSEN{1,2,3} do not make things worse ;-)
	 *
	 * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
	 * be set, or the PPS outputs stay in Fixed PPS mode by default.
	 */
	val |= XGMAC_PPSENx(index);

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	/* Convert the requested period to time ticks */
	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	/* Width = half the period */
	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}
1292 
/* Program the Source Address insertion/Replacement Control (SARC)
 * field of the TX config register with @val.
 */
static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 tx_cfg = readl(ioaddr + XGMAC_TX_CONFIG);

	tx_cfg = (tx_cfg & ~XGMAC_CONFIG_SARC) |
		 (val << XGMAC_CONFIG_SARC_SHIFT);

	writel(tx_cfg, ioaddr + XGMAC_TX_CONFIG);
}
1302 
/* Enable VLAN tag insertion from the TX descriptor, with tag type
 * @type programmed into the VLC field. CSVL selects S-VLAN tagging.
 */
static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 incl;

	incl = readl(ioaddr + XGMAC_VLAN_INCL);
	incl |= XGMAC_VLAN_VLTI | XGMAC_VLAN_CSVL; /* Only use SVLAN */
	incl = (incl & ~XGMAC_VLAN_VLC) |
	       ((type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC);
	writel(incl, ioaddr + XGMAC_VLAN_INCL);
}
1315 
dwxgmac2_filter_wait(struct mac_device_info * hw)1316 static int dwxgmac2_filter_wait(struct mac_device_info *hw)
1317 {
1318 	void __iomem *ioaddr = hw->pcsr;
1319 	u32 value;
1320 
1321 	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
1322 			       !(value & XGMAC_XB), 100, 10000))
1323 		return -EBUSY;
1324 	return 0;
1325 }
1326 
/* Read one 32-bit word of an L3/L4 filter via the indirect
 * ADDR_CTRL/DATA interface.
 *
 * @filter_no: filter instance number
 * @reg: register offset within the filter (e.g. XGMAC_L3L4_CTRL)
 * @data: filled with the value read
 *
 * Sequence: wait idle, issue a read command (TT set = transfer type
 * read, XB starts the access), wait for completion, then fetch the
 * data register. Returns 0 or -EBUSY from dwxgmac2_filter_wait().
 */
static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
				u8 reg, u32 *data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_TT | XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	*data = readl(ioaddr + XGMAC_L3L4_DATA);
	return 0;
}
1349 
/* Write one 32-bit word of an L3/L4 filter via the indirect
 * ADDR_CTRL/DATA interface.
 *
 * Sequence: wait idle, load the data register, then issue the write
 * command (no TT bit = transfer type write, XB starts the access) and
 * wait for completion. Returns 0 or -EBUSY from dwxgmac2_filter_wait().
 */
static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
				 u8 reg, u32 data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	writel(data, ioaddr + XGMAC_L3L4_DATA);

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	return dwxgmac2_filter_wait(hw);
}
1369 
/* Configure L3 (IP address) filter @filter_no.
 *
 * @en: enable the filter; when false the control word is cleared at the
 *	end (the filter is still fully programmed first)
 * @ipv6: IPv6 instead of IPv4
 * @sa: match the source address instead of the destination
 * @inv: invert the match
 * @match: address value; written to L3_ADDR0 for SA or L3_ADDR1 for DA
 *
 * Note the asymmetry below: the IPv6 branch clears both the SA and DA
 * match/invert bits first (only one of the two may be active for IPv6),
 * while the IPv4 branch leaves previously set bits alone.
 *
 * Returns 0 on success or the first filter_read/filter_write error.
 */
static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool ipv6, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	/* Globally enable IP (L3/L4) filtering */
	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= XGMAC_L3PEN0;
		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	} else {
		value &= ~XGMAC_L3PEN0;
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	/* Program the address to match: ADDR0 for SA, ADDR1 for DA */
	if (sa) {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
		if (ret)
			return ret;
	} else {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
		if (ret)
			return ret;
	}

	/* Disable requested: clear the control word last */
	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}
1432 
/* Configure L4 (TCP/UDP port) filter @filter_no.
 *
 * @en: enable the filter; when false the control word is cleared at the
 *	end (the filter is still fully programmed first)
 * @udp: match UDP (L4PEN0 set) instead of TCP
 * @sa: match the source port instead of the destination port
 * @inv: invert the match
 * @match: port value; placed in the low (SP) or high (DP) half of
 *	XGMAC_L4_ADDR accordingly
 *
 * Returns 0 on success or the first filter_read/filter_write error.
 */
static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool udp, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	/* Globally enable IP (L3/L4) filtering */
	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* L4PEN0 selects UDP; clear for TCP */
	if (udp) {
		value |= XGMAC_L4PEN0;
	} else {
		value &= ~XGMAC_L4PEN0;
	}

	/* Reset both port match/invert bits, then set the requested one */
	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
	if (sa) {
		value |= XGMAC_L4SPM0;
		if (inv)
			value |= XGMAC_L4SPIM0;
	} else {
		value |= XGMAC_L4DPM0;
		if (inv)
			value |= XGMAC_L4DPIM0;
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	/* Source port sits in the low half, destination in the high half */
	if (sa) {
		value = match & XGMAC_L4SP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	} else {
		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	}

	/* Disable requested: clear the control word last */
	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}
1490 
/* Program the ARP address register and toggle ARP offload (ARPEN) in
 * the RX config register according to @en.
 */
static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
				     u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 rx_cfg;

	writel(addr, ioaddr + XGMAC_ARP_ADDR);

	rx_cfg = readl(ioaddr + XGMAC_RX_CONFIG);
	rx_cfg &= ~XGMAC_CONFIG_ARPEN;
	if (en)
		rx_cfg |= XGMAC_CONFIG_ARPEN;
	writel(rx_cfg, ioaddr + XGMAC_RX_CONFIG);
}
1506 
/* Enable or disable Frame Preemption (FPE) on the TX path.
 *
 * Disabling only clears EFPE. Enabling first programs the number of RX
 * queues into RXQ_CTRL1.RQ (encoded as num_rxq - 1), then sets EFPE.
 * @cfg, @num_txq and @pmac_enable are unused in this implementation.
 */
static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
				   struct stmmac_fpe_cfg *cfg,
				   u32 num_txq, u32 num_rxq,
				   bool tx_enable, bool pmac_enable)
{
	u32 value;

	if (!tx_enable) {
		value = readl(ioaddr + XGMAC_FPE_CTRL_STS);

		value &= ~XGMAC_EFPE;

		writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
		return;
	}

	/* Program the RX queue count before enabling preemption */
	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
	value &= ~XGMAC_RQ;
	value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
	writel(value, ioaddr + XGMAC_RXQ_CTRL1);

	value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
	value |= XGMAC_EFPE;
	writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
}
1532 
/* stmmac HW callback table for the XGMAC 2.10 core */
const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1575 
/* XLGMAC variant of rx_queue_enable: program the 2-bit RXQEN field of
 * @queue in XLGMAC_RXQ_ENABLE_CTRL0 (0x1 = AVB, 0x2 = DCB, 0x0
 * otherwise, i.e. the queue stays disabled).
 */
static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				      u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl, qmode = 0;

	if (mode == MTL_QUEUE_AVB)
		qmode = 0x1;
	else if (mode == MTL_QUEUE_DCB)
		qmode = 0x2;

	ctrl = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
	ctrl &= ~XGMAC_RXQEN(queue);
	ctrl |= qmode << XGMAC_RXQEN_SHIFT(queue);
	writel(ctrl, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
}
1589 
/* stmmac HW callback table for the XLGMAC core; identical to
 * dwxgmac210_ops except for the XLGMAC-specific rx_queue_enable.
 */
const struct stmmac_ops dwxlgmac2_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1632 
/* One-time mac_device_info setup for an XGMAC2 core: filter geometry,
 * link capabilities, per-speed register encodings and the MDIO register
 * layout. Always returns 0.
 */
int dwxgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXGMAC2\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	/* log2 of the number of hash bins; 0 when there are none */
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
			 MAC_1000FD | MAC_2500FD | MAC_5000FD |
			 MAC_10000FD;
	mac->link.duplex = 0;
	/* Speed-select field encodings for the MAC config register */
	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

	/* MDIO register offsets and field layout */
	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}
1672 
/* One-time mac_device_info setup for an XLGMAC core; same structure as
 * dwxgmac2_setup() but with the XLGMAC capability set (up to 100G) and
 * XLGMAC speed-select encodings. Always returns 0.
 */
int dwxlgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXLGMAC\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	/* log2 of the number of hash bins; 0 when there are none */
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
			 MAC_1000FD | MAC_2500FD | MAC_5000FD |
			 MAC_10000FD | MAC_25000FD |
			 MAC_40000FD | MAC_50000FD |
			 MAC_100000FD;
	mac->link.duplex = 0;
	/* Speed-select field encodings for the MAC config register */
	mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
	mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
	mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
	mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
	mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
	mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
	mac->link.speed_mask = XLGMAC_CONFIG_SS;

	/* MDIO register offsets and field layout */
	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}
1714