xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6 
7 #include <linux/bitrev.h>
8 #include <linux/crc32.h>
9 #include <linux/iopoll.h>
10 #include "stmmac.h"
11 #include "stmmac_fpe.h"
12 #include "stmmac_ptp.h"
13 #include "dwxlgmac2.h"
14 #include "dwxgmac2.h"
15 
16 static void dwxgmac2_core_init(struct mac_device_info *hw,
17 			       struct net_device *dev)
18 {
19 	void __iomem *ioaddr = hw->pcsr;
20 	u32 tx, rx;
21 
22 	tx = readl(ioaddr + XGMAC_TX_CONFIG);
23 	rx = readl(ioaddr + XGMAC_RX_CONFIG);
24 
25 	tx |= XGMAC_CORE_INIT_TX;
26 	rx |= XGMAC_CORE_INIT_RX;
27 
28 	if (hw->ps) {
29 		tx |= XGMAC_CONFIG_TE;
30 		tx &= ~hw->link.speed_mask;
31 
32 		switch (hw->ps) {
33 		case SPEED_10000:
34 			tx |= hw->link.xgmii.speed10000;
35 			break;
36 		case SPEED_2500:
37 			tx |= hw->link.speed2500;
38 			break;
39 		case SPEED_1000:
40 		default:
41 			tx |= hw->link.speed1000;
42 			break;
43 		}
44 	}
45 
46 	writel(tx, ioaddr + XGMAC_TX_CONFIG);
47 	writel(rx, ioaddr + XGMAC_RX_CONFIG);
48 	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
49 }
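/*
 * dwxgmac2_core_init() ORs the default Tx/Rx configuration bits into
 * MAC_TX_CONFIG/MAC_RX_CONFIG. When a fixed speed is requested via hw->ps it
 * also enables the transmitter and programs the speed-select field (10G,
 * 2.5G or 1G, defaulting to 1G), then unmasks the default MAC interrupt set
 * through XGMAC_INT_EN.
 */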
50 
51 static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
52 {
53 	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
54 	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
55 
56 	if (enable) {
57 		tx |= XGMAC_CONFIG_TE;
58 		rx |= XGMAC_CONFIG_RE;
59 	} else {
60 		tx &= ~XGMAC_CONFIG_TE;
61 		rx &= ~XGMAC_CONFIG_RE;
62 	}
63 
64 	writel(tx, ioaddr + XGMAC_TX_CONFIG);
65 	writel(rx, ioaddr + XGMAC_RX_CONFIG);
66 }
67 
68 static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
69 {
70 	void __iomem *ioaddr = hw->pcsr;
71 	u32 value;
72 
73 	value = readl(ioaddr + XGMAC_RX_CONFIG);
74 	if (hw->rx_csum)
75 		value |= XGMAC_CONFIG_IPC;
76 	else
77 		value &= ~XGMAC_CONFIG_IPC;
78 	writel(value, ioaddr + XGMAC_RX_CONFIG);
79 
80 	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
81 }
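/*
 * dwxgmac2_rx_ipc() mirrors hw->rx_csum into the IPC (Rx checksum offload)
 * bit of MAC_RX_CONFIG and reads the register back, so the return value
 * reports whether the hardware actually accepted the setting.
 */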
82 
83 static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
84 				     u32 queue)
85 {
86 	void __iomem *ioaddr = hw->pcsr;
87 	u32 value;
88 
89 	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
90 	if (mode == MTL_QUEUE_AVB)
91 		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
92 	else if (mode == MTL_QUEUE_DCB)
93 		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
94 	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
95 }
96 
97 static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
98 				   u32 queue)
99 {
100 	void __iomem *ioaddr = hw->pcsr;
101 	u32 clear_mask = 0;
102 	u32 ctrl2, ctrl3;
103 	int i;
104 
105 	ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
106 	ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);
107 
108 	/* The software must ensure that the same priority
109 	 * is not mapped to multiple Rx queues
110 	 */
111 	for (i = 0; i < 4; i++)
112 		clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) &
113 						XGMAC_PSRQ(i));
114 
115 	ctrl2 &= ~clear_mask;
116 	ctrl3 &= ~clear_mask;
117 
118 	/* First assign new priorities to a queue, then
119 	 * clear them from the other queues
120 	 */
121 	if (queue < 4) {
122 		ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
123 						XGMAC_PSRQ(queue);
124 
125 		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
126 		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
127 	} else {
128 		queue -= 4;
129 
130 		ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
131 						XGMAC_PSRQ(queue);
132 
133 		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
134 		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
135 	}
136 }
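/*
 * Queues 0-3 keep their priority bitmap in MAC_RXQ_CTRL2 and queues 4-7 in
 * MAC_RXQ_CTRL3. The requested priority bits are first cleared from every
 * PSRQ field of both registers, so no priority ends up mapped to two Rx
 * queues, and only then is the new mapping written for the target queue.
 */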
137 
138 static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
139 				   u32 queue)
140 {
141 	void __iomem *ioaddr = hw->pcsr;
142 	u32 value, reg;
143 
144 	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
145 	if (queue >= 4)
146 		queue -= 4;
147 
148 	value = readl(ioaddr + reg);
149 	value &= ~XGMAC_PSTC(queue);
150 	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
151 
152 	writel(value, ioaddr + reg);
153 }
154 
155 static void dwxgmac2_rx_queue_routing(struct mac_device_info *hw,
156 				      u8 packet, u32 queue)
157 {
158 	void __iomem *ioaddr = hw->pcsr;
159 	u32 value;
160 
161 	static const struct stmmac_rx_routing dwxgmac2_route_possibilities[] = {
162 		{ XGMAC_AVCPQ, XGMAC_AVCPQ_SHIFT },
163 		{ XGMAC_PTPQ, XGMAC_PTPQ_SHIFT },
164 		{ XGMAC_DCBCPQ, XGMAC_DCBCPQ_SHIFT },
165 		{ XGMAC_UPQ, XGMAC_UPQ_SHIFT },
166 		{ XGMAC_MCBCQ, XGMAC_MCBCQ_SHIFT },
167 	};
168 
169 	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
170 
171 	/* routing configuration */
172 	value &= ~dwxgmac2_route_possibilities[packet - 1].reg_mask;
173 	value |= (queue << dwxgmac2_route_possibilities[packet - 1].reg_shift) &
174 		 dwxgmac2_route_possibilities[packet - 1].reg_mask;
175 
176 	/* some packets require extra ops */
177 	if (packet == PACKET_AVCPQ)
178 		value |= FIELD_PREP(XGMAC_TACPQE, 1);
179 	else if (packet == PACKET_MCBCQ)
180 		value |= FIELD_PREP(XGMAC_MCBCQEN, 1);
181 
182 	writel(value, ioaddr + XGMAC_RXQ_CTRL1);
183 }
184 
185 static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
186 					    u32 rx_alg)
187 {
188 	void __iomem *ioaddr = hw->pcsr;
189 	u32 value;
190 
191 	value = readl(ioaddr + XGMAC_MTL_OPMODE);
192 	value &= ~XGMAC_RAA;
193 
194 	switch (rx_alg) {
195 	case MTL_RX_ALGORITHM_SP:
196 		break;
197 	case MTL_RX_ALGORITHM_WSP:
198 		value |= XGMAC_RAA;
199 		break;
200 	default:
201 		break;
202 	}
203 
204 	writel(value, ioaddr + XGMAC_MTL_OPMODE);
205 }
206 
207 static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
208 					    u32 tx_alg)
209 {
210 	void __iomem *ioaddr = hw->pcsr;
211 	bool ets = true;
212 	u32 value;
213 	int i;
214 
215 	value = readl(ioaddr + XGMAC_MTL_OPMODE);
216 	value &= ~XGMAC_ETSALG;
217 
218 	switch (tx_alg) {
219 	case MTL_TX_ALGORITHM_WRR:
220 		value |= XGMAC_WRR;
221 		break;
222 	case MTL_TX_ALGORITHM_WFQ:
223 		value |= XGMAC_WFQ;
224 		break;
225 	case MTL_TX_ALGORITHM_DWRR:
226 		value |= XGMAC_DWRR;
227 		break;
228 	default:
229 		ets = false;
230 		break;
231 	}
232 
233 	writel(value, ioaddr + XGMAC_MTL_OPMODE);
234 
235 	/* Set ETS if desired */
236 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
237 		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
238 		value &= ~XGMAC_TSA;
239 		if (ets)
240 			value |= XGMAC_ETS;
241 		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
242 	}
243 }
244 
245 static void dwxgmac2_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
246 					     struct mac_device_info *hw,
247 					     u32 weight, u32 queue)
248 {
249 	void __iomem *ioaddr = hw->pcsr;
250 
251 	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
252 }
253 
254 static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
255 				    u32 chan)
256 {
257 	void __iomem *ioaddr = hw->pcsr;
258 	u32 value, reg;
259 
260 	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
261 	if (queue >= 4)
262 		queue -= 4;
263 
264 	value = readl(ioaddr + reg);
265 	value &= ~XGMAC_QxMDMACH(queue);
266 	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
267 
268 	writel(value, ioaddr + reg);
269 }
270 
271 static void dwxgmac2_config_cbs(struct stmmac_priv *priv,
272 				struct mac_device_info *hw,
273 				u32 send_slope, u32 idle_slope,
274 				u32 high_credit, u32 low_credit, u32 queue)
275 {
276 	void __iomem *ioaddr = hw->pcsr;
277 	u32 value;
278 
279 	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
280 	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
281 	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
282 	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
283 
284 	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
285 	value &= ~XGMAC_TSA;
286 	value |= XGMAC_CC | XGMAC_CBS;
287 	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
288 }
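/*
 * Credit Based Shaper setup: the slope and credit parameters land in the
 * per-TC MTL registers (the idle slope reuses the quantum/weight register),
 * and the transmit selection algorithm of the traffic class is switched to
 * CBS with credit control (XGMAC_CC | XGMAC_CBS).
 */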
289 
290 static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
291 {
292 	void __iomem *ioaddr = hw->pcsr;
293 	int i;
294 
295 	for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
296 		reg_space[i] = readl(ioaddr + i * 4);
297 }
298 
299 static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
300 				    struct stmmac_extra_stats *x)
301 {
302 	void __iomem *ioaddr = hw->pcsr;
303 	u32 stat, en;
304 	int ret = 0;
305 
306 	en = readl(ioaddr + XGMAC_INT_EN);
307 	stat = readl(ioaddr + XGMAC_INT_STATUS);
308 
309 	stat &= en;
310 
311 	if (stat & XGMAC_PMTIS) {
312 		x->irq_receive_pmt_irq_n++;
313 		readl(ioaddr + XGMAC_PMT);
314 	}
315 
316 	if (stat & XGMAC_LPIIS) {
317 		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
318 
319 		if (lpi & XGMAC_TLPIEN) {
320 			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
321 			x->irq_tx_path_in_lpi_mode_n++;
322 		}
323 		if (lpi & XGMAC_TLPIEX) {
324 			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
325 			x->irq_tx_path_exit_lpi_mode_n++;
326 		}
327 		if (lpi & XGMAC_RLPIEN)
328 			x->irq_rx_path_in_lpi_mode_n++;
329 		if (lpi & XGMAC_RLPIEX)
330 			x->irq_rx_path_exit_lpi_mode_n++;
331 	}
332 
333 	return ret;
334 }
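/*
 * Only sources that are both pending and enabled are considered. On a PMT
 * interrupt the MAC_PMT register is read back, presumably to clear the
 * wake-up status; LPI entry/exit events bump the EEE counters and the
 * Tx-path transitions are reported to the caller via CORE_IRQ_TX_PATH_*.
 */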
335 
336 static int dwxgmac2_host_mtl_irq_status(struct stmmac_priv *priv,
337 					struct mac_device_info *hw, u32 chan)
338 {
339 	void __iomem *ioaddr = hw->pcsr;
340 	int ret = 0;
341 	u32 status;
342 
343 	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
344 	if (status & BIT(chan)) {
345 		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
346 
347 		if (chan_status & XGMAC_RXOVFIS)
348 			ret |= CORE_IRQ_MTL_RX_OVERFLOW;
349 
350 		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
351 	}
352 
353 	return ret;
354 }
355 
356 static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
357 			       unsigned int fc, unsigned int pause_time,
358 			       u32 tx_cnt)
359 {
360 	void __iomem *ioaddr = hw->pcsr;
361 	u32 i;
362 
363 	if (fc & FLOW_RX)
364 		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
365 	if (fc & FLOW_TX) {
366 		for (i = 0; i < tx_cnt; i++) {
367 			u32 value = XGMAC_TFE;
368 
369 			if (duplex)
370 				value |= pause_time << XGMAC_PT_SHIFT;
371 
372 			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
373 		}
374 	}
375 }
376 
377 static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
378 {
379 	void __iomem *ioaddr = hw->pcsr;
380 	u32 val = 0x0;
381 
382 	if (mode & WAKE_MAGIC)
383 		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
384 	if (mode & WAKE_UCAST)
385 		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
386 	if (val) {
387 		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
388 		cfg |= XGMAC_CONFIG_RE;
389 		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
390 	}
391 
392 	writel(val, ioaddr + XGMAC_PMT);
393 }
394 
395 static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
396 				   const unsigned char *addr,
397 				   unsigned int reg_n)
398 {
399 	void __iomem *ioaddr = hw->pcsr;
400 	u32 value;
401 
402 	value = (addr[5] << 8) | addr[4];
403 	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
404 
405 	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
406 	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
407 }
408 
409 static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
410 				   unsigned char *addr, unsigned int reg_n)
411 {
412 	void __iomem *ioaddr = hw->pcsr;
413 	u32 hi_addr, lo_addr;
414 
415 	/* Read the MAC address from the hardware */
416 	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
417 	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
418 
419 	/* Extract the MAC address from the high and low words */
420 	addr[0] = lo_addr & 0xff;
421 	addr[1] = (lo_addr >> 8) & 0xff;
422 	addr[2] = (lo_addr >> 16) & 0xff;
423 	addr[3] = (lo_addr >> 24) & 0xff;
424 	addr[4] = hi_addr & 0xff;
425 	addr[5] = (hi_addr >> 8) & 0xff;
426 }
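/*
 * The address registers follow the usual stmmac layout: ADDRx_LOW holds MAC
 * bytes 0-3 (byte 0 in bits 7:0) and ADDRx_HIGH holds bytes 4-5. As an
 * illustration, a hypothetical address 00:11:22:33:44:55 reads back as
 * lo_addr == 0x33221100 with the low 16 bits of hi_addr == 0x5544.
 */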
427 
428 static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
429 				  bool en_tx_lpi_clockgating)
430 {
431 	void __iomem *ioaddr = hw->pcsr;
432 	u32 value;
433 
434 	value = readl(ioaddr + XGMAC_LPI_CTRL);
435 
436 	value |= XGMAC_LPITXEN | XGMAC_LPITXA;
437 	if (en_tx_lpi_clockgating)
438 		value |= XGMAC_TXCGE;
439 
440 	writel(value, ioaddr + XGMAC_LPI_CTRL);
441 }
442 
443 static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
444 {
445 	void __iomem *ioaddr = hw->pcsr;
446 	u32 value;
447 
448 	value = readl(ioaddr + XGMAC_LPI_CTRL);
449 	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
450 	writel(value, ioaddr + XGMAC_LPI_CTRL);
451 }
452 
453 static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
454 {
455 	void __iomem *ioaddr = hw->pcsr;
456 	u32 value;
457 
458 	value = readl(ioaddr + XGMAC_LPI_CTRL);
459 	if (link)
460 		value |= XGMAC_PLS;
461 	else
462 		value &= ~XGMAC_PLS;
463 	writel(value, ioaddr + XGMAC_LPI_CTRL);
464 }
465 
466 static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
467 {
468 	void __iomem *ioaddr = hw->pcsr;
469 	u32 value;
470 
471 	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
472 	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
473 }
474 
475 static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
476 				int mcbitslog2)
477 {
478 	int numhashregs, regs;
479 
480 	switch (mcbitslog2) {
481 	case 6:
482 		numhashregs = 2;
483 		break;
484 	case 7:
485 		numhashregs = 4;
486 		break;
487 	case 8:
488 		numhashregs = 8;
489 		break;
490 	default:
491 		return;
492 	}
493 
494 	for (regs = 0; regs < numhashregs; regs++)
495 		writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
496 }
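/*
 * The number of hash registers written follows the hash width advertised by
 * the hardware: 64 bins (mcbitslog2 == 6) span two 32-bit HASH_TABLE
 * registers, 128 bins span four and 256 bins span eight. Any other width is
 * silently ignored.
 */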
497 
498 static void dwxgmac2_set_filter(struct mac_device_info *hw,
499 				struct net_device *dev)
500 {
501 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
502 	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
503 	int mcbitslog2 = hw->mcast_bits_log2;
504 	u32 mc_filter[8];
505 	int i;
506 
507 	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
508 	value |= XGMAC_FILTER_HPF;
509 
510 	memset(mc_filter, 0, sizeof(mc_filter));
511 
512 	if (dev->flags & IFF_PROMISC) {
513 		value |= XGMAC_FILTER_PR;
514 		value |= XGMAC_FILTER_PCF;
515 	} else if ((dev->flags & IFF_ALLMULTI) ||
516 		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
517 		value |= XGMAC_FILTER_PM;
518 
519 		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
520 			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
521 	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
522 		struct netdev_hw_addr *ha;
523 
524 		value |= XGMAC_FILTER_HMC;
525 
526 		netdev_for_each_mc_addr(ha, dev) {
527 			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
528 					(32 - mcbitslog2));
529 			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
530 		}
531 	}
532 
533 	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
534 
535 	/* Handle multiple unicast addresses */
536 	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
537 		value |= XGMAC_FILTER_PR;
538 	} else {
539 		struct netdev_hw_addr *ha;
540 		int reg = 1;
541 
542 		netdev_for_each_uc_addr(ha, dev) {
543 			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
544 			reg++;
545 		}
546 
547 		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
548 			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
549 			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
550 		}
551 	}
552 
553 	writel(value, ioaddr + XGMAC_PACKET_FILTER);
554 }
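/*
 * Filter policy, in order of precedence: promiscuous mode passes everything
 * (including control frames via PCF); IFF_ALLMULTI or an overflowing
 * multicast list falls back to pass-all-multicast with the hash tables fully
 * set; otherwise each multicast address is hashed using the upper
 * mcbitslog2 bits of the bit-reversed CRC32. Unicast entries beyond the
 * perfect-filter capacity force promiscuous mode, and unused address
 * registers are cleared.
 */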
555 
556 static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
557 {
558 	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
559 
560 	if (enable)
561 		value |= XGMAC_CONFIG_LM;
562 	else
563 		value &= ~XGMAC_CONFIG_LM;
564 
565 	writel(value, ioaddr + XGMAC_RX_CONFIG);
566 }
567 
568 static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
569 				  u32 val)
570 {
571 	u32 ctrl = 0;
572 
573 	writel(val, ioaddr + XGMAC_RSS_DATA);
574 	ctrl |= idx << XGMAC_RSSIA_SHIFT;
575 	ctrl |= is_key ? XGMAC_ADDRT : 0x0;
576 	ctrl |= XGMAC_OB;
577 	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
578 
579 	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
580 				  !(ctrl & XGMAC_OB), 100, 10000);
581 }
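/*
 * RSS key words and indirection-table entries are programmed through an
 * indirect interface: the value goes into RSS_DATA, the index (plus ADDRT
 * for key words) into RSS_ADDR together with the OB bit (presumably
 * "operation busy"), and the write completes once the hardware clears OB,
 * polled for up to 10ms.
 */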
582 
583 static int dwxgmac2_rss_configure(struct mac_device_info *hw,
584 				  struct stmmac_rss *cfg, u32 num_rxq)
585 {
586 	void __iomem *ioaddr = hw->pcsr;
587 	u32 value, *key;
588 	int i, ret;
589 
590 	value = readl(ioaddr + XGMAC_RSS_CTRL);
591 	if (!cfg || !cfg->enable) {
592 		value &= ~XGMAC_RSSE;
593 		writel(value, ioaddr + XGMAC_RSS_CTRL);
594 		return 0;
595 	}
596 
597 	key = (u32 *)cfg->key;
598 	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
599 		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
600 		if (ret)
601 			return ret;
602 	}
603 
604 	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
605 		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
606 		if (ret)
607 			return ret;
608 	}
609 
610 	for (i = 0; i < num_rxq; i++)
611 		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);
612 
613 	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
614 	writel(value, ioaddr + XGMAC_RSS_CTRL);
615 	return 0;
616 }
617 
618 static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
619 				      u16 perfect_match, bool is_double)
620 {
621 	void __iomem *ioaddr = hw->pcsr;
622 
623 	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
624 
625 	if (hash) {
626 		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
627 
628 		value |= XGMAC_FILTER_VTFE;
629 
630 		writel(value, ioaddr + XGMAC_PACKET_FILTER);
631 
632 		value = readl(ioaddr + XGMAC_VLAN_TAG);
633 
634 		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
635 		if (is_double) {
636 			value |= XGMAC_VLAN_EDVLP;
637 			value |= XGMAC_VLAN_ESVL;
638 			value |= XGMAC_VLAN_DOVLTC;
639 		} else {
640 			value &= ~XGMAC_VLAN_EDVLP;
641 			value &= ~XGMAC_VLAN_ESVL;
642 			value &= ~XGMAC_VLAN_DOVLTC;
643 		}
644 
645 		value &= ~XGMAC_VLAN_VID;
646 		writel(value, ioaddr + XGMAC_VLAN_TAG);
647 	} else if (perfect_match) {
648 		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
649 
650 		value |= XGMAC_FILTER_VTFE;
651 
652 		writel(value, ioaddr + XGMAC_PACKET_FILTER);
653 
654 		value = readl(ioaddr + XGMAC_VLAN_TAG);
655 
656 		value &= ~XGMAC_VLAN_VTHM;
657 		value |= XGMAC_VLAN_ETV;
658 		if (is_double) {
659 			value |= XGMAC_VLAN_EDVLP;
660 			value |= XGMAC_VLAN_ESVL;
661 			value |= XGMAC_VLAN_DOVLTC;
662 		} else {
663 			value &= ~XGMAC_VLAN_EDVLP;
664 			value &= ~XGMAC_VLAN_ESVL;
665 			value &= ~XGMAC_VLAN_DOVLTC;
666 		}
667 
668 		value &= ~XGMAC_VLAN_VID;
669 		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
670 	} else {
671 		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
672 
673 		value &= ~XGMAC_FILTER_VTFE;
674 
675 		writel(value, ioaddr + XGMAC_PACKET_FILTER);
676 
677 		value = readl(ioaddr + XGMAC_VLAN_TAG);
678 
679 		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
680 		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
681 		value &= ~XGMAC_VLAN_DOVLTC;
682 		value &= ~XGMAC_VLAN_VID;
683 
684 		writel(value, ioaddr + XGMAC_VLAN_TAG);
685 	}
686 }
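/*
 * Three VLAN filtering modes are handled: hash filtering (VTHM + ETV with
 * VTFE in the packet filter), single-VID perfect match (ETV only, with the
 * VID OR'ed into MAC_VLAN_TAG), or filtering disabled when neither a hash
 * nor a perfect-match VID is supplied. Double-VLAN matching (EDVLP, ESVL,
 * DOVLTC) is toggled the same way in both enabled modes.
 */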
687 
688 struct dwxgmac3_error_desc {
689 	bool valid;
690 	const char *desc;
691 	const char *detailed_desc;
692 };
693 
694 #define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)
695 
696 static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
697 			       const char *module_name,
698 			       const struct dwxgmac3_error_desc *desc,
699 			       unsigned long field_offset,
700 			       struct stmmac_safety_stats *stats)
701 {
702 	unsigned long loc, mask;
703 	u8 *bptr = (u8 *)stats;
704 	unsigned long *ptr;
705 
706 	ptr = (unsigned long *)(bptr + field_offset);
707 
708 	mask = value;
709 	for_each_set_bit(loc, &mask, 32) {
710 		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
711 				"correctable" : "uncorrectable", module_name,
712 				desc[loc].desc, desc[loc].detailed_desc);
713 
714 		/* Update counters */
715 		ptr[loc]++;
716 	}
717 }
718 
719 static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32] = {
720 	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
721 	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
722 	{ true, "TPES", "TSO Data Path Parity Check Error" },
723 	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
724 	{ true, "MTPES", "MTL Data Path Parity Check Error" },
725 	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
726 	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
727 	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
728 	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
729 	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
730 	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
731 	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
732 	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
733 	{ true, "TTES", "TX FSM Timeout Error" },
734 	{ true, "RTES", "RX FSM Timeout Error" },
735 	{ true, "CTES", "CSR FSM Timeout Error" },
736 	{ true, "ATES", "APP FSM Timeout Error" },
737 	{ true, "PTES", "PTP FSM Timeout Error" },
738 	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
739 	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
740 	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
741 	{ true, "MSTTES", "Master Read/Write Timeout Error" },
742 	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
743 	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
744 	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
745 	{ true, "FSMPES", "FSM State Parity Error" },
746 	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
747 	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
748 	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
749 	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
750 	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
751 	{ true, "CPI", "Control Register Parity Check Error" },
752 };
753 
754 static void dwxgmac3_handle_mac_err(struct net_device *ndev,
755 				    void __iomem *ioaddr, bool correctable,
756 				    struct stmmac_safety_stats *stats)
757 {
758 	u32 value;
759 
760 	value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
761 	writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
762 
763 	dwxgmac3_log_error(ndev, value, correctable, "MAC",
764 			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
765 }
766 
767 static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32] = {
768 	{ true, "TXCES", "MTL TX Memory Error" },
769 	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
770 	{ true, "TXUES", "MTL TX Memory Error" },
771 	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
772 	{ true, "RXCES", "MTL RX Memory Error" },
773 	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
774 	{ true, "RXUES", "MTL RX Memory Error" },
775 	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
776 	{ true, "ECES", "MTL EST Memory Error" },
777 	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
778 	{ true, "EUES", "MTL EST Memory Error" },
779 	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
780 	{ true, "RPCES", "MTL RX Parser Memory Error" },
781 	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
782 	{ true, "RPUES", "MTL RX Parser Memory Error" },
783 	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
784 	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
785 	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
786 	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
787 	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
788 	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
789 	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
790 	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
791 	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
792 	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
793 	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
794 	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
795 	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
796 	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
797 	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
798 	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
799 	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
800 };
801 
802 static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
803 				    void __iomem *ioaddr, bool correctable,
804 				    struct stmmac_safety_stats *stats)
805 {
806 	u32 value;
807 
808 	value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
809 	writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
810 
811 	dwxgmac3_log_error(ndev, value, correctable, "MTL",
812 			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
813 }
814 
815 static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
816 	{ true, "TCES", "DMA TSO Memory Error" },
817 	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
818 	{ true, "TUES", "DMA TSO Memory Error" },
819 	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
820 	{ true, "DCES", "DMA DCACHE Memory Error" },
821 	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
822 	{ true, "DUES", "DMA DCACHE Memory Error" },
823 	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
824 	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
825 	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
826 	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
827 	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
828 	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
829 	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
830 	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
831 	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
832 	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
833 	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
834 	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
835 	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
836 	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
837 	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
838 	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
839 	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
840 	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
841 	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
842 	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
843 	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
844 	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
845 	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
846 	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
847 	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
848 };
849 
850 static const char dpp_rx_err[] = "Read Rx Descriptor Parity checker Error";
851 static const char dpp_tx_err[] = "Read Tx Descriptor Parity checker Error";
852 static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
853 	{ true, "TDPES0", dpp_tx_err },
854 	{ true, "TDPES1", dpp_tx_err },
855 	{ true, "TDPES2", dpp_tx_err },
856 	{ true, "TDPES3", dpp_tx_err },
857 	{ true, "TDPES4", dpp_tx_err },
858 	{ true, "TDPES5", dpp_tx_err },
859 	{ true, "TDPES6", dpp_tx_err },
860 	{ true, "TDPES7", dpp_tx_err },
861 	{ true, "TDPES8", dpp_tx_err },
862 	{ true, "TDPES9", dpp_tx_err },
863 	{ true, "TDPES10", dpp_tx_err },
864 	{ true, "TDPES11", dpp_tx_err },
865 	{ true, "TDPES12", dpp_tx_err },
866 	{ true, "TDPES13", dpp_tx_err },
867 	{ true, "TDPES14", dpp_tx_err },
868 	{ true, "TDPES15", dpp_tx_err },
869 	{ true, "RDPES0", dpp_rx_err },
870 	{ true, "RDPES1", dpp_rx_err },
871 	{ true, "RDPES2", dpp_rx_err },
872 	{ true, "RDPES3", dpp_rx_err },
873 	{ true, "RDPES4", dpp_rx_err },
874 	{ true, "RDPES5", dpp_rx_err },
875 	{ true, "RDPES6", dpp_rx_err },
876 	{ true, "RDPES7", dpp_rx_err },
877 	{ true, "RDPES8", dpp_rx_err },
878 	{ true, "RDPES9", dpp_rx_err },
879 	{ true, "RDPES10", dpp_rx_err },
880 	{ true, "RDPES11", dpp_rx_err },
881 	{ true, "RDPES12", dpp_rx_err },
882 	{ true, "RDPES13", dpp_rx_err },
883 	{ true, "RDPES14", dpp_rx_err },
884 	{ true, "RDPES15", dpp_rx_err },
885 };
886 
887 static void dwxgmac3_handle_dma_err(struct net_device *ndev,
888 				    void __iomem *ioaddr, bool correctable,
889 				    struct stmmac_safety_stats *stats)
890 {
891 	u32 value;
892 
893 	value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
894 	writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
895 
896 	dwxgmac3_log_error(ndev, value, correctable, "DMA",
897 			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
898 
899 	value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
900 	writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
901 
902 	dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
903 			   dwxgmac3_dma_dpp_errors,
904 			   STAT_OFF(dma_dpp_errors), stats);
905 }
906 
907 static int
908 dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
909 			    struct stmmac_safety_feature_cfg *safety_cfg)
910 {
911 	u32 value;
912 
913 	if (!asp)
914 		return -EINVAL;
915 
916 	/* 1. Enable Safety Features */
917 	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);
918 
919 	/* 2. Enable MTL Safety Interrupts */
920 	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
921 	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
922 	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
923 	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
924 	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
925 	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
926 
927 	/* 3. Enable DMA Safety Interrupts */
928 	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
929 	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
930 	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
931 	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
932 
933 	/* 0x2: Without ECC or Parity Ports on External Application Interface
934 	 * 0x4: Only ECC Protection for External Memory feature is selected
935 	 */
936 	if (asp == 0x2 || asp == 0x4)
937 		return 0;
938 
939 	/* 4. Enable Parity and Timeout for FSM */
940 	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
941 	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
942 	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
943 	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
944 
945 	/* 5. Enable Data Path Parity Protection */
946 	value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
947 	/* already enabled by default, explicitly enable it again */
948 	value &= ~XGMAC_DPP_DISABLE;
949 	writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
950 
951 	return 0;
952 }
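/*
 * Safety feature bring-up: MTL_ECC_CONTROL is written to 0 (presumably
 * clearing any ECC-disable bits), correctable-error interrupts are unmasked
 * for the MTL and DMA memories, and, unless the ASP level reports a
 * parity-less configuration (0x2 or 0x4), FSM parity/timeout checking and
 * MTL data path parity protection are enabled as well.
 */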
953 
954 static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
955 					   void __iomem *ioaddr,
956 					   unsigned int asp,
957 					   struct stmmac_safety_stats *stats)
958 {
959 	bool err, corr;
960 	u32 mtl, dma;
961 	int ret = 0;
962 
963 	if (!asp)
964 		return -EINVAL;
965 
966 	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
967 	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);
968 
969 	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
970 	corr = false;
971 	if (err) {
972 		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
973 		ret |= !corr;
974 	}
975 
976 	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
977 	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
978 	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
979 	if (err) {
980 		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
981 		ret |= !corr;
982 	}
983 
984 	/* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
985 	 * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
986 	 * Parity Errors here
987 	 */
988 	err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
989 	corr = dma & XGMAC_DECIS;
990 	if (err) {
991 		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
992 		ret |= !corr;
993 	}
994 
995 	return ret;
996 }
997 
998 static const struct dwxgmac3_error {
999 	const struct dwxgmac3_error_desc *desc;
1000 } dwxgmac3_all_errors[] = {
1001 	{ dwxgmac3_mac_errors },
1002 	{ dwxgmac3_mtl_errors },
1003 	{ dwxgmac3_dma_errors },
1004 	{ dwxgmac3_dma_dpp_errors },
1005 };
1006 
1007 static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
1008 				     int index, unsigned long *count,
1009 				     const char **desc)
1010 {
1011 	int module = index / 32, offset = index % 32;
1012 	unsigned long *ptr = (unsigned long *)stats;
1013 
1014 	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
1015 		return -EINVAL;
1016 	if (!dwxgmac3_all_errors[module].desc[offset].valid)
1017 		return -EINVAL;
1018 	if (count)
1019 		*count = *(ptr + index);
1020 	if (desc)
1021 		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
1022 	return 0;
1023 }
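/*
 * The dump index is linear across the four error banks above, 32 slots per
 * bank: index / 32 selects mac/mtl/dma/dma_dpp and index % 32 the bit within
 * it. For example, index 33 refers to bit 1 of the MTL bank ("TXAMS"),
 * assuming the counters in stmmac_safety_stats are laid out in that same
 * order.
 */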
1024 
1025 static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
1026 {
1027 	u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
1028 
1029 	val &= ~XGMAC_FRPE;
1030 	writel(val, ioaddr + XGMAC_MTL_OPMODE);
1031 
1032 	return 0;
1033 }
1034 
1035 static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
1036 {
1037 	u32 val;
1038 
1039 	val = readl(ioaddr + XGMAC_MTL_OPMODE);
1040 	val |= XGMAC_FRPE;
1041 	writel(val, ioaddr + XGMAC_MTL_OPMODE);
1042 }
1043 
1044 static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
1045 					    struct stmmac_tc_entry *entry,
1046 					    int pos)
1047 {
1048 	int ret, i;
1049 
1050 	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
1051 		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
1052 		u32 val;
1053 
1054 		/* Wait for ready */
1055 		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
1056 					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
1057 		if (ret)
1058 			return ret;
1059 
1060 		/* Write data */
1061 		val = *((u32 *)&entry->val + i);
1062 		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);
1063 
1064 		/* Write pos */
1065 		val = real_pos & XGMAC_ADDR;
1066 		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
1067 
1068 		/* Write OP */
1069 		val |= XGMAC_WRRDN;
1070 		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
1071 
1072 		/* Start Write */
1073 		val |= XGMAC_STARTBUSY;
1074 		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
1075 
1076 		/* Wait for done */
1077 		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
1078 					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
1079 		if (ret)
1080 			return ret;
1081 	}
1082 
1083 	return 0;
1084 }
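/*
 * Each parser entry is written one 32-bit word at a time through the
 * indirect MTL_RXP_IACC interface: wait for STARTBUSY to clear, load the
 * word into IACC_DATA, program the word address, set WRRDN to select a
 * write, then set STARTBUSY and poll until the operation completes.
 */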
1085 
1086 static struct stmmac_tc_entry *
1087 dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
1088 			    unsigned int count, u32 curr_prio)
1089 {
1090 	struct stmmac_tc_entry *entry;
1091 	u32 min_prio = ~0x0;
1092 	int i, min_prio_idx;
1093 	bool found = false;
1094 
1095 	for (i = count - 1; i >= 0; i--) {
1096 		entry = &entries[i];
1097 
1098 		/* Do not update unused entries */
1099 		if (!entry->in_use)
1100 			continue;
1101 		/* Do not update already updated entries (i.e. fragments) */
1102 		if (entry->in_hw)
1103 			continue;
1104 		/* Let last entry be updated last */
1105 		if (entry->is_last)
1106 			continue;
1107 		/* Do not return fragments */
1108 		if (entry->is_frag)
1109 			continue;
1110 		/* Check if we already checked this prio */
1111 		if (entry->prio < curr_prio)
1112 			continue;
1113 		/* Check if this is the minimum prio */
1114 		if (entry->prio < min_prio) {
1115 			min_prio = entry->prio;
1116 			min_prio_idx = i;
1117 			found = true;
1118 		}
1119 	}
1120 
1121 	if (found)
1122 		return &entries[min_prio_idx];
1123 	return NULL;
1124 }
1125 
1126 static int dwxgmac3_rxp_config(void __iomem *ioaddr,
1127 			       struct stmmac_tc_entry *entries,
1128 			       unsigned int count)
1129 {
1130 	struct stmmac_tc_entry *entry, *frag;
1131 	int i, ret, nve = 0;
1132 	u32 curr_prio = 0;
1133 	u32 old_val, val;
1134 
1135 	/* Force disable RX */
1136 	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
1137 	val = old_val & ~XGMAC_CONFIG_RE;
1138 	writel(val, ioaddr + XGMAC_RX_CONFIG);
1139 
1140 	/* Disable RX Parser */
1141 	ret = dwxgmac3_rxp_disable(ioaddr);
1142 	if (ret)
1143 		goto re_enable;
1144 
1145 	/* Set all entries as NOT in HW */
1146 	for (i = 0; i < count; i++) {
1147 		entry = &entries[i];
1148 		entry->in_hw = false;
1149 	}
1150 
1151 	/* Update entries by reverse order */
1152 	while (1) {
1153 		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
1154 		if (!entry)
1155 			break;
1156 
1157 		curr_prio = entry->prio;
1158 		frag = entry->frag_ptr;
1159 
1160 		/* Set special fragment requirements */
1161 		if (frag) {
1162 			entry->val.af = 0;
1163 			entry->val.rf = 0;
1164 			entry->val.nc = 1;
1165 			entry->val.ok_index = nve + 2;
1166 		}
1167 
1168 		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
1169 		if (ret)
1170 			goto re_enable;
1171 
1172 		entry->table_pos = nve++;
1173 		entry->in_hw = true;
1174 
1175 		if (frag && !frag->in_hw) {
1176 			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
1177 			if (ret)
1178 				goto re_enable;
1179 			frag->table_pos = nve++;
1180 			frag->in_hw = true;
1181 		}
1182 	}
1183 
1184 	if (!nve)
1185 		goto re_enable;
1186 
1187 	/* Update all pass entry */
1188 	for (i = 0; i < count; i++) {
1189 		entry = &entries[i];
1190 		if (!entry->is_last)
1191 			continue;
1192 
1193 		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
1194 		if (ret)
1195 			goto re_enable;
1196 
1197 		entry->table_pos = nve++;
1198 	}
1199 
1200 	/* Assume n. of parsable entries == n. of valid entries */
1201 	val = (nve << 16) & XGMAC_NPE;
1202 	val |= nve & XGMAC_NVE;
1203 	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
1204 
1205 	/* Enable RX Parser */
1206 	dwxgmac3_rxp_enable(ioaddr);
1207 
1208 re_enable:
1209 	/* Re-enable RX */
1210 	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
1211 	return ret;
1212 }
1213 
1214 static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
1215 {
1216 	void __iomem *ioaddr = hw->pcsr;
1217 	u32 value;
1218 
1219 	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
1220 				      value, value & XGMAC_TXTSC, 100, 10000))
1221 		return -EBUSY;
1222 
1223 	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
1224 	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
1225 	return 0;
1226 }
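/*
 * The captured Tx timestamp is returned in nanoseconds: the sub-second part
 * comes from TXTIMESTAMP_NSEC (masked by TXTSSTSLO) and the seconds register
 * is scaled by 1e9 and added on top. -EBUSY is returned if no snapshot
 * becomes available within the 10ms poll window.
 */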
1227 
1228 static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
1229 				    struct stmmac_pps_cfg *cfg, bool enable,
1230 				    u32 sub_second_inc, u32 systime_flags)
1231 {
1232 	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
1233 	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
1234 	u64 period;
1235 
1236 	if (!cfg->available)
1237 		return -EINVAL;
1238 	if (tnsec & XGMAC_TRGTBUSY0)
1239 		return -EBUSY;
1240 	if (!sub_second_inc || !systime_flags)
1241 		return -EINVAL;
1242 
1243 	val &= ~XGMAC_PPSx_MASK(index);
1244 
1245 	if (!enable) {
1246 		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
1247 		writel(val, ioaddr + XGMAC_PPS_CONTROL);
1248 		return 0;
1249 	}
1250 
1251 	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
1252 	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
1253 
1254 	/* XGMAC Core has 4 PPS outputs at most.
1255 	 *
1256 	 * Prior to XGMAC Core 3.20, Fixed or Flexible mode is selectable for
1257 	 * PPS0 only, via PPSEN0. PPS{1,2,3} are in Flexible mode by default
1258 	 * and cannot be switched to Fixed mode, since PPSEN{1,2,3} are
1259 	 * read-only and reserved to 0.
1260 	 * Always setting PPSEN{1,2,3} anyway does not make things worse ;-)
1261 	 *
1262 	 * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
1263 	 * be set, or the PPS outputs stay in Fixed PPS mode by default.
1264 	 */
1265 	val |= XGMAC_PPSENx(index);
1266 
1267 	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
1268 
1269 	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
1270 		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
1271 	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
1272 
1273 	period = cfg->period.tv_sec * 1000000000;
1274 	period += cfg->period.tv_nsec;
1275 
1276 	do_div(period, sub_second_inc);
1277 
1278 	if (period <= 1)
1279 		return -EINVAL;
1280 
1281 	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
1282 
1283 	period >>= 1;
1284 	if (period <= 1)
1285 		return -EINVAL;
1286 
1287 	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
1288 
1289 	/* Finally, activate it */
1290 	writel(val, ioaddr + XGMAC_PPS_CONTROL);
1291 	return 0;
1292 }
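/*
 * The PPS interval and width are expressed in units of the PTP sub-second
 * increment. As a rough worked example (assuming a 4ns sub_second_inc,
 * which is not taken from this file), a requested period of 1s gives
 * period == 250000000 ticks, so INTERVAL is programmed with period - 1 and
 * WIDTH with (period / 2) - 1, i.e. roughly a 50% duty cycle.
 */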
1293 
1294 static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
1295 {
1296 	u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
1297 
1298 	value &= ~XGMAC_CONFIG_SARC;
1299 	value |= val << XGMAC_CONFIG_SARC_SHIFT;
1300 
1301 	writel(value, ioaddr + XGMAC_TX_CONFIG);
1302 }
1303 
1304 static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
1305 {
1306 	void __iomem *ioaddr = hw->pcsr;
1307 	u32 value;
1308 
1309 	value = readl(ioaddr + XGMAC_VLAN_INCL);
1310 	value |= XGMAC_VLAN_VLTI;
1311 	value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
1312 	value &= ~XGMAC_VLAN_VLC;
1313 	value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
1314 	writel(value, ioaddr + XGMAC_VLAN_INCL);
1315 }
1316 
1317 static int dwxgmac2_filter_wait(struct mac_device_info *hw)
1318 {
1319 	void __iomem *ioaddr = hw->pcsr;
1320 	u32 value;
1321 
1322 	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
1323 			       !(value & XGMAC_XB), 100, 10000))
1324 		return -EBUSY;
1325 	return 0;
1326 }
1327 
1328 static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
1329 				u8 reg, u32 *data)
1330 {
1331 	void __iomem *ioaddr = hw->pcsr;
1332 	u32 value;
1333 	int ret;
1334 
1335 	ret = dwxgmac2_filter_wait(hw);
1336 	if (ret)
1337 		return ret;
1338 
1339 	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1340 	value |= XGMAC_TT | XGMAC_XB;
1341 	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1342 
1343 	ret = dwxgmac2_filter_wait(hw);
1344 	if (ret)
1345 		return ret;
1346 
1347 	*data = readl(ioaddr + XGMAC_L3L4_DATA);
1348 	return 0;
1349 }
1350 
1351 static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
1352 				 u8 reg, u32 data)
1353 {
1354 	void __iomem *ioaddr = hw->pcsr;
1355 	u32 value;
1356 	int ret;
1357 
1358 	ret = dwxgmac2_filter_wait(hw);
1359 	if (ret)
1360 		return ret;
1361 
1362 	writel(data, ioaddr + XGMAC_L3L4_DATA);
1363 
1364 	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1365 	value |= XGMAC_XB;
1366 	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1367 
1368 	return dwxgmac2_filter_wait(hw);
1369 }
1370 
1371 static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
1372 				     bool en, bool ipv6, bool sa, bool inv,
1373 				     u32 match)
1374 {
1375 	void __iomem *ioaddr = hw->pcsr;
1376 	u32 value;
1377 	int ret;
1378 
1379 	value = readl(ioaddr + XGMAC_PACKET_FILTER);
1380 	value |= XGMAC_FILTER_IPFE;
1381 	writel(value, ioaddr + XGMAC_PACKET_FILTER);
1382 
1383 	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1384 	if (ret)
1385 		return ret;
1386 
1387 	/* For IPv6, the SA and DA filters cannot both be active */
1388 	if (ipv6) {
1389 		value |= XGMAC_L3PEN0;
1390 		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
1391 		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
1392 		if (sa) {
1393 			value |= XGMAC_L3SAM0;
1394 			if (inv)
1395 				value |= XGMAC_L3SAIM0;
1396 		} else {
1397 			value |= XGMAC_L3DAM0;
1398 			if (inv)
1399 				value |= XGMAC_L3DAIM0;
1400 		}
1401 	} else {
1402 		value &= ~XGMAC_L3PEN0;
1403 		if (sa) {
1404 			value |= XGMAC_L3SAM0;
1405 			if (inv)
1406 				value |= XGMAC_L3SAIM0;
1407 		} else {
1408 			value |= XGMAC_L3DAM0;
1409 			if (inv)
1410 				value |= XGMAC_L3DAIM0;
1411 		}
1412 	}
1413 
1414 	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1415 	if (ret)
1416 		return ret;
1417 
1418 	if (sa) {
1419 		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
1420 		if (ret)
1421 			return ret;
1422 	} else {
1423 		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
1424 		if (ret)
1425 			return ret;
1426 	}
1427 
1428 	if (!en)
1429 		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1430 
1431 	return 0;
1432 }
1433 
1434 static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1435 				     bool en, bool udp, bool sa, bool inv,
1436 				     u32 match)
1437 {
1438 	void __iomem *ioaddr = hw->pcsr;
1439 	u32 value;
1440 	int ret;
1441 
1442 	value = readl(ioaddr + XGMAC_PACKET_FILTER);
1443 	value |= XGMAC_FILTER_IPFE;
1444 	writel(value, ioaddr + XGMAC_PACKET_FILTER);
1445 
1446 	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1447 	if (ret)
1448 		return ret;
1449 
1450 	if (udp) {
1451 		value |= XGMAC_L4PEN0;
1452 	} else {
1453 		value &= ~XGMAC_L4PEN0;
1454 	}
1455 
1456 	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
1457 	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
1458 	if (sa) {
1459 		value |= XGMAC_L4SPM0;
1460 		if (inv)
1461 			value |= XGMAC_L4SPIM0;
1462 	} else {
1463 		value |= XGMAC_L4DPM0;
1464 		if (inv)
1465 			value |= XGMAC_L4DPIM0;
1466 	}
1467 
1468 	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1469 	if (ret)
1470 		return ret;
1471 
1472 	if (sa) {
1473 		value = match & XGMAC_L4SP0;
1474 
1475 		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1476 		if (ret)
1477 			return ret;
1478 	} else {
1479 		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
1480 
1481 		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1482 		if (ret)
1483 			return ret;
1484 	}
1485 
1486 	if (!en)
1487 		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1488 
1489 	return 0;
1490 }
1491 
1492 static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
1493 				     u32 addr)
1494 {
1495 	void __iomem *ioaddr = hw->pcsr;
1496 	u32 value;
1497 
1498 	writel(addr, ioaddr + XGMAC_ARP_ADDR);
1499 
1500 	value = readl(ioaddr + XGMAC_RX_CONFIG);
1501 	if (en)
1502 		value |= XGMAC_CONFIG_ARPEN;
1503 	else
1504 		value &= ~XGMAC_CONFIG_ARPEN;
1505 	writel(value, ioaddr + XGMAC_RX_CONFIG);
1506 }
1507 
1508 const struct stmmac_ops dwxgmac210_ops = {
1509 	.core_init = dwxgmac2_core_init,
1510 	.set_mac = dwxgmac2_set_mac,
1511 	.rx_ipc = dwxgmac2_rx_ipc,
1512 	.rx_queue_enable = dwxgmac2_rx_queue_enable,
1513 	.rx_queue_prio = dwxgmac2_rx_queue_prio,
1514 	.tx_queue_prio = dwxgmac2_tx_queue_prio,
1515 	.rx_queue_routing = dwxgmac2_rx_queue_routing,
1516 	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
1517 	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
1518 	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
1519 	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
1520 	.config_cbs = dwxgmac2_config_cbs,
1521 	.dump_regs = dwxgmac2_dump_regs,
1522 	.host_irq_status = dwxgmac2_host_irq_status,
1523 	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
1524 	.flow_ctrl = dwxgmac2_flow_ctrl,
1525 	.pmt = dwxgmac2_pmt,
1526 	.set_umac_addr = dwxgmac2_set_umac_addr,
1527 	.get_umac_addr = dwxgmac2_get_umac_addr,
1528 	.set_eee_mode = dwxgmac2_set_eee_mode,
1529 	.reset_eee_mode = dwxgmac2_reset_eee_mode,
1530 	.set_eee_timer = dwxgmac2_set_eee_timer,
1531 	.set_eee_pls = dwxgmac2_set_eee_pls,
1532 	.debug = NULL,
1533 	.set_filter = dwxgmac2_set_filter,
1534 	.safety_feat_config = dwxgmac3_safety_feat_config,
1535 	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
1536 	.safety_feat_dump = dwxgmac3_safety_feat_dump,
1537 	.set_mac_loopback = dwxgmac2_set_mac_loopback,
1538 	.rss_configure = dwxgmac2_rss_configure,
1539 	.update_vlan_hash = dwxgmac2_update_vlan_hash,
1540 	.rxp_config = dwxgmac3_rxp_config,
1541 	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
1542 	.flex_pps_config = dwxgmac2_flex_pps_config,
1543 	.sarc_configure = dwxgmac2_sarc_configure,
1544 	.enable_vlan = dwxgmac2_enable_vlan,
1545 	.config_l3_filter = dwxgmac2_config_l3_filter,
1546 	.config_l4_filter = dwxgmac2_config_l4_filter,
1547 	.set_arp_offload = dwxgmac2_set_arp_offload,
1548 	.fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
1549 };
1550 
1551 static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
1552 				      u32 queue)
1553 {
1554 	void __iomem *ioaddr = hw->pcsr;
1555 	u32 value;
1556 
1557 	value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
1558 	if (mode == MTL_QUEUE_AVB)
1559 		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
1560 	else if (mode == MTL_QUEUE_DCB)
1561 		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
1562 	writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
1563 }
1564 
1565 const struct stmmac_ops dwxlgmac2_ops = {
1566 	.core_init = dwxgmac2_core_init,
1567 	.set_mac = dwxgmac2_set_mac,
1568 	.rx_ipc = dwxgmac2_rx_ipc,
1569 	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
1570 	.rx_queue_prio = dwxgmac2_rx_queue_prio,
1571 	.tx_queue_prio = dwxgmac2_tx_queue_prio,
1572 	.rx_queue_routing = dwxgmac2_rx_queue_routing,
1573 	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
1574 	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
1575 	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
1576 	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
1577 	.config_cbs = dwxgmac2_config_cbs,
1578 	.dump_regs = dwxgmac2_dump_regs,
1579 	.host_irq_status = dwxgmac2_host_irq_status,
1580 	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
1581 	.flow_ctrl = dwxgmac2_flow_ctrl,
1582 	.pmt = dwxgmac2_pmt,
1583 	.set_umac_addr = dwxgmac2_set_umac_addr,
1584 	.get_umac_addr = dwxgmac2_get_umac_addr,
1585 	.set_eee_mode = dwxgmac2_set_eee_mode,
1586 	.reset_eee_mode = dwxgmac2_reset_eee_mode,
1587 	.set_eee_timer = dwxgmac2_set_eee_timer,
1588 	.set_eee_pls = dwxgmac2_set_eee_pls,
1589 	.debug = NULL,
1590 	.set_filter = dwxgmac2_set_filter,
1591 	.safety_feat_config = dwxgmac3_safety_feat_config,
1592 	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
1593 	.safety_feat_dump = dwxgmac3_safety_feat_dump,
1594 	.set_mac_loopback = dwxgmac2_set_mac_loopback,
1595 	.rss_configure = dwxgmac2_rss_configure,
1596 	.update_vlan_hash = dwxgmac2_update_vlan_hash,
1597 	.rxp_config = dwxgmac3_rxp_config,
1598 	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
1599 	.flex_pps_config = dwxgmac2_flex_pps_config,
1600 	.sarc_configure = dwxgmac2_sarc_configure,
1601 	.enable_vlan = dwxgmac2_enable_vlan,
1602 	.config_l3_filter = dwxgmac2_config_l3_filter,
1603 	.config_l4_filter = dwxgmac2_config_l4_filter,
1604 	.set_arp_offload = dwxgmac2_set_arp_offload,
1605 	.fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
1606 };
1607 
1608 int dwxgmac2_setup(struct stmmac_priv *priv)
1609 {
1610 	struct mac_device_info *mac = priv->hw;
1611 
1612 	dev_info(priv->device, "\tXGMAC2\n");
1613 
1614 	priv->dev->priv_flags |= IFF_UNICAST_FLT;
1615 	mac->pcsr = priv->ioaddr;
1616 	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1617 	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1618 	mac->mcast_bits_log2 = 0;
1619 
1620 	if (mac->multicast_filter_bins)
1621 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1622 
1623 	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1624 			 MAC_1000FD | MAC_2500FD | MAC_5000FD |
1625 			 MAC_10000FD;
1626 	mac->link.duplex = 0;
1627 	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
1628 	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
1629 	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
1630 	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
1631 	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
1632 	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
1633 	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
1634 	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
1635 
1636 	mac->mii.addr = XGMAC_MDIO_ADDR;
1637 	mac->mii.data = XGMAC_MDIO_DATA;
1638 	mac->mii.addr_shift = 16;
1639 	mac->mii.addr_mask = GENMASK(20, 16);
1640 	mac->mii.reg_shift = 0;
1641 	mac->mii.reg_mask = GENMASK(15, 0);
1642 	mac->mii.clk_csr_shift = 19;
1643 	mac->mii.clk_csr_mask = GENMASK(21, 19);
1644 
1645 	return 0;
1646 }
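/*
 * The mii.* fields describe the MDIO_ADDR register layout consumed by the
 * shared stmmac MDIO code: PHY address in bits 20:16, register number in
 * bits 15:0 and the CSR clock-range selection in bits 21:19.
 */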
1647 
1648 int dwxlgmac2_setup(struct stmmac_priv *priv)
1649 {
1650 	struct mac_device_info *mac = priv->hw;
1651 
1652 	dev_info(priv->device, "\tXLGMAC\n");
1653 
1654 	priv->dev->priv_flags |= IFF_UNICAST_FLT;
1655 	mac->pcsr = priv->ioaddr;
1656 	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1657 	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1658 	mac->mcast_bits_log2 = 0;
1659 
1660 	if (mac->multicast_filter_bins)
1661 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1662 
1663 	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1664 			 MAC_1000FD | MAC_2500FD | MAC_5000FD |
1665 			 MAC_10000FD | MAC_25000FD |
1666 			 MAC_40000FD | MAC_50000FD |
1667 			 MAC_100000FD;
1668 	mac->link.duplex = 0;
1669 	mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
1670 	mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
1671 	mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
1672 	mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
1673 	mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
1674 	mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
1675 	mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
1676 	mac->link.speed_mask = XLGMAC_CONFIG_SS;
1677 
1678 	mac->mii.addr = XGMAC_MDIO_ADDR;
1679 	mac->mii.data = XGMAC_MDIO_DATA;
1680 	mac->mii.addr_shift = 16;
1681 	mac->mii.addr_mask = GENMASK(20, 16);
1682 	mac->mii.reg_shift = 0;
1683 	mac->mii.reg_mask = GENMASK(15, 0);
1684 	mac->mii.clk_csr_shift = 19;
1685 	mac->mii.clk_csr_mask = GENMASK(21, 19);
1686 
1687 	return 0;
1688 }
1689