xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c (revision ba95c7452439756d4f6dceb5a188b7c31dbbe5b6)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6 
7 #include <linux/iopoll.h>
8 #include "stmmac.h"
9 #include "dwxgmac2.h"
10 
11 static int dwxgmac2_dma_reset(void __iomem *ioaddr)
12 {
13 	u32 value = readl(ioaddr + XGMAC_DMA_MODE);
14 
15 	/* DMA SW reset */
16 	writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
17 
18 	return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
19 				  !(value & XGMAC_SWR), 0, 100000);
20 }
21 
22 static void dwxgmac2_dma_init(void __iomem *ioaddr,
23 			      struct stmmac_dma_cfg *dma_cfg, int atds)
24 {
25 	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
26 
27 	if (dma_cfg->aal)
28 		value |= XGMAC_AAL;
29 
30 	writel(value | XGMAC_EAME, ioaddr + XGMAC_DMA_SYSBUS_MODE);
31 }
32 
33 static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
34 				   struct stmmac_dma_cfg *dma_cfg, u32 chan)
35 {
36 	u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
37 
38 	if (dma_cfg->pblx8)
39 		value |= XGMAC_PBLx8;
40 
41 	writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
42 	writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
43 }
44 
45 static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
46 				      struct stmmac_dma_cfg *dma_cfg,
47 				      u32 dma_rx_phy, u32 chan)
48 {
49 	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
50 	u32 value;
51 
52 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
53 	value &= ~XGMAC_RxPBL;
54 	value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
55 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
56 
57 	writel(dma_rx_phy, ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
58 }
59 
60 static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
61 				      struct stmmac_dma_cfg *dma_cfg,
62 				      u32 dma_tx_phy, u32 chan)
63 {
64 	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
65 	u32 value;
66 
67 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
68 	value &= ~XGMAC_TxPBL;
69 	value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
70 	value |= XGMAC_OSP;
71 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
72 
73 	writel(dma_tx_phy, ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
74 }
75 
76 static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
77 {
78 	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
79 	int i;
80 
81 	if (axi->axi_lpi_en)
82 		value |= XGMAC_EN_LPI;
83 	if (axi->axi_xit_frm)
84 		value |= XGMAC_LPI_XIT_PKT;
85 
86 	value &= ~XGMAC_WR_OSR_LMT;
87 	value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
88 		XGMAC_WR_OSR_LMT;
89 
90 	value &= ~XGMAC_RD_OSR_LMT;
91 	value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
92 		XGMAC_RD_OSR_LMT;
93 
94 	if (!axi->axi_fb)
95 		value |= XGMAC_UNDEF;
96 
97 	value &= ~XGMAC_BLEN;
98 	for (i = 0; i < AXI_BLEN; i++) {
99 		switch (axi->axi_blen[i]) {
100 		case 256:
101 			value |= XGMAC_BLEN256;
102 			break;
103 		case 128:
104 			value |= XGMAC_BLEN128;
105 			break;
106 		case 64:
107 			value |= XGMAC_BLEN64;
108 			break;
109 		case 32:
110 			value |= XGMAC_BLEN32;
111 			break;
112 		case 16:
113 			value |= XGMAC_BLEN16;
114 			break;
115 		case 8:
116 			value |= XGMAC_BLEN8;
117 			break;
118 		case 4:
119 			value |= XGMAC_BLEN4;
120 			break;
121 		}
122 	}
123 
124 	writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
125 	writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
126 	writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
127 }
128 
/* Configure the MTL RX queue operation mode for @channel:
 * store-and-forward vs. threshold mode, RX queue size, and - for large
 * enough FIFOs on non-AVB queues - hardware flow-control thresholds.
 * Finally unmasks the MTL RX overflow interrupt for the queue.
 */
static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
				 u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
	/* RQS is encoded in units of 256 bytes, minus one. */
	unsigned int rqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_RSF;
	} else {
		value &= ~XGMAC_RSF;
		value &= ~XGMAC_RTC;

		/* Map the byte threshold onto the RTC encoding; note
		 * this map deliberately has no 0x1 encoding.
		 */
		if (mode <= 64)
			value |= 0x0 << XGMAC_RTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_RTC_SHIFT;
		else
			value |= 0x3 << XGMAC_RTC_SHIFT;
	}

	value &= ~XGMAC_RQS;
	value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;

	/* Hardware flow control needs a reasonably sized FIFO and is not
	 * used on AVB queues.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
		unsigned int rfd, rfa;

		value |= XGMAC_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		case 8192:
			rfd = 0x06; /* Full-4K */
			rfa = 0x0a; /* Full-6K */
			break;

		case 16384:
			rfd = 0x06; /* Full-4K */
			rfa = 0x12; /* Full-10K */
			break;

		default:
			rfd = 0x06; /* Full-4K */
			rfa = 0x1e; /* Full-16K */
			break;
		}

		flow &= ~XGMAC_RFD;
		flow |= rfd << XGMAC_RFD_SHIFT;

		flow &= ~XGMAC_RFA;
		flow |= rfa << XGMAC_RFA_SHIFT;

		writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
	}

	writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));

	/* Enable MTL RX overflow */
	value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
	writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
204 
205 static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
206 				 u32 channel, int fifosz, u8 qmode)
207 {
208 	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
209 	unsigned int tqs = fifosz / 256 - 1;
210 
211 	if (mode == SF_DMA_MODE) {
212 		value |= XGMAC_TSF;
213 	} else {
214 		value &= ~XGMAC_TSF;
215 		value &= ~XGMAC_TTC;
216 
217 		if (mode <= 64)
218 			value |= 0x0 << XGMAC_TTC_SHIFT;
219 		else if (mode <= 96)
220 			value |= 0x2 << XGMAC_TTC_SHIFT;
221 		else if (mode <= 128)
222 			value |= 0x3 << XGMAC_TTC_SHIFT;
223 		else if (mode <= 192)
224 			value |= 0x4 << XGMAC_TTC_SHIFT;
225 		else if (mode <= 256)
226 			value |= 0x5 << XGMAC_TTC_SHIFT;
227 		else if (mode <= 384)
228 			value |= 0x6 << XGMAC_TTC_SHIFT;
229 		else
230 			value |= 0x7 << XGMAC_TTC_SHIFT;
231 	}
232 
233 	/* Use static TC to Queue mapping */
234 	value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;
235 
236 	value &= ~XGMAC_TXQEN;
237 	if (qmode != MTL_QUEUE_AVB)
238 		value |= 0x2 << XGMAC_TXQEN_SHIFT;
239 	else
240 		value |= 0x1 << XGMAC_TXQEN_SHIFT;
241 
242 	value &= ~XGMAC_TQS;
243 	value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;
244 
245 	writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
246 }
247 
248 static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
249 {
250 	writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
251 }
252 
253 static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
254 {
255 	writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
256 }
257 
258 static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
259 {
260 	u32 value;
261 
262 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
263 	value |= XGMAC_TXST;
264 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
265 
266 	value = readl(ioaddr + XGMAC_TX_CONFIG);
267 	value |= XGMAC_CONFIG_TE;
268 	writel(value, ioaddr + XGMAC_TX_CONFIG);
269 }
270 
271 static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
272 {
273 	u32 value;
274 
275 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
276 	value &= ~XGMAC_TXST;
277 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
278 
279 	value = readl(ioaddr + XGMAC_TX_CONFIG);
280 	value &= ~XGMAC_CONFIG_TE;
281 	writel(value, ioaddr + XGMAC_TX_CONFIG);
282 }
283 
284 static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
285 {
286 	u32 value;
287 
288 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
289 	value |= XGMAC_RXST;
290 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
291 
292 	value = readl(ioaddr + XGMAC_RX_CONFIG);
293 	value |= XGMAC_CONFIG_RE;
294 	writel(value, ioaddr + XGMAC_RX_CONFIG);
295 }
296 
297 static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
298 {
299 	u32 value;
300 
301 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
302 	value &= ~XGMAC_RXST;
303 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
304 }
305 
/* Read and acknowledge the per-channel DMA interrupt status, update the
 * driver's stats, and return a bitmask (tx_hard_error / handle_rx /
 * handle_tx) telling the caller what needs servicing.
 */
static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
				  struct stmmac_extra_stats *x, u32 chan)
{
	u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
	u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
	int ret = 0;

	/* ABNORMAL interrupts */
	if (unlikely(intr_status & XGMAC_AIS)) {
		if (unlikely(intr_status & XGMAC_TPS)) {
			/* TX process stopped: reported as a hard error. */
			x->tx_process_stopped_irq++;
			ret |= tx_hard_error;
		}
		if (unlikely(intr_status & XGMAC_FBE)) {
			/* Fatal bus error during a DMA transfer. */
			x->fatal_bus_error_irq++;
			ret |= tx_hard_error;
		}
	}

	/* TX/RX NORMAL interrupts */
	if (likely(intr_status & XGMAC_NIS)) {
		x->normal_irq_n++;

		if (likely(intr_status & XGMAC_RI)) {
			x->rx_normal_irq_n++;
			ret |= handle_rx;
		}
		/* Either TX completion (TI) or TX buffer unavailable (TBU)
		 * means the TX path needs servicing.
		 */
		if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
			x->tx_normal_irq_n++;
			ret |= handle_tx;
		}
	}

	/* Clear interrupts: only ack sources that are both raised and
	 * currently enabled.
	 */
	writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));

	return ret;
}
344 
/* Populate @dma_cap from the core's read-only HW_FEATURE0..2 registers,
 * which describe what this XGMAC instance was synthesized with.
 */
static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
				    struct dma_features *dma_cap)
{
	u32 hw_cap;

	/*  MAC HW feature 0 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
	dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
	dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
	dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
	/* AV is advertised only if both TX-AV and RX-AV are present. */
	dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
	dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
	dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
	dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
	dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;

	/* MAC HW feature 1 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
	dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;

	/* Decode the 2-bit ADDR64 field into a DMA address width in
	 * bits; unknown encodings conservatively fall back to 32.
	 */
	dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		dma_cap->addr64 = 32;
		break;
	}

	/* FIFO sizes are encoded as a power-of-two multiple of 128B. */
	dma_cap->tx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
	dma_cap->rx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);

	/* MAC HW feature 2: channel/queue counts are encoded minus one. */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
	dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
	dma_cap->number_tx_channel =
		((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
	dma_cap->number_rx_channel =
		((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
	dma_cap->number_rx_queues =
		((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
}
398 
399 static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
400 {
401 	u32 i;
402 
403 	for (i = 0; i < nchan; i++)
404 		writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i));
405 }
406 
407 static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
408 {
409 	writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
410 }
411 
412 static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
413 {
414 	writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
415 }
416 
417 static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
418 {
419 	writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
420 }
421 
422 static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
423 {
424 	writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
425 }
426 
427 static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
428 {
429 	u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
430 
431 	if (en)
432 		value |= XGMAC_TSE;
433 	else
434 		value &= ~XGMAC_TSE;
435 
436 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
437 }
438 
439 static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
440 {
441 	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
442 
443 	value &= ~XGMAC_TXQEN;
444 	if (qmode != MTL_QUEUE_AVB) {
445 		value |= 0x2 << XGMAC_TXQEN_SHIFT;
446 		writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
447 	} else {
448 		value |= 0x1 << XGMAC_TXQEN_SHIFT;
449 	}
450 
451 	writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
452 }
453 
454 static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
455 {
456 	u32 value;
457 
458 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
459 	value |= bfsize << 1;
460 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
461 }
462 
/* DMA callback table for the DWC XGMAC 2.10 core, consumed by the
 * stmmac HW-interface layer. Entries left NULL (dump_regs) are not
 * implemented for this core.
 */
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
	.reset = dwxgmac2_dma_reset,
	.init = dwxgmac2_dma_init,
	.init_chan = dwxgmac2_dma_init_chan,
	.init_rx_chan = dwxgmac2_dma_init_rx_chan,
	.init_tx_chan = dwxgmac2_dma_init_tx_chan,
	.axi = dwxgmac2_dma_axi,
	.dump_regs = NULL,
	.dma_rx_mode = dwxgmac2_dma_rx_mode,
	.dma_tx_mode = dwxgmac2_dma_tx_mode,
	.enable_dma_irq = dwxgmac2_enable_dma_irq,
	.disable_dma_irq = dwxgmac2_disable_dma_irq,
	.start_tx = dwxgmac2_dma_start_tx,
	.stop_tx = dwxgmac2_dma_stop_tx,
	.start_rx = dwxgmac2_dma_start_rx,
	.stop_rx = dwxgmac2_dma_stop_rx,
	.dma_interrupt = dwxgmac2_dma_interrupt,
	.get_hw_feature = dwxgmac2_get_hw_feature,
	.rx_watchdog = dwxgmac2_rx_watchdog,
	.set_rx_ring_len = dwxgmac2_set_rx_ring_len,
	.set_tx_ring_len = dwxgmac2_set_tx_ring_len,
	.set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
	.enable_tso = dwxgmac2_enable_tso,
	.qmode = dwxgmac2_qmode,
	.set_bfsize = dwxgmac2_set_bfsize,
};
490