xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6 
7 #include <linux/iopoll.h>
8 #include "stmmac.h"
9 #include "dwxgmac2.h"
10 
11 static int dwxgmac2_dma_reset(void __iomem *ioaddr)
12 {
13 	u32 value = readl(ioaddr + XGMAC_DMA_MODE);
14 
15 	/* DMA SW reset */
16 	writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
17 
18 	return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
19 				  !(value & XGMAC_SWR), 0, 100000);
20 }
21 
22 static void dwxgmac2_dma_init(void __iomem *ioaddr,
23 			      struct stmmac_dma_cfg *dma_cfg)
24 {
25 	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
26 
27 	if (dma_cfg->aal)
28 		value |= XGMAC_AAL;
29 
30 	if (dma_cfg->eame)
31 		value |= XGMAC_EAME;
32 
33 	writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
34 }
35 
36 static void dwxgmac2_dma_init_chan(struct stmmac_priv *priv,
37 				   void __iomem *ioaddr,
38 				   struct stmmac_dma_cfg *dma_cfg, u32 chan)
39 {
40 	u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
41 
42 	if (dma_cfg->pblx8)
43 		value |= XGMAC_PBLx8;
44 
45 	writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
46 	writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
47 }
48 
49 static void dwxgmac2_dma_init_rx_chan(struct stmmac_priv *priv,
50 				      void __iomem *ioaddr,
51 				      struct stmmac_dma_cfg *dma_cfg,
52 				      dma_addr_t phy, u32 chan)
53 {
54 	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
55 	u32 value;
56 
57 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
58 	value &= ~XGMAC_RxPBL;
59 	value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
60 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
61 
62 	writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
63 	writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
64 }
65 
66 static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
67 				      void __iomem *ioaddr,
68 				      struct stmmac_dma_cfg *dma_cfg,
69 				      dma_addr_t phy, u32 chan)
70 {
71 	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
72 	u32 value;
73 
74 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
75 	value &= ~XGMAC_TxPBL;
76 	value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
77 	value |= XGMAC_OSP;
78 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
79 
80 	writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
81 	writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
82 }
83 
/* Program the AXI-bus related DMA parameters: LPI behaviour, the
 * read/write outstanding request limits and the allowed burst lengths,
 * then write the default TDPS/RDPS values to the TX/RX eDMA control
 * registers.
 */
static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);

	/* AXI low-power options from platform configuration */
	if (axi->axi_lpi_en)
		value |= XGMAC_EN_LPI;
	if (axi->axi_xit_frm)
		value |= XGMAC_LPI_XIT_PKT;

	/* Maximum outstanding write requests */
	value &= ~XGMAC_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
		XGMAC_WR_OSR_LMT;

	/* Maximum outstanding read requests */
	value &= ~XGMAC_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
		XGMAC_RD_OSR_LMT;

	if (!axi->axi_fb)
		value |= XGMAC_UNDEF;

	/* Depending on the UNDEF bit the Master AXI will perform any burst
	 * length according to the BLEN programmed (by default all BLEN are
	 * set). Note that the UNDEF bit is readonly, and is the inverse of
	 * Bus Mode bit 16.
	 */
	value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;

	writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
	writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
	writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
}
115 
116 static void dwxgmac2_dma_dump_regs(struct stmmac_priv *priv,
117 				   void __iomem *ioaddr, u32 *reg_space)
118 {
119 	int i;
120 
121 	for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
122 		reg_space[i] = readl(ioaddr + i * 4);
123 }
124 
/* Configure the MTL RX queue operating mode for @channel:
 * store-and-forward vs. threshold operation, the RX queue size (RQS)
 * and, when the FIFO is large enough, the hardware flow-control
 * activate/deactivate thresholds.
 */
static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
				 int mode, u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
	unsigned int rqs = fifosz / 256 - 1;	/* RQS is encoded in 256B units */

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_RSF;	/* RX store-and-forward */
	} else {
		/* Threshold mode: map the requested byte threshold onto
		 * the RTC field encoding.
		 */
		value &= ~XGMAC_RSF;
		value &= ~XGMAC_RTC;

		if (mode <= 64)
			value |= 0x0 << XGMAC_RTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_RTC_SHIFT;
		else
			value |= 0x3 << XGMAC_RTC_SHIFT;
	}

	value &= ~XGMAC_RQS;
	value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;

	/* Hardware flow control only for FIFOs >= 4K and non-AVB queues */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
		unsigned int rfd, rfa;

		value |= XGMAC_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		default:
			rfd = 0x07; /* Full-4.5K */
			rfa = 0x04; /* Full-3K */
			break;
		}

		flow &= ~XGMAC_RFD;
		flow |= rfd << XGMAC_RFD_SHIFT;

		flow &= ~XGMAC_RFA;
		flow |= rfa << XGMAC_RFA_SHIFT;

		writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
	}

	writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
}
186 
/* Configure the MTL TX queue operating mode for @channel:
 * store-and-forward vs. threshold operation, traffic-class mapping,
 * queue enable mode (AVB vs. generic) and the TX queue size (TQS).
 */
static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
				 int mode, u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
	unsigned int tqs = fifosz / 256 - 1;	/* TQS is encoded in 256B units */

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_TSF;	/* TX store-and-forward */
	} else {
		/* Threshold mode: map the requested byte threshold onto
		 * the TTC field encoding.
		 */
		value &= ~XGMAC_TSF;
		value &= ~XGMAC_TTC;

		if (mode <= 64)
			value |= 0x0 << XGMAC_TTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_TTC_SHIFT;
		else if (mode <= 128)
			value |= 0x3 << XGMAC_TTC_SHIFT;
		else if (mode <= 192)
			value |= 0x4 << XGMAC_TTC_SHIFT;
		else if (mode <= 256)
			value |= 0x5 << XGMAC_TTC_SHIFT;
		else if (mode <= 384)
			value |= 0x6 << XGMAC_TTC_SHIFT;
		else
			value |= 0x7 << XGMAC_TTC_SHIFT;
	}

	/* Use static TC to Queue mapping */
	value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;

	/* TXQEN: 0x1 = enabled for AVB, 0x2 = fully enabled */
	value &= ~XGMAC_TXQEN;
	if (qmode != MTL_QUEUE_AVB)
		value |= 0x2 << XGMAC_TXQEN_SHIFT;
	else
		value |= 0x1 << XGMAC_TXQEN_SHIFT;

	value &= ~XGMAC_TQS;
	value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;

	writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
}
229 
230 static void dwxgmac2_enable_dma_irq(struct stmmac_priv *priv,
231 				    void __iomem *ioaddr, u32 chan,
232 				    bool rx, bool tx)
233 {
234 	u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
235 
236 	if (rx)
237 		value |= XGMAC_DMA_INT_DEFAULT_RX;
238 	if (tx)
239 		value |= XGMAC_DMA_INT_DEFAULT_TX;
240 
241 	writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
242 }
243 
244 static void dwxgmac2_disable_dma_irq(struct stmmac_priv *priv,
245 				     void __iomem *ioaddr, u32 chan,
246 				     bool rx, bool tx)
247 {
248 	u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
249 
250 	if (rx)
251 		value &= ~XGMAC_DMA_INT_DEFAULT_RX;
252 	if (tx)
253 		value &= ~XGMAC_DMA_INT_DEFAULT_TX;
254 
255 	writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
256 }
257 
258 static void dwxgmac2_dma_start_tx(struct stmmac_priv *priv,
259 				  void __iomem *ioaddr, u32 chan)
260 {
261 	u32 value;
262 
263 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
264 	value |= XGMAC_TXST;
265 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
266 
267 	value = readl(ioaddr + XGMAC_TX_CONFIG);
268 	value |= XGMAC_CONFIG_TE;
269 	writel(value, ioaddr + XGMAC_TX_CONFIG);
270 }
271 
272 static void dwxgmac2_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
273 				 u32 chan)
274 {
275 	u32 value;
276 
277 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
278 	value &= ~XGMAC_TXST;
279 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
280 
281 	value = readl(ioaddr + XGMAC_TX_CONFIG);
282 	value &= ~XGMAC_CONFIG_TE;
283 	writel(value, ioaddr + XGMAC_TX_CONFIG);
284 }
285 
286 static void dwxgmac2_dma_start_rx(struct stmmac_priv *priv,
287 				  void __iomem *ioaddr, u32 chan)
288 {
289 	u32 value;
290 
291 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
292 	value |= XGMAC_RXST;
293 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
294 
295 	value = readl(ioaddr + XGMAC_RX_CONFIG);
296 	value |= XGMAC_CONFIG_RE;
297 	writel(value, ioaddr + XGMAC_RX_CONFIG);
298 }
299 
300 static void dwxgmac2_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
301 				 u32 chan)
302 {
303 	u32 value;
304 
305 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
306 	value &= ~XGMAC_RXST;
307 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
308 }
309 
/* Decode and acknowledge the DMA interrupt status of @chan.
 *
 * @dir restricts the decode to RX-only or TX-only status bits. Updates
 * the per-cpu normal-IRQ counters and the extra stats in @x, and returns
 * a bitmask of handle_rx / handle_tx / tx_hard_error flags for the
 * caller to act on. Only the bits that are both pending and enabled are
 * written back to the status register to clear them.
 */
static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
				  void __iomem *ioaddr,
				  struct stmmac_extra_stats *x, u32 chan,
				  u32 dir)
{
	struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
	u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
	u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
	int ret = 0;

	/* Mask the status to the direction being serviced */
	if (dir == DMA_DIR_RX)
		intr_status &= XGMAC_DMA_STATUS_MSK_RX;
	else if (dir == DMA_DIR_TX)
		intr_status &= XGMAC_DMA_STATUS_MSK_TX;

	/* ABNORMAL interrupts */
	if (unlikely(intr_status & XGMAC_AIS)) {
		if (unlikely(intr_status & XGMAC_RBU)) {
			x->rx_buf_unav_irq++;
			ret |= handle_rx;
		}
		if (unlikely(intr_status & XGMAC_TPS)) {
			x->tx_process_stopped_irq++;
			ret |= tx_hard_error;
		}
		if (unlikely(intr_status & XGMAC_FBE)) {
			x->fatal_bus_error_irq++;
			ret |= tx_hard_error;
		}
	}

	/* TX/RX NORMAL interrupts */
	if (likely(intr_status & XGMAC_RI)) {
		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->rx_normal_irq_n[chan]);
		u64_stats_update_end(&stats->syncp);
		ret |= handle_rx;
	}
	if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->tx_normal_irq_n[chan]);
		u64_stats_update_end(&stats->syncp);
		ret |= handle_tx;
	}

	/* Clear interrupts */
	writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));

	return ret;
}
360 
/* Populate @dma_cap by decoding the XGMAC_HW_FEATURE0..4 capability
 * registers. Always returns 0.
 *
 * NOTE(review): this relies on @dma_cap being the dma_cap member
 * embedded in struct stmmac_priv — container_of() is used to recover
 * priv so the core revision can be checked below.
 */
static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
				   struct dma_features *dma_cap)
{
	struct stmmac_priv *priv;
	u32 hw_cap;

	priv = container_of(dma_cap, struct stmmac_priv, dma_cap);

	/* MAC HW feature 0 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
	dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31;
	dma_cap->ediffc = (hw_cap & XGMAC_HWFEAT_EDIFFC) >> 30;
	dma_cap->vxn = (hw_cap & XGMAC_HWFEAT_VXN) >> 29;
	dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
	dma_cap->tssrc = (hw_cap & XGMAC_HWFEAT_TSSTSSEL) >> 25;
	dma_cap->multi_addr = (hw_cap & XGMAC_HWFEAT_ADDMACADRSEL) >> 18;
	dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
	dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
	dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
	dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
	/* AV is only reported when RAVSEL is not set */
	dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
	dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
	dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
	dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
	dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
	dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
	dma_cap->sma_mdio = (hw_cap & XGMAC_HWFEAT_SMASEL) >> 5;
	dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
	dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
	dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
	/* 10/100 Mbps support implied on 2.20+ cores with GMII */
	if (dma_cap->mbps_1000 && priv->synopsys_id >= DWXGMAC_CORE_2_20)
		dma_cap->mbps_10_100 = 1;

	/* MAC HW feature 1 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
	dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
	/* If L3L4FNUM < 8, then the number of L3L4 filters supported by
	 * XGMAC is equal to L3L4FNUM. From L3L4FNUM >= 8 the number of
	 * L3L4 filters goes on like 8, 16, 32, ... Current maximum of
	 * L3L4FNUM = 10.
	 */
	if (dma_cap->l3l4fnum >= 8 && dma_cap->l3l4fnum <= 10)
		dma_cap->l3l4fnum = 8 << (dma_cap->l3l4fnum - 8);
	else if (dma_cap->l3l4fnum > 10)
		dma_cap->l3l4fnum = 32;

	dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
	dma_cap->numtc = ((hw_cap & XGMAC_HWFEAT_NUMTC) >> 21) + 1;
	dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
	dma_cap->dbgmem = (hw_cap & XGMAC_HWFEAT_DBGMEMA) >> 19;
	dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
	dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
	dma_cap->dcben = (hw_cap & XGMAC_HWFEAT_DCBEN) >> 16;

	/* Decode the DMA address width field into a bit count */
	dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		dma_cap->addr64 = 32;
		break;
	}

	dma_cap->advthword = (hw_cap & XGMAC_HWFEAT_ADVTHWORD) >> 13;
	dma_cap->ptoen = (hw_cap & XGMAC_HWFEAT_PTOEN) >> 12;
	dma_cap->osten = (hw_cap & XGMAC_HWFEAT_OSTEN) >> 11;
	/* FIFO sizes are encoded as log2 of (size / 128) */
	dma_cap->tx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
	dma_cap->pfcen = (hw_cap & XGMAC_HWFEAT_PFCEN) >> 5;
	dma_cap->rx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);

	/* MAC HW feature 2 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
	dma_cap->aux_snapshot_n = (hw_cap & XGMAC_HWFEAT_AUXSNAPNUM) >> 28;
	dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
	/* Channel/queue counts are stored as count - 1 in hardware */
	dma_cap->number_tx_channel =
		((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
	dma_cap->number_rx_channel =
		((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
	dma_cap->number_rx_queues =
		((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;

	/* MAC HW feature 3 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
	dma_cap->tbs_ch_num = ((hw_cap & XGMAC_HWFEAT_TBSCH) >> 28) + 1;
	dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
	dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
	dma_cap->sgfsel = (hw_cap & XGMAC_HWFEAT_SGFSEL) >> 25;
	dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
	dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
	dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19;
	dma_cap->ttsfd = (hw_cap & XGMAC_HWFEAT_TTSFD) >> 16;
	dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
	dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
	dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
	dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
	dma_cap->pou_ost_en = (hw_cap & XGMAC_HWFEAT_POUOST) >> 8;
	dma_cap->frppipe_num = ((hw_cap & XGMAC_HWFEAT_FRPPIPE) >> 5) + 1;
	dma_cap->cbtisel = (hw_cap & XGMAC_HWFEAT_CBTISEL) >> 4;
	dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
	dma_cap->nrvf_num = (hw_cap & XGMAC_HWFEAT_NRVF) >> 0;

	/* MAC HW feature 4 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE4);
	/* Extended ASP bits augment the field read from HW_FEATURE3 */
	dma_cap->asp |= (hw_cap & XGMAC_HWFEAT_EASP) >> 2;
	dma_cap->pcsel = (hw_cap & XGMAC_HWFEAT_PCSEL) >> 0;

	return 0;
}
480 
481 static void dwxgmac2_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
482 				 u32 riwt, u32 queue)
483 {
484 	writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue));
485 }
486 
487 static void dwxgmac2_set_rx_ring_len(struct stmmac_priv *priv,
488 				     void __iomem *ioaddr, u32 len, u32 chan)
489 {
490 	writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
491 }
492 
493 static void dwxgmac2_set_tx_ring_len(struct stmmac_priv *priv,
494 				     void __iomem *ioaddr, u32 len, u32 chan)
495 {
496 	writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
497 }
498 
499 static void dwxgmac2_set_rx_tail_ptr(struct stmmac_priv *priv,
500 				     void __iomem *ioaddr, u32 ptr, u32 chan)
501 {
502 	writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
503 }
504 
505 static void dwxgmac2_set_tx_tail_ptr(struct stmmac_priv *priv,
506 				     void __iomem *ioaddr, u32 ptr, u32 chan)
507 {
508 	writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
509 }
510 
511 static void dwxgmac2_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
512 				bool en, u32 chan)
513 {
514 	u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
515 
516 	if (en)
517 		value |= XGMAC_TSE;
518 	else
519 		value &= ~XGMAC_TSE;
520 
521 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
522 }
523 
524 static void dwxgmac2_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
525 			   u32 channel, u8 qmode)
526 {
527 	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
528 	u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
529 
530 	value &= ~XGMAC_TXQEN;
531 	if (qmode != MTL_QUEUE_AVB) {
532 		value |= 0x2 << XGMAC_TXQEN_SHIFT;
533 		writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
534 	} else {
535 		value |= 0x1 << XGMAC_TXQEN_SHIFT;
536 		writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
537 	}
538 
539 	writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
540 }
541 
542 static void dwxgmac2_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
543 				int bfsize, u32 chan)
544 {
545 	u32 value;
546 
547 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
548 	value &= ~XGMAC_RBSZ;
549 	value |= bfsize << XGMAC_RBSZ_SHIFT;
550 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
551 }
552 
553 static void dwxgmac2_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
554 				bool en, u32 chan)
555 {
556 	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
557 
558 	value &= ~XGMAC_CONFIG_HDSMS;
559 	value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
560 	writel(value, ioaddr + XGMAC_RX_CONFIG);
561 
562 	value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
563 	if (en)
564 		value |= XGMAC_SPH;
565 	else
566 		value &= ~XGMAC_SPH;
567 	writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
568 }
569 
/* Enable or disable TBS enhanced descriptors (EDSE) on TX channel @chan.
 *
 * After writing EDSE the bit is read back: if enabling was requested but
 * the bit did not stick, the channel has no TBS support and -EIO is
 * returned. On success the four TBS control registers are programmed
 * with XGMAC_DEF_FTOS (default fetch/launch time settings — see the
 * macro definition in dwxgmac2.h). Returns 0 on success.
 */
static int dwxgmac2_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
			       bool en, u32 chan)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

	if (en)
		value |= XGMAC_EDSE;
	else
		value &= ~XGMAC_EDSE;

	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

	/* Read back EDSE to detect missing TBS support */
	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE;
	if (en && !value)
		return -EIO;

	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0);
	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1);
	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2);
	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3);
	return 0;
}
592 
/* DMA callback table for the XGMAC 2.10 core, consumed by the common
 * stmmac HW-interface layer.
 */
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
	.reset = dwxgmac2_dma_reset,
	.init = dwxgmac2_dma_init,
	.init_chan = dwxgmac2_dma_init_chan,
	.init_rx_chan = dwxgmac2_dma_init_rx_chan,
	.init_tx_chan = dwxgmac2_dma_init_tx_chan,
	.axi = dwxgmac2_dma_axi,
	.dump_regs = dwxgmac2_dma_dump_regs,
	.dma_rx_mode = dwxgmac2_dma_rx_mode,
	.dma_tx_mode = dwxgmac2_dma_tx_mode,
	.enable_dma_irq = dwxgmac2_enable_dma_irq,
	.disable_dma_irq = dwxgmac2_disable_dma_irq,
	.start_tx = dwxgmac2_dma_start_tx,
	.stop_tx = dwxgmac2_dma_stop_tx,
	.start_rx = dwxgmac2_dma_start_rx,
	.stop_rx = dwxgmac2_dma_stop_rx,
	.dma_interrupt = dwxgmac2_dma_interrupt,
	.get_hw_feature = dwxgmac2_get_hw_feature,
	.rx_watchdog = dwxgmac2_rx_watchdog,
	.set_rx_ring_len = dwxgmac2_set_rx_ring_len,
	.set_tx_ring_len = dwxgmac2_set_tx_ring_len,
	.set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
	.enable_tso = dwxgmac2_enable_tso,
	.qmode = dwxgmac2_qmode,
	.set_bfsize = dwxgmac2_set_bfsize,
	.enable_sph = dwxgmac2_enable_sph,
	.enable_tbs = dwxgmac2_enable_tbs,
};
622