xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c (revision ff124bbbca1d3a07fa1392ffdbbdeece71f68ece)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6 
7 #include <linux/iopoll.h>
8 #include "stmmac.h"
9 #include "dwxgmac2.h"
10 
11 static int dwxgmac2_dma_reset(void __iomem *ioaddr)
12 {
13 	u32 value = readl(ioaddr + XGMAC_DMA_MODE);
14 
15 	/* DMA SW reset */
16 	writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
17 
18 	return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
19 				  !(value & XGMAC_SWR), 0, 100000);
20 }
21 
22 static void dwxgmac2_dma_init(void __iomem *ioaddr,
23 			      struct stmmac_dma_cfg *dma_cfg)
24 {
25 	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
26 
27 	if (dma_cfg->aal)
28 		value |= XGMAC_AAL;
29 
30 	if (dma_cfg->eame)
31 		value |= XGMAC_EAME;
32 
33 	writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
34 }
35 
36 static void dwxgmac2_dma_init_chan(struct stmmac_priv *priv,
37 				   void __iomem *ioaddr,
38 				   struct stmmac_dma_cfg *dma_cfg, u32 chan)
39 {
40 	u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
41 
42 	if (dma_cfg->pblx8)
43 		value |= XGMAC_PBLx8;
44 
45 	writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
46 	writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
47 }
48 
49 static void dwxgmac2_dma_init_rx_chan(struct stmmac_priv *priv,
50 				      void __iomem *ioaddr,
51 				      struct stmmac_dma_cfg *dma_cfg,
52 				      dma_addr_t phy, u32 chan)
53 {
54 	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
55 	u32 value;
56 
57 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
58 	value = u32_replace_bits(value, rxpbl, XGMAC_RxPBL);
59 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
60 
61 	writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
62 	writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
63 }
64 
65 static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
66 				      void __iomem *ioaddr,
67 				      struct stmmac_dma_cfg *dma_cfg,
68 				      dma_addr_t phy, u32 chan)
69 {
70 	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
71 	u32 value;
72 
73 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
74 	value = u32_replace_bits(value, txpbl, XGMAC_TxPBL);
75 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
76 
77 	writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
78 	writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
79 }
80 
81 static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
82 {
83 	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
84 
85 	if (axi->axi_lpi_en)
86 		value |= XGMAC_EN_LPI;
87 	if (axi->axi_xit_frm)
88 		value |= XGMAC_LPI_XIT_PKT;
89 
90 	value = u32_replace_bits(value, axi->axi_wr_osr_lmt, XGMAC_WR_OSR_LMT);
91 	value = u32_replace_bits(value, axi->axi_rd_osr_lmt, XGMAC_RD_OSR_LMT);
92 
93 	if (!axi->axi_fb)
94 		value |= XGMAC_UNDEF;
95 
96 	/* Depending on the UNDEF bit the Master AXI will perform any burst
97 	 * length according to the BLEN programmed (by default all BLEN are
98 	 * set). Note that the UNDEF bit is readonly, and is the inverse of
99 	 * Bus Mode bit 16.
100 	 */
101 	value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
102 
103 	writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
104 	writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
105 	writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
106 }
107 
108 static void dwxgmac2_dma_dump_regs(struct stmmac_priv *priv,
109 				   void __iomem *ioaddr, u32 *reg_space)
110 {
111 	int i;
112 
113 	for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
114 		reg_space[i] = readl(ioaddr + i * 4);
115 }
116 
/* Configure the MTL RX queue operating mode for one queue/channel:
 * store-and-forward vs. threshold mode, queue size (RQS), and — for
 * non-AVB queues with a large enough FIFO — hardware flow control
 * activation/deactivation thresholds.
 *
 * @mode:   SF_DMA_MODE for store-and-forward, otherwise a byte
 *          threshold used to pick the RTC encoding.
 * @fifosz: per-queue RX FIFO size in bytes.
 */
static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
				 int mode, u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
	/* RQS field encodes FIFO size in 256-byte units, minus one */
	unsigned int rqs = fifosz / 256 - 1;
	unsigned int rtc;

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_RSF;
	} else {
		value &= ~XGMAC_RSF;

		/* Map the byte threshold onto the RTC register encoding */
		if (mode <= 64)
			rtc = 0x0;
		else if (mode <= 96)
			rtc = 0x2;
		else
			rtc = 0x3;

		value = u32_replace_bits(value, rtc, XGMAC_RTC);
	}

	value = u32_replace_bits(value, rqs, XGMAC_RQS);

	/* Hardware flow control only makes sense with a FIFO of at least
	 * 4KB and is not used on AVB queues.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
		unsigned int rfd, rfa;

		value |= XGMAC_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		default:
			rfd = 0x07; /* Full-4.5K */
			rfa = 0x04; /* Full-3K */
			break;
		}

		flow = u32_replace_bits(flow, rfd, XGMAC_RFD);
		flow = u32_replace_bits(flow, rfa, XGMAC_RFA);

		writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
	}

	writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
}
176 
/* Configure the MTL TX queue operating mode for one queue/channel:
 * store-and-forward vs. threshold mode, TC-to-queue mapping, queue
 * enable mode (AVB vs. generic), and queue size (TQS).
 *
 * @mode:   SF_DMA_MODE for store-and-forward, otherwise a byte
 *          threshold used to pick the TTC encoding.
 * @fifosz: per-queue TX FIFO size in bytes.
 */
static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
				 int mode, u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
	/* TQS field encodes FIFO size in 256-byte units, minus one */
	unsigned int tqs = fifosz / 256 - 1;
	unsigned int ttc, txqen;

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_TSF;
	} else {
		value &= ~XGMAC_TSF;

		/* Map the byte threshold onto the TTC register encoding */
		if (mode <= 64)
			ttc = 0x0;
		else if (mode <= 96)
			ttc = 0x2;
		else if (mode <= 128)
			ttc = 0x3;
		else if (mode <= 192)
			ttc = 0x4;
		else if (mode <= 256)
			ttc = 0x5;
		else if (mode <= 384)
			ttc = 0x6;
		else
			ttc = 0x7;

		value = u32_replace_bits(value, ttc, XGMAC_TTC);
	}

	/* Use static TC to Queue mapping */
	value |= FIELD_PREP(XGMAC_Q2TCMAP, channel);

	/* TXQEN: 0x2 enables the queue, 0x1 enables it in AVB mode */
	if (qmode != MTL_QUEUE_AVB)
		txqen = 0x2;
	else
		txqen = 0x1;

	value = u32_replace_bits(value, txqen, XGMAC_TXQEN);
	value = u32_replace_bits(value, tqs, XGMAC_TQS);

	writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
}
220 
221 static void dwxgmac2_enable_dma_irq(struct stmmac_priv *priv,
222 				    void __iomem *ioaddr, u32 chan,
223 				    bool rx, bool tx)
224 {
225 	u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
226 
227 	if (rx)
228 		value |= XGMAC_DMA_INT_DEFAULT_RX;
229 	if (tx)
230 		value |= XGMAC_DMA_INT_DEFAULT_TX;
231 
232 	writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
233 }
234 
235 static void dwxgmac2_disable_dma_irq(struct stmmac_priv *priv,
236 				     void __iomem *ioaddr, u32 chan,
237 				     bool rx, bool tx)
238 {
239 	u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
240 
241 	if (rx)
242 		value &= ~XGMAC_DMA_INT_DEFAULT_RX;
243 	if (tx)
244 		value &= ~XGMAC_DMA_INT_DEFAULT_TX;
245 
246 	writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
247 }
248 
249 static void dwxgmac2_dma_start_tx(struct stmmac_priv *priv,
250 				  void __iomem *ioaddr, u32 chan)
251 {
252 	u32 value;
253 
254 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
255 	value |= XGMAC_TXST;
256 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
257 
258 	value = readl(ioaddr + XGMAC_TX_CONFIG);
259 	value |= XGMAC_CONFIG_TE;
260 	writel(value, ioaddr + XGMAC_TX_CONFIG);
261 }
262 
263 static void dwxgmac2_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
264 				 u32 chan)
265 {
266 	u32 value;
267 
268 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
269 	value &= ~XGMAC_TXST;
270 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
271 
272 	value = readl(ioaddr + XGMAC_TX_CONFIG);
273 	value &= ~XGMAC_CONFIG_TE;
274 	writel(value, ioaddr + XGMAC_TX_CONFIG);
275 }
276 
277 static void dwxgmac2_dma_start_rx(struct stmmac_priv *priv,
278 				  void __iomem *ioaddr, u32 chan)
279 {
280 	u32 value;
281 
282 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
283 	value |= XGMAC_RXST;
284 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
285 
286 	value = readl(ioaddr + XGMAC_RX_CONFIG);
287 	value |= XGMAC_CONFIG_RE;
288 	writel(value, ioaddr + XGMAC_RX_CONFIG);
289 }
290 
291 static void dwxgmac2_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
292 				 u32 chan)
293 {
294 	u32 value;
295 
296 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
297 	value &= ~XGMAC_RXST;
298 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
299 }
300 
/* Service the per-channel DMA interrupt status: account abnormal
 * events, bump per-CPU RX/TX interrupt statistics, acknowledge the
 * handled bits, and return a bitmask of handle_rx / handle_tx /
 * tx_hard_error actions for the caller.
 *
 * @dir: DMA_DIR_RX / DMA_DIR_TX restricts handling to one direction
 *       (e.g. for per-direction MSI vectors); any other value handles
 *       both.
 */
static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
				  void __iomem *ioaddr,
				  struct stmmac_extra_stats *x, u32 chan,
				  u32 dir)
{
	struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
	u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
	u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
	int ret = 0;

	/* Only consider the bits belonging to the requested direction */
	if (dir == DMA_DIR_RX)
		intr_status &= XGMAC_DMA_STATUS_MSK_RX;
	else if (dir == DMA_DIR_TX)
		intr_status &= XGMAC_DMA_STATUS_MSK_TX;

	/* ABNORMAL interrupts */
	if (unlikely(intr_status & XGMAC_AIS)) {
		if (unlikely(intr_status & XGMAC_RBU)) {
			/* RX buffer unavailable: let NAPI refill the ring */
			x->rx_buf_unav_irq++;
			ret |= handle_rx;
		}
		if (unlikely(intr_status & XGMAC_TPS)) {
			x->tx_process_stopped_irq++;
			ret |= tx_hard_error;
		}
		if (unlikely(intr_status & XGMAC_FBE)) {
			x->fatal_bus_error_irq++;
			ret |= tx_hard_error;
		}
	}

	/* TX/RX NORMAL interrupts */
	if (likely(intr_status & XGMAC_RI)) {
		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->rx_normal_irq_n[chan]);
		u64_stats_update_end(&stats->syncp);
		ret |= handle_rx;
	}
	if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->tx_normal_irq_n[chan]);
		u64_stats_update_end(&stats->syncp);
		ret |= handle_tx;
	}

	/* Clear interrupts */
	writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));

	return ret;
}
351 
/* Decode the XGMAC HW_FEATURE0..4 capability registers into the
 * driver's dma_features structure. Always returns 0.
 */
static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
				   struct dma_features *dma_cap)
{
	struct stmmac_priv *priv;
	u32 hw_cap;

	/* NOTE(review): assumes dma_cap is the dma_cap member embedded in
	 * struct stmmac_priv — only valid for callers passing that member.
	 */
	priv = container_of(dma_cap, struct stmmac_priv, dma_cap);

	/* MAC HW feature 0 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
	dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31;
	dma_cap->ediffc = (hw_cap & XGMAC_HWFEAT_EDIFFC) >> 30;
	dma_cap->vxn = (hw_cap & XGMAC_HWFEAT_VXN) >> 29;
	dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
	dma_cap->tssrc = (hw_cap & XGMAC_HWFEAT_TSSTSSEL) >> 25;
	dma_cap->actphyif = FIELD_GET(XGMAC_HWFEAT_PHYSEL, hw_cap);
	dma_cap->multi_addr = (hw_cap & XGMAC_HWFEAT_ADDMACADRSEL) >> 18;
	dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
	dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
	dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
	dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
	/* AV support only counted when RX-side AV (RAVSEL) is absent */
	dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
	dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
	dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
	dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
	dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
	dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
	dma_cap->sma_mdio = (hw_cap & XGMAC_HWFEAT_SMASEL) >> 5;
	dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
	dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
	dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
	/* 10/100 Mbps assumed available on GMII-capable cores >= 2.20 */
	if (dma_cap->mbps_1000 && priv->synopsys_id >= DWXGMAC_CORE_2_20)
		dma_cap->mbps_10_100 = 1;

	/* MAC HW feature 1 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
	dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
	/* If L3L4FNUM < 8, then the number of L3L4 filters supported by
	 * XGMAC is equal to L3L4FNUM. From L3L4FNUM >= 8 the number of
	 * L3L4 filters goes on like 8, 16, 32, ... Current maximum of
	 * L3L4FNUM = 10.
	 */
	if (dma_cap->l3l4fnum >= 8 && dma_cap->l3l4fnum <= 10)
		dma_cap->l3l4fnum = 8 << (dma_cap->l3l4fnum - 8);
	else if (dma_cap->l3l4fnum > 10)
		dma_cap->l3l4fnum = 32;

	dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
	dma_cap->numtc = ((hw_cap & XGMAC_HWFEAT_NUMTC) >> 21) + 1;
	dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
	dma_cap->dbgmem = (hw_cap & XGMAC_HWFEAT_DBGMEMA) >> 19;
	dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
	dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
	dma_cap->dcben = (hw_cap & XGMAC_HWFEAT_DCBEN) >> 16;

	/* Translate the encoded field into a DMA address width in bits */
	dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		/* Unknown encoding: fall back to the safe 32-bit width */
		dma_cap->addr64 = 32;
		break;
	}

	dma_cap->advthword = (hw_cap & XGMAC_HWFEAT_ADVTHWORD) >> 13;
	dma_cap->ptoen = (hw_cap & XGMAC_HWFEAT_PTOEN) >> 12;
	dma_cap->osten = (hw_cap & XGMAC_HWFEAT_OSTEN) >> 11;
	/* FIFO sizes are encoded as 128 << field, in bytes */
	dma_cap->tx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
	dma_cap->pfcen = (hw_cap & XGMAC_HWFEAT_PFCEN) >> 5;
	dma_cap->rx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);

	/* MAC HW feature 2 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
	dma_cap->aux_snapshot_n = (hw_cap & XGMAC_HWFEAT_AUXSNAPNUM) >> 28;
	dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
	/* Channel/queue counts are stored zero-based in hardware */
	dma_cap->number_tx_channel =
		((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
	dma_cap->number_rx_channel =
		((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
	dma_cap->number_rx_queues =
		((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;

	/* MAC HW feature 3 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
	dma_cap->tbs_ch_num = ((hw_cap & XGMAC_HWFEAT_TBSCH) >> 28) + 1;
	dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
	dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
	dma_cap->sgfsel = (hw_cap & XGMAC_HWFEAT_SGFSEL) >> 25;
	dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
	dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
	dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19;
	dma_cap->ttsfd = (hw_cap & XGMAC_HWFEAT_TTSFD) >> 16;
	dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
	dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
	dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
	dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
	dma_cap->pou_ost_en = (hw_cap & XGMAC_HWFEAT_POUOST) >> 8;
	dma_cap->frppipe_num = ((hw_cap & XGMAC_HWFEAT_FRPPIPE) >> 5) + 1;
	dma_cap->cbtisel = (hw_cap & XGMAC_HWFEAT_CBTISEL) >> 4;
	dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
	dma_cap->nrvf_num = (hw_cap & XGMAC_HWFEAT_NRVF) >> 0;

	/* MAC HW feature 4 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE4);
	/* Extended ASP bits are OR-ed into the base ASP capability */
	dma_cap->asp |= (hw_cap & XGMAC_HWFEAT_EASP) >> 2;
	dma_cap->pcsel = (hw_cap & XGMAC_HWFEAT_PCSEL) >> 0;

	return 0;
}
472 
/* Program the RX interrupt watchdog timer (coalescing) for a queue;
 * riwt is masked to the valid RWT field width.
 */
static void dwxgmac2_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
				 u32 riwt, u32 queue)
{
	writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue));
}
478 
/* Program the RX descriptor ring length for a channel. */
static void dwxgmac2_set_rx_ring_len(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 len, u32 chan)
{
	writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
}
484 
/* Program the TX descriptor ring length for a channel. */
static void dwxgmac2_set_tx_ring_len(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 len, u32 chan)
{
	writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
}
490 
/* Update the RX descriptor tail pointer, telling the DMA how far it
 * may advance through the ring.
 */
static void dwxgmac2_set_rx_tail_ptr(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 ptr, u32 chan)
{
	writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
}
496 
/* Update the TX descriptor tail pointer, telling the DMA how far it
 * may advance through the ring.
 */
static void dwxgmac2_set_tx_tail_ptr(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 ptr, u32 chan)
{
	writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
}
502 
503 static void dwxgmac2_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
504 				bool en, u32 chan)
505 {
506 	u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
507 
508 	if (en)
509 		value |= XGMAC_TSE;
510 	else
511 		value &= ~XGMAC_TSE;
512 
513 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
514 }
515 
516 static void dwxgmac2_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
517 			   u32 channel, u8 qmode)
518 {
519 	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
520 	u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
521 	unsigned int txqen;
522 
523 	if (qmode != MTL_QUEUE_AVB) {
524 		txqen = 0x2;
525 		writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
526 	} else {
527 		txqen = 0x1;
528 		writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
529 	}
530 
531 	value = u32_replace_bits(value, txqen, XGMAC_TXQEN);
532 	writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
533 }
534 
535 static void dwxgmac2_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
536 				int bfsize, u32 chan)
537 {
538 	u32 value;
539 
540 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
541 	value = u32_replace_bits(value, bfsize, XGMAC_RBSZ);
542 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
543 }
544 
545 static void dwxgmac2_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
546 				bool en, u32 chan)
547 {
548 	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
549 
550 	value &= ~XGMAC_CONFIG_HDSMS;
551 	value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
552 	writel(value, ioaddr + XGMAC_RX_CONFIG);
553 
554 	value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
555 	if (en)
556 		value |= XGMAC_SPH;
557 	else
558 		value &= ~XGMAC_SPH;
559 	writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
560 }
561 
562 static int dwxgmac2_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
563 			       bool en, u32 chan)
564 {
565 	u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
566 
567 	if (en)
568 		value |= XGMAC_EDSE;
569 	else
570 		value &= ~XGMAC_EDSE;
571 
572 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
573 
574 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE;
575 	if (en && !value)
576 		return -EIO;
577 
578 	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0);
579 	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1);
580 	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2);
581 	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3);
582 	return 0;
583 }
584 
/* DMA operations table for the DesignWare XGMAC 2.10 core, wired into
 * the stmmac core's stmmac_dma_ops abstraction.
 */
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
	.reset = dwxgmac2_dma_reset,
	.init = dwxgmac2_dma_init,
	.init_chan = dwxgmac2_dma_init_chan,
	.init_rx_chan = dwxgmac2_dma_init_rx_chan,
	.init_tx_chan = dwxgmac2_dma_init_tx_chan,
	.axi = dwxgmac2_dma_axi,
	.dump_regs = dwxgmac2_dma_dump_regs,
	.dma_rx_mode = dwxgmac2_dma_rx_mode,
	.dma_tx_mode = dwxgmac2_dma_tx_mode,
	.enable_dma_irq = dwxgmac2_enable_dma_irq,
	.disable_dma_irq = dwxgmac2_disable_dma_irq,
	.start_tx = dwxgmac2_dma_start_tx,
	.stop_tx = dwxgmac2_dma_stop_tx,
	.start_rx = dwxgmac2_dma_start_rx,
	.stop_rx = dwxgmac2_dma_stop_rx,
	.dma_interrupt = dwxgmac2_dma_interrupt,
	.get_hw_feature = dwxgmac2_get_hw_feature,
	.rx_watchdog = dwxgmac2_rx_watchdog,
	.set_rx_ring_len = dwxgmac2_set_rx_ring_len,
	.set_tx_ring_len = dwxgmac2_set_tx_ring_len,
	.set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
	.enable_tso = dwxgmac2_enable_tso,
	.qmode = dwxgmac2_qmode,
	.set_bfsize = dwxgmac2_set_bfsize,
	.enable_sph = dwxgmac2_enable_sph,
	.enable_tbs = dwxgmac2_enable_tbs,
};
614