xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
  DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
  developing this code.

  This contains the functions to handle the DMA.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/io.h>
#include "dwmac1000.h"
#include "dwmac_dma.h"
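
/* Program the AXI bus mode register: LPI behaviour, read/write outstanding
 * request limits and the allowed burst lengths taken from the stmmac_axi
 * configuration.
 */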
static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);

	pr_info("dwmac1000: Master AXI performs %s burst length\n",
		!(value & DMA_AXI_UNDEF) ? "fixed" : "any");

	if (axi->axi_lpi_en)
		value |= DMA_AXI_EN_LPI;
	if (axi->axi_xit_frm)
		value |= DMA_AXI_LPI_XIT_FRM;

	value &= ~DMA_AXI_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
		 DMA_AXI_WR_OSR_LMT_SHIFT;

	value &= ~DMA_AXI_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
		 DMA_AXI_RD_OSR_LMT_SHIFT;

	/* Depending on the UNDEF bit the Master AXI will perform any burst
	 * length according to the BLEN programmed (by default all BLEN are
	 * set). Note that the UNDEF bit is read-only, and is the inverse of
	 * Bus Mode bit 16.
	 */
	value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;

	writel(value, ioaddr + DMA_AXI_BUS_MODE);
}
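
/* Per-channel bus mode setup: TX/RX programmable burst lengths, fixed or
 * mixed burst, alternate descriptor size and address-aligned beats, plus
 * programming the default interrupt mask into the channel's CSR7.
 */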
static void dwmac1000_dma_init_channel(struct stmmac_priv *priv,
				       void __iomem *ioaddr,
				       struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
	int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
	int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
	u32 value;

	value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));

	/* Set the DMA PBL (Programmable Burst Length) mode.
	 *
	 * Note: before stmmac core 3.50 this mode bit acted as a 4xPBL
	 * multiplier; from 3.50 onwards it acts as 8xPBL.
	 */
	if (dma_cfg->pblx8)
		value |= DMA_BUS_MODE_MAXPBL;
	value |= DMA_BUS_MODE_USP;
	value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
	value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
	value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);

	/* Set the Fixed burst mode */
	if (dma_cfg->fixed_burst)
		value |= DMA_BUS_MODE_FB;

	/* Mixed Burst has no effect when fb is set */
	if (dma_cfg->mixed_burst)
		value |= DMA_BUS_MODE_MB;

	if (dma_cfg->atds)
		value |= DMA_BUS_MODE_ATDS;

	if (dma_cfg->aal)
		value |= DMA_BUS_MODE_AAL;

	writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(chan));
}

static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
				  void __iomem *ioaddr,
				  struct stmmac_dma_cfg *dma_cfg,
				  dma_addr_t dma_rx_phy, u32 chan)
{
	/* RX descriptor base address list must be written into DMA CSR3 */
	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RCV_BASE_ADDR(chan));
}

static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
				  void __iomem *ioaddr,
				  struct stmmac_dma_cfg *dma_cfg,
				  dma_addr_t dma_tx_phy, u32 chan)
{
	/* TX descriptor base address list must be written into DMA CSR4 */
	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
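
/* Set the receive flow control thresholds in CSR6 based on the RX FIFO
 * size; flow control is left disabled when the FIFO is too small for the
 * thresholds to make sense.
 */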
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
{
	csr6 &= ~DMA_CONTROL_RFA_MASK;
	csr6 &= ~DMA_CONTROL_RFD_MASK;

	/* Leave flow control disabled if the receive FIFO size is 0 or
	 * smaller than 4K. Otherwise, send XOFF when the FIFO is 1K short
	 * of full and send XON when it is 2K short of full.
	 */
	if (rxfifosz < 4096) {
		csr6 &= ~DMA_CONTROL_EFC;
		pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n",
			 rxfifosz);
	} else {
		csr6 |= DMA_CONTROL_EFC;
		csr6 |= RFA_FULL_MINUS_1K;
		csr6 |= RFD_FULL_MINUS_2K;
	}
	return csr6;
}
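
/* Select the RX operation mode: either store-and-forward or cut-through
 * with one of the supported receive threshold values.
 */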
static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
					    void __iomem *ioaddr, int mode,
					    u32 channel, int fifosz, u8 qmode)
{
	u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable RX store and forward mode\n");
		csr6 |= DMA_CONTROL_RSF | DMA_CONTROL_DFF;
	} else {
		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
		csr6 &= ~(DMA_CONTROL_RSF | DMA_CONTROL_DFF);
		csr6 &= DMA_CONTROL_TC_RX_MASK;
		if (mode <= 32)
			csr6 |= DMA_CONTROL_RTC_32;
		else if (mode <= 64)
			csr6 |= DMA_CONTROL_RTC_64;
		else if (mode <= 96)
			csr6 |= DMA_CONTROL_RTC_96;
		else
			csr6 |= DMA_CONTROL_RTC_128;
	}

	/* Configure flow control based on rx fifo size */
	csr6 = dwmac1000_configure_fc(csr6, fifosz);

	writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
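
/* Select the TX operation mode: store-and-forward together with the
 * operate-on-second-frame optimisation, or cut-through with one of the
 * supported transmit threshold values.
 */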
static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
					    void __iomem *ioaddr, int mode,
					    u32 channel, int fifosz, u8 qmode)
{
	u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		csr6 |= DMA_CONTROL_TSF;
		/* Operating on the second frame increases performance,
		 * especially when transmit store-and-forward is used.
		 */
		csr6 |= DMA_CONTROL_OSF;
	} else {
		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
		csr6 &= ~DMA_CONTROL_TSF;
		csr6 &= DMA_CONTROL_TC_TX_MASK;
		/* Set the transmit threshold */
		if (mode <= 32)
			csr6 |= DMA_CONTROL_TTC_32;
		else if (mode <= 64)
			csr6 |= DMA_CONTROL_TTC_64;
		else if (mode <= 128)
			csr6 |= DMA_CONTROL_TTC_128;
		else if (mode <= 192)
			csr6 |= DMA_CONTROL_TTC_192;
		else
			csr6 |= DMA_CONTROL_TTC_256;
	}

	writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
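
/* Copy the DMA CSR block into the register dump buffer; indexes 12-17 are
 * skipped (presumably reserved on this core).
 */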
static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv,
				    void __iomem *ioaddr, u32 *reg_space)
{
	int i;

	for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++)
		if ((i < 12) || (i > 17))
			reg_space[DMA_BUS_MODE / 4 + i] =
				readl(ioaddr + DMA_BUS_MODE + i * 4);
}
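
/* Decode the HW feature register into dma_features. Old cores that do not
 * implement the register read back all-zeroes, in which case the caller is
 * told the capabilities are unavailable.
 */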
static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
				    struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);

	if (!hw_cap) {
		/* 0x00000000 is the value read on old hardware that does not
		 * implement this register
		 */
		return -EOPNOTSUPP;
	}

	dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
	dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
	dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
	dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
	dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
	dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
	dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
	dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
	/* MMC */
	dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
	/* IEEE 1588-2002 */
	dma_cap->time_stamp = (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
	/* IEEE 1588-2008 */
	dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
	/* 802.3az - Energy-Efficient Ethernet (EEE) */
	dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
	dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
	/* TX and RX csum */
	dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
	dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
	dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
	dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
	/* TX and RX number of channels */
	dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
	dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
	/* Alternate (enhanced) DESC mode */
	dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;

	return 0;
}
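
/* Program the RX interrupt watchdog timer (RIWT) for the given queue. */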
static void dwmac1000_rx_watchdog(struct stmmac_priv *priv,
				  void __iomem *ioaddr, u32 riwt, u32 queue)
{
	writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
}

const struct stmmac_dma_ops dwmac1000_dma_ops = {
	.reset = dwmac_dma_reset,
	.init_chan = dwmac1000_dma_init_channel,
	.init_rx_chan = dwmac1000_dma_init_rx,
	.init_tx_chan = dwmac1000_dma_init_tx,
	.axi = dwmac1000_dma_axi,
	.dump_regs = dwmac1000_dump_dma_regs,
	.dma_rx_mode = dwmac1000_dma_operation_mode_rx,
	.dma_tx_mode = dwmac1000_dma_operation_mode_tx,
	.enable_dma_transmission = dwmac_enable_dma_transmission,
	.enable_dma_reception = dwmac_enable_dma_reception,
	.enable_dma_irq = dwmac_enable_dma_irq,
	.disable_dma_irq = dwmac_disable_dma_irq,
	.start_tx = dwmac_dma_start_tx,
	.stop_tx = dwmac_dma_stop_tx,
	.start_rx = dwmac_dma_start_rx,
	.stop_rx = dwmac_dma_stop_rx,
	.dma_interrupt = dwmac_dma_interrupt,
	.get_hw_feature = dwmac1000_get_hw_feature,
	.rx_watchdog = dwmac1000_rx_watchdog,
};
EXPORT_SYMBOL_GPL(dwmac1000_dma_ops);