xref: /linux/drivers/net/ethernet/freescale/fs_enet/mac-scc.c (revision 566ab427f827b0256d3e8ce0235d088e6a9c28bd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
4  *
5  * Copyright (c) 2003 Intracom S.A.
6  *  by Pantelis Antoniou <panto@intracom.gr>
7  *
8  * 2005 (c) MontaVista Software, Inc.
9  * Vitaly Bordug <vbordug@ru.mvista.com>
10  */
11 
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/string.h>
16 #include <linux/ptrace.h>
17 #include <linux/errno.h>
18 #include <linux/ioport.h>
19 #include <linux/interrupt.h>
20 #include <linux/delay.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/spinlock.h>
25 #include <linux/ethtool.h>
26 #include <linux/bitops.h>
27 #include <linux/fs.h>
28 #include <linux/platform_device.h>
29 #include <linux/of_address.h>
30 #include <linux/of_irq.h>
31 
32 #include <asm/irq.h>
33 #include <linux/uaccess.h>
34 
35 #include "fs_enet.h"
36 
/*************************************************/

/*
 * MMIO accessors.  On CPM1 (MPC8xx) the plain __raw_xxx helpers are
 * sufficient; on everything else play it safe and use the byte-order
 * aware out_beXX()/in_beXX() accessors.
 */
#if defined(CONFIG_CPM1)
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_out8(addr, x)	__raw_writeb(x, addr)
#define __fs_in32(addr)	__raw_readl(addr)
#define __fs_in16(addr)	__raw_readw(addr)
#define __fs_in8(addr)	__raw_readb(addr)
#else
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)	in_be32(addr)
#define __fs_in16(addr)	in_be16(addr)
#define __fs_out8(addr, x)	out_8(addr, x)
#define __fs_in8(addr)	in_8(addr)
#endif

/* Register helpers: write, read, set bits, clear bits on a struct member */
#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
#define R32(_p, _m)     __fs_in32(&(_p)->_m)
#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))

#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
#define R16(_p, _m)     __fs_in16(&(_p)->_m)
#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))

#define W8(_p, _m, _v)  __fs_out8(&(_p)->_m, (_v))
#define R8(_p, _m)      __fs_in8(&(_p)->_m)
#define S8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) & ~(_v))

/* Above this many multicast addresses the group hash is opened completely */
#define SCC_MAX_MULTICAST_ADDRS	64

/* Delay to wait for SCC reset command to complete (in us) */
#define SCC_RESET_DELAY		50
78 
79 static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
80 {
81 	const struct fs_platform_info *fpi = fep->fpi;
82 
83 	return cpm_command(fpi->cp_command, op);
84 }
85 
86 static int do_pd_setup(struct fs_enet_private *fep)
87 {
88 	struct platform_device *ofdev = to_platform_device(fep->dev);
89 
90 	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
91 	if (!fep->interrupt)
92 		return -EINVAL;
93 
94 	fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
95 	if (!fep->scc.sccp)
96 		return -EINVAL;
97 
98 	fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
99 	if (!fep->scc.ep) {
100 		iounmap(fep->scc.sccp);
101 		return -EINVAL;
102 	}
103 
104 	return 0;
105 }
106 
/* Event sets: serviced by NAPI, default-enabled, and error events */
#define SCC_NAPI_EVENT_MSK	(SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
#define SCC_EVENT		(SCCE_ENET_RXF | SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK	(SCCE_ENET_TXE | SCCE_ENET_BSY)
110 
111 static int setup_data(struct net_device *dev)
112 {
113 	struct fs_enet_private *fep = netdev_priv(dev);
114 
115 	do_pd_setup(fep);
116 
117 	fep->scc.hthi = 0;
118 	fep->scc.htlo = 0;
119 
120 	fep->ev_napi = SCC_NAPI_EVENT_MSK;
121 	fep->ev = SCC_EVENT | SCCE_ENET_TXE;
122 	fep->ev_err = SCC_ERR_EVENT_MSK;
123 
124 	return 0;
125 }
126 
127 static int allocate_bd(struct net_device *dev)
128 {
129 	struct fs_enet_private *fep = netdev_priv(dev);
130 	struct fs_platform_info *fpi = fep->fpi;
131 
132 	fpi->dpram_offset = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
133 					    sizeof(cbd_t), 8);
134 	if (IS_ERR_VALUE(fpi->dpram_offset))
135 		return -ENOMEM;
136 
137 	fep->ring_base = cpm_muram_addr(fpi->dpram_offset);
138 
139 	return 0;
140 }
141 
142 static void free_bd(struct net_device *dev)
143 {
144 	struct fs_enet_private *fep = netdev_priv(dev);
145 	const struct fs_platform_info *fpi = fep->fpi;
146 
147 	if (fep->ring_base)
148 		cpm_muram_free(fpi->dpram_offset);
149 }
150 
/* Counterpart of setup_data(); the SCC variant has nothing to undo. */
static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}
155 
156 static void set_promiscuous_mode(struct net_device *dev)
157 {
158 	struct fs_enet_private *fep = netdev_priv(dev);
159 	scc_t __iomem *sccp = fep->scc.sccp;
160 
161 	S16(sccp, scc_psmr, SCC_PSMR_PRO);
162 }
163 
164 static void set_multicast_start(struct net_device *dev)
165 {
166 	struct fs_enet_private *fep = netdev_priv(dev);
167 	scc_enet_t __iomem *ep = fep->scc.ep;
168 
169 	W16(ep, sen_gaddr1, 0);
170 	W16(ep, sen_gaddr2, 0);
171 	W16(ep, sen_gaddr3, 0);
172 	W16(ep, sen_gaddr4, 0);
173 }
174 
175 static void set_multicast_one(struct net_device *dev, const u8 * mac)
176 {
177 	struct fs_enet_private *fep = netdev_priv(dev);
178 	scc_enet_t __iomem *ep = fep->scc.ep;
179 	u16 taddrh, taddrm, taddrl;
180 
181 	taddrh = ((u16) mac[5] << 8) | mac[4];
182 	taddrm = ((u16) mac[3] << 8) | mac[2];
183 	taddrl = ((u16) mac[1] << 8) | mac[0];
184 
185 	W16(ep, sen_taddrh, taddrh);
186 	W16(ep, sen_taddrm, taddrm);
187 	W16(ep, sen_taddrl, taddrl);
188 	scc_cr_cmd(fep, CPM_CR_SET_GADDR);
189 }
190 
191 static void set_multicast_finish(struct net_device *dev)
192 {
193 	struct fs_enet_private *fep = netdev_priv(dev);
194 	scc_t __iomem *sccp = fep->scc.sccp;
195 	scc_enet_t __iomem *ep = fep->scc.ep;
196 
197 	/* clear promiscuous always */
198 	C16(sccp, scc_psmr, SCC_PSMR_PRO);
199 
200 	/* if all multi or too many multicasts; just enable all */
201 	if ((dev->flags & IFF_ALLMULTI) != 0 ||
202 	    netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
203 
204 		W16(ep, sen_gaddr1, 0xffff);
205 		W16(ep, sen_gaddr2, 0xffff);
206 		W16(ep, sen_gaddr3, 0xffff);
207 		W16(ep, sen_gaddr4, 0xffff);
208 	}
209 }
210 
211 static void set_multicast_list(struct net_device *dev)
212 {
213 	struct netdev_hw_addr *ha;
214 
215 	if ((dev->flags & IFF_PROMISC) == 0) {
216 		set_multicast_start(dev);
217 		netdev_for_each_mc_addr(ha, dev)
218 			set_multicast_one(dev, ha->addr);
219 		set_multicast_finish(dev);
220 	} else
221 		set_promiscuous_mode(dev);
222 }
223 
224 /*
225  * This function is called to start or restart the FEC during a link
226  * change.  This only happens when switching between half and full
227  * duplex.
228  */
229 static void restart(struct net_device *dev, phy_interface_t interface,
230 		    int speed, int duplex)
231 {
232 	struct fs_enet_private *fep = netdev_priv(dev);
233 	scc_t __iomem *sccp = fep->scc.sccp;
234 	scc_enet_t __iomem *ep = fep->scc.ep;
235 	const struct fs_platform_info *fpi = fep->fpi;
236 	u16 paddrh, paddrm, paddrl;
237 	const unsigned char *mac;
238 	int i;
239 
240 	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
241 
242 	/* clear everything (slow & steady does it) */
243 	for (i = 0; i < sizeof(*ep); i++)
244 		__fs_out8((u8 __iomem *)ep + i, 0);
245 
246 	/* point to bds */
247 	W16(ep, sen_genscc.scc_rbase, fpi->dpram_offset);
248 	W16(ep, sen_genscc.scc_tbase,
249 	    fpi->dpram_offset + sizeof(cbd_t) * fpi->rx_ring);
250 
251 	/* Initialize function code registers for big-endian.
252 	 */
253 #ifndef CONFIG_NOT_COHERENT_CACHE
254 	W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
255 	W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
256 #else
257 	W8(ep, sen_genscc.scc_rfcr, SCC_EB);
258 	W8(ep, sen_genscc.scc_tfcr, SCC_EB);
259 #endif
260 
261 	/* Set maximum bytes per receive buffer.
262 	 * This appears to be an Ethernet frame size, not the buffer
263 	 * fragment size.  It must be a multiple of four.
264 	 */
265 	W16(ep, sen_genscc.scc_mrblr, 0x5f0);
266 
267 	/* Set CRC preset and mask.
268 	 */
269 	W32(ep, sen_cpres, 0xffffffff);
270 	W32(ep, sen_cmask, 0xdebb20e3);
271 
272 	W32(ep, sen_crcec, 0);	/* CRC Error counter */
273 	W32(ep, sen_alec, 0);	/* alignment error counter */
274 	W32(ep, sen_disfc, 0);	/* discard frame counter */
275 
276 	W16(ep, sen_pads, 0x8888);	/* Tx short frame pad character */
277 	W16(ep, sen_retlim, 15);	/* Retry limit threshold */
278 
279 	W16(ep, sen_maxflr, 0x5ee);	/* maximum frame length register */
280 
281 	W16(ep, sen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */
282 
283 	W16(ep, sen_maxd1, 0x000005f0);	/* maximum DMA1 length */
284 	W16(ep, sen_maxd2, 0x000005f0);	/* maximum DMA2 length */
285 
286 	/* Clear hash tables.
287 	 */
288 	W16(ep, sen_gaddr1, 0);
289 	W16(ep, sen_gaddr2, 0);
290 	W16(ep, sen_gaddr3, 0);
291 	W16(ep, sen_gaddr4, 0);
292 	W16(ep, sen_iaddr1, 0);
293 	W16(ep, sen_iaddr2, 0);
294 	W16(ep, sen_iaddr3, 0);
295 	W16(ep, sen_iaddr4, 0);
296 
297 	/* set address
298 	 */
299 	mac = dev->dev_addr;
300 	paddrh = ((u16) mac[5] << 8) | mac[4];
301 	paddrm = ((u16) mac[3] << 8) | mac[2];
302 	paddrl = ((u16) mac[1] << 8) | mac[0];
303 
304 	W16(ep, sen_paddrh, paddrh);
305 	W16(ep, sen_paddrm, paddrm);
306 	W16(ep, sen_paddrl, paddrl);
307 
308 	W16(ep, sen_pper, 0);
309 	W16(ep, sen_taddrl, 0);
310 	W16(ep, sen_taddrm, 0);
311 	W16(ep, sen_taddrh, 0);
312 
313 	fs_init_bds(dev);
314 
315 	scc_cr_cmd(fep, CPM_CR_INIT_TRX);
316 
317 	W16(sccp, scc_scce, 0xffff);
318 
319 	/* Enable interrupts we wish to service.
320 	 */
321 	W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
322 
323 	/* Set GSMR_H to enable all normal operating modes.
324 	 * Set GSMR_L to enable Ethernet to MC68160.
325 	 */
326 	W32(sccp, scc_gsmrh, 0);
327 	W32(sccp, scc_gsmrl,
328 	    SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
329 	    SCC_GSMRL_MODE_ENET);
330 
331 	/* Set sync/delimiters.
332 	 */
333 	W16(sccp, scc_dsr, 0xd555);
334 
335 	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
336 	 * start frame search 22 bit times after RENA.
337 	 */
338 	W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
339 
340 	/* Set full duplex mode if needed */
341 	if (duplex == DUPLEX_FULL)
342 		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
343 
344 	/* Restore multicast and promiscuous settings */
345 	set_multicast_list(dev);
346 
347 	S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
348 }
349 
350 static void stop(struct net_device *dev)
351 {
352 	struct fs_enet_private *fep = netdev_priv(dev);
353 	scc_t __iomem *sccp = fep->scc.sccp;
354 	int i;
355 
356 	for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
357 		udelay(1);
358 
359 	if (i == SCC_RESET_DELAY)
360 		dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");
361 
362 	W16(sccp, scc_sccm, 0);
363 	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
364 
365 	fs_cleanup_bds(dev);
366 }
367 
368 static void napi_clear_event_fs(struct net_device *dev)
369 {
370 	struct fs_enet_private *fep = netdev_priv(dev);
371 	scc_t __iomem *sccp = fep->scc.sccp;
372 
373 	W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
374 }
375 
376 static void napi_enable_fs(struct net_device *dev)
377 {
378 	struct fs_enet_private *fep = netdev_priv(dev);
379 	scc_t __iomem *sccp = fep->scc.sccp;
380 
381 	S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
382 }
383 
384 static void napi_disable_fs(struct net_device *dev)
385 {
386 	struct fs_enet_private *fep = netdev_priv(dev);
387 	scc_t __iomem *sccp = fep->scc.sccp;
388 
389 	C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
390 }
391 
/* RX BD replenish hook; the SCC needs no action here. */
static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}
396 
/* TX kickstart hook; the SCC needs no action here. */
static void tx_kickstart(struct net_device *dev)
{
	/* nothing */
}
401 
402 static u32 get_int_events(struct net_device *dev)
403 {
404 	struct fs_enet_private *fep = netdev_priv(dev);
405 	scc_t __iomem *sccp = fep->scc.sccp;
406 
407 	return (u32) R16(sccp, scc_scce);
408 }
409 
410 static void clear_int_events(struct net_device *dev, u32 int_events)
411 {
412 	struct fs_enet_private *fep = netdev_priv(dev);
413 	scc_t __iomem *sccp = fep->scc.sccp;
414 
415 	W16(sccp, scc_scce, int_events & 0xffff);
416 }
417 
418 static void ev_error(struct net_device *dev, u32 int_events)
419 {
420 	struct fs_enet_private *fep = netdev_priv(dev);
421 
422 	dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
423 }
424 
425 static int get_regs(struct net_device *dev, void *p, int *sizep)
426 {
427 	struct fs_enet_private *fep = netdev_priv(dev);
428 
429 	if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
430 		return -EINVAL;
431 
432 	memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
433 	p = (char *)p + sizeof(scc_t);
434 
435 	memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));
436 
437 	return 0;
438 }
439 
440 static int get_regs_len(struct net_device *dev)
441 {
442 	return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
443 }
444 
445 static void tx_restart(struct net_device *dev)
446 {
447 	struct fs_enet_private *fep = netdev_priv(dev);
448 
449 	scc_cr_cmd(fep, CPM_CR_RESTART_TX);
450 }
451 
452 
453 
454 /*************************************************************************/
455 
456 const struct fs_ops fs_scc_ops = {
457 	.setup_data		= setup_data,
458 	.cleanup_data		= cleanup_data,
459 	.set_multicast_list	= set_multicast_list,
460 	.restart		= restart,
461 	.stop			= stop,
462 	.napi_clear_event	= napi_clear_event_fs,
463 	.napi_enable		= napi_enable_fs,
464 	.napi_disable		= napi_disable_fs,
465 	.rx_bd_done		= rx_bd_done,
466 	.tx_kickstart		= tx_kickstart,
467 	.get_int_events		= get_int_events,
468 	.clear_int_events	= clear_int_events,
469 	.ev_error		= ev_error,
470 	.get_regs		= get_regs,
471 	.get_regs_len		= get_regs_len,
472 	.tx_restart		= tx_restart,
473 	.allocate_bd		= allocate_bd,
474 	.free_bd		= free_bd,
475 };
476