xref: /freebsd/sys/dev/mge/if_mge.c (revision d25a708ba7737cd31dfc109f82efed4713290e49)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5  * Copyright (C) 2009-2015 Semihalf
6  * Copyright (C) 2015 Stormshield
7  * All rights reserved.
8  *
9  * Developed by Semihalf.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of MARVELL nor the names of contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/endian.h>
43 #include <sys/mbuf.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/kernel.h>
47 #include <sys/module.h>
48 #include <sys/socket.h>
49 #include <sys/sysctl.h>
50 
51 #include <net/ethernet.h>
52 #include <net/bpf.h>
53 #include <net/if.h>
54 #include <net/if_arp.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/if_vlan_var.h>
59 
60 #include <netinet/in_systm.h>
61 #include <netinet/in.h>
62 #include <netinet/ip.h>
63 
64 #include <sys/sockio.h>
65 #include <sys/bus.h>
66 #include <machine/bus.h>
67 #include <sys/rman.h>
68 #include <machine/resource.h>
69 
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
72 
73 #include <dev/fdt/fdt_common.h>
74 #include <dev/ofw/ofw_bus.h>
75 #include <dev/ofw/ofw_bus_subr.h>
76 #include <dev/mdio/mdio.h>
77 
78 #include <dev/mge/if_mgevar.h>
79 #include <arm/mv/mvreg.h>
80 #include <arm/mv/mvvar.h>
81 
82 #include "miibus_if.h"
83 #include "mdio_if.h"
84 
85 #define	MGE_DELAY(x)	pause("SMI access sleep", (x) / tick_sbt)
86 
87 static int mge_probe(device_t dev);
88 static int mge_attach(device_t dev);
89 static int mge_detach(device_t dev);
90 static int mge_shutdown(device_t dev);
91 static int mge_suspend(device_t dev);
92 static int mge_resume(device_t dev);
93 
94 static int mge_miibus_readreg(device_t dev, int phy, int reg);
95 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
96 
97 static int mge_mdio_readreg(device_t dev, int phy, int reg);
98 static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);
99 
100 static int mge_ifmedia_upd(if_t ifp);
101 static void mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
102 
103 static void mge_init(void *arg);
104 static void mge_init_locked(void *arg);
105 static void mge_start(if_t ifp);
106 static void mge_start_locked(if_t ifp);
107 static void mge_watchdog(struct mge_softc *sc);
108 static int mge_ioctl(if_t ifp, u_long command, caddr_t data);
109 
110 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
111 static uint32_t mge_rx_ipg(uint32_t val, int ver);
112 static void mge_ver_params(struct mge_softc *sc);
113 
114 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
115 static void mge_intr_rxtx(void *arg);
116 static void mge_intr_rx(void *arg);
117 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
118     uint32_t int_cause_ext);
119 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
120 static void mge_intr_tx(void *arg);
121 static void mge_intr_tx_locked(struct mge_softc *sc);
122 static void mge_intr_misc(void *arg);
123 static void mge_intr_sum(void *arg);
124 static void mge_intr_err(void *arg);
125 static void mge_stop(struct mge_softc *sc);
126 static void mge_tick(void *msc);
127 static uint32_t mge_set_port_serial_control(uint32_t media);
128 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
129 static void mge_set_mac_address(struct mge_softc *sc);
130 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
131     uint8_t queue);
132 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
133 static int mge_allocate_dma(struct mge_softc *sc);
134 static int mge_alloc_desc_dma(struct mge_softc *sc,
135     struct mge_desc_wrapper* desc_tab, uint32_t size,
136     bus_dma_tag_t *buffer_tag);
137 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
138     struct mbuf **mbufp, bus_addr_t *paddr);
139 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
140     int error);
141 static void mge_free_dma(struct mge_softc *sc);
142 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
143     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
144 static void mge_offload_process_frame(if_t ifp, struct mbuf *frame,
145     uint32_t status, uint16_t bufsize);
146 static void mge_offload_setup_descriptor(struct mge_softc *sc,
147     struct mge_desc_wrapper *dw);
148 static uint8_t mge_crc8(uint8_t *data, int size);
149 static void mge_setup_multicast(struct mge_softc *sc);
150 static void mge_set_rxic(struct mge_softc *sc);
151 static void mge_set_txic(struct mge_softc *sc);
152 static void mge_add_sysctls(struct mge_softc *sc);
153 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
154 
155 static device_method_t mge_methods[] = {
156 	/* Device interface */
157 	DEVMETHOD(device_probe,		mge_probe),
158 	DEVMETHOD(device_attach,	mge_attach),
159 	DEVMETHOD(device_detach,	mge_detach),
160 	DEVMETHOD(device_shutdown,	mge_shutdown),
161 	DEVMETHOD(device_suspend,	mge_suspend),
162 	DEVMETHOD(device_resume,	mge_resume),
163 	/* MII interface */
164 	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
165 	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
166 	/* MDIO interface */
167 	DEVMETHOD(mdio_readreg,		mge_mdio_readreg),
168 	DEVMETHOD(mdio_writereg,	mge_mdio_writereg),
169 	{ 0, 0 }
170 };
171 
172 DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));
173 
174 static int switch_attached = 0;
175 
176 DRIVER_MODULE(mge, simplebus, mge_driver, 0, 0);
177 DRIVER_MODULE(miibus, mge, miibus_driver, 0, 0);
178 DRIVER_MODULE(mdio, mge, mdio_driver, 0, 0);
179 MODULE_DEPEND(mge, ether, 1, 1, 1);
180 MODULE_DEPEND(mge, miibus, 1, 1, 1);
181 MODULE_DEPEND(mge, mdio, 1, 1, 1);
182 
183 static struct resource_spec res_spec[] = {
184 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
185 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
186 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
187 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
188 	{ -1, 0 }
189 };
190 
191 static struct {
192 	driver_intr_t *handler;
193 	char * description;
194 } mge_intrs[MGE_INTR_COUNT + 1] = {
195 	{ mge_intr_rxtx,"GbE aggregated interrupt" },
196 	{ mge_intr_rx,	"GbE receive interrupt" },
197 	{ mge_intr_tx,	"GbE transmit interrupt" },
198 	{ mge_intr_misc,"GbE misc interrupt" },
199 	{ mge_intr_sum,	"GbE summary interrupt" },
200 	{ mge_intr_err,	"GbE error interrupt" },
201 };
202 
203 /* SMI access interlock */
204 static struct sx sx_smi;
205 
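/*
 * Read a PHY register through the controller's SMI interface.  Polling
 * uses the sleepable MGE_DELAY() (pause(9)) and all SMI traffic is
 * serialized by the SMI interlock, so this may only be called from a
 * context that is allowed to sleep.
 */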
206 static uint32_t
207 mv_read_ge_smi(device_t dev, int phy, int reg)
208 {
209 	uint32_t timeout;
210 	uint32_t ret;
211 	struct mge_softc *sc;
212 
213 	sc = device_get_softc(dev);
214 	KASSERT(sc != NULL, ("NULL softc ptr!"));
215 	timeout = MGE_SMI_WRITE_RETRIES;
216 
217 	MGE_SMI_LOCK();
218 	while (--timeout &&
219 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
220 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
221 
222 	if (timeout == 0) {
223 		device_printf(dev, "SMI read timeout.\n");
224 		ret = ~0U;
225 		goto out;
226 	}
227 
228 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
229 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
230 
231 	/* Wait till finished. */
232 	timeout = MGE_SMI_WRITE_RETRIES;
233 	while (--timeout &&
234 	    !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
235 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
236 
237 	if (timeout == 0) {
238 		device_printf(dev, "SMI read validation timeout.\n");
239 		ret = ~0U;
240 		goto out;
241 	}
242 
243 	/* Wait for the data to update in the SMI register */
244 	MGE_DELAY(MGE_SMI_DELAY);
245 	ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
246 
247 out:
248 	MGE_SMI_UNLOCK();
249 	return (ret);
250 
251 }
252 
253 static void
254 mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
255 {
256 	uint32_t timeout;
257 	struct mge_softc *sc;
258 
259 	sc = device_get_softc(dev);
260 	KASSERT(sc != NULL, ("NULL softc ptr!"));
261 
262 	MGE_SMI_LOCK();
263 	timeout = MGE_SMI_READ_RETRIES;
264 	while (--timeout &&
265 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
266 		MGE_DELAY(MGE_SMI_READ_DELAY);
267 
268 	if (timeout == 0) {
269 		device_printf(dev, "SMI write timeout.\n");
270 		goto out;
271 	}
272 
273 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
274 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
275 	    (value & MGE_SMI_DATA_MASK)));
276 
277 out:
278 	MGE_SMI_UNLOCK();
279 }
280 
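/*
 * mv_read_ext_phy() and mv_write_ext_phy() access an external PHY via
 * the SMI registers of the controller that owns the MDIO bus (phy_sc).
 * Unlike the mv_*_ge_smi() helpers above, they busy-wait with DELAY()
 * between polls.
 */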
281 static int
282 mv_read_ext_phy(device_t dev, int phy, int reg)
283 {
284 	uint32_t retries;
285 	struct mge_softc *sc;
286 	uint32_t ret;
287 
288 	sc = device_get_softc(dev);
289 
290 	MGE_SMI_LOCK();
291 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
292 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
293 
294 	retries = MGE_SMI_READ_RETRIES;
295 	while (--retries &&
296 	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
297 		DELAY(MGE_SMI_READ_DELAY);
298 
299 	if (retries == 0)
300 		device_printf(dev, "Timeout while reading from PHY\n");
301 
302 	ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
303 	MGE_SMI_UNLOCK();
304 
305 	return (ret);
306 }
307 
308 static void
309 mv_write_ext_phy(device_t dev, int phy, int reg, int value)
310 {
311 	uint32_t retries;
312 	struct mge_softc *sc;
313 
314 	sc = device_get_softc(dev);
315 
316 	MGE_SMI_LOCK();
317 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
318 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
319 	    (value & MGE_SMI_DATA_MASK)));
320 
321 	retries = MGE_SMI_WRITE_RETRIES;
322 	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
323 		DELAY(MGE_SMI_WRITE_DELAY);
324 
325 	if (retries == 0)
326 		device_printf(dev, "Timeout while writing to PHY\n");
327 	MGE_SMI_UNLOCK();
328 }
329 
330 static void
331 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
332 {
333 	uint32_t mac_l, mac_h;
334 	uint8_t lmac[6];
335 	int i, valid;
336 
337 	/*
338 	 * Retrieve hw address from the device tree.
339 	 */
340 	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
341 	if (i == 6) {
342 		valid = 0;
343 		for (i = 0; i < 6; i++)
344 			if (lmac[i] != 0) {
345 				valid = 1;
346 				break;
347 			}
348 
349 		if (valid) {
350 			bcopy(lmac, addr, 6);
351 			return;
352 		}
353 	}
354 
355 	/*
356 	 * Fall back -- use the currently programmed address.
357 	 */
358 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
359 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
360 
361 	addr[0] = (mac_h & 0xff000000) >> 24;
362 	addr[1] = (mac_h & 0x00ff0000) >> 16;
363 	addr[2] = (mac_h & 0x0000ff00) >> 8;
364 	addr[3] = (mac_h & 0x000000ff);
365 	addr[4] = (mac_l & 0x0000ff00) >> 8;
366 	addr[5] = (mac_l & 0x000000ff);
367 }
368 
369 static uint32_t
370 mge_tfut_ipg(uint32_t val, int ver)
371 {
372 
373 	switch (ver) {
374 	case 1:
375 		return ((val & 0x3fff) << 4);
376 	case 2:
377 	default:
378 		return ((val & 0xffff) << 4);
379 	}
380 }
381 
382 static uint32_t
383 mge_rx_ipg(uint32_t val, int ver)
384 {
385 
386 	switch (ver) {
387 	case 1:
388 		return ((val & 0x3fff) << 8);
389 	case 2:
390 	default:
391 		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
392 	}
393 }
394 
395 static void
396 mge_ver_params(struct mge_softc *sc)
397 {
398 	uint32_t d, r;
399 
400 	soc_id(&d, &r);
401 	sc->mge_ver = 1;
402 	sc->mge_mtu = 0x458;
403 	sc->mge_tfut_ipg_max = 0x3FFF;
404 	sc->mge_rx_ipg_max = 0x3FFF;
405 	sc->mge_tx_arb_cfg = 0x000000FF;
406 	sc->mge_tx_tok_cfg = 0x3FFFFFFF;
407 	sc->mge_tx_tok_cnt = 0x3FFFFFFF;
408 	sc->mge_intr_cnt = 2;
409 
410 	if (d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
411 		sc->mge_hw_csum = 0;
412 	else
413 		sc->mge_hw_csum = 1;
414 }
415 
416 static void
417 mge_set_mac_address(struct mge_softc *sc)
418 {
419 	char *if_mac;
420 	uint32_t mac_l, mac_h;
421 
422 	MGE_GLOBAL_LOCK_ASSERT(sc);
423 
424 	if_mac = (char *)if_getlladdr(sc->ifp);
425 
426 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
427 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
428 	    (if_mac[2] << 8) | (if_mac[3] << 0);
429 
430 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
431 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
432 
433 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
434 }
435 
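/*
 * Program the unicast DA filter table: the low nibble of the last MAC
 * address byte selects the table entry; each 8-bit entry carries a pass
 * bit and the target RX queue.  All other entries are cleared.
 */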
436 static void
437 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
438 {
439 	uint32_t reg_idx, reg_off, reg_val, i;
440 
441 	last_byte &= 0xf;
442 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
443 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
444 	reg_val = (1 | (queue << 1)) << reg_off;
445 
446 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
447 		if (i == reg_idx)
448 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
449 		else
450 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
451 	}
452 }
453 
454 static void
455 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
456 {
457 	uint32_t port_config;
458 	uint32_t reg_val, i;
459 
460 	/* Enable or disable promiscuous mode as needed */
461 	if (if_getflags(sc->ifp) & IFF_PROMISC) {
462 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
463 		port_config |= PORT_CONFIG_UPM;
464 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
465 
466 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
467 		   (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
468 
469 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
470 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
471 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
472 		}
473 
474 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
475 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
476 
477 	} else {
478 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
479 		port_config &= ~PORT_CONFIG_UPM;
480 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
481 
482 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
483 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
484 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
485 		}
486 
487 		mge_set_mac_address(sc);
488 	}
489 }
490 
491 static void
492 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
493 {
494 	u_int32_t *paddr;
495 
496 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
497 	paddr = arg;
498 
499 	*paddr = segs->ds_addr;
500 }
501 
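/*
 * Attach a fresh mbuf cluster to an RX slot: unload any previous
 * mapping, DMA-load the new cluster and return its bus address for use
 * in the RX descriptor.
 */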
502 static int
503 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
504     bus_addr_t *paddr)
505 {
506 	struct mbuf *new_mbuf;
507 	bus_dma_segment_t seg[1];
508 	int error;
509 	int nsegs;
510 
511 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
512 
513 	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
514 	if (new_mbuf == NULL)
515 		return (ENOBUFS);
516 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
517 
518 	if (*mbufp) {
519 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
520 		bus_dmamap_unload(tag, map);
521 	}
522 
523 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
524 	    BUS_DMA_NOWAIT);
525 	KASSERT(nsegs == 1, ("Too many segments returned!"));
526 	if (nsegs != 1 || error)
527 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
528 
529 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
530 
531 	(*mbufp) = new_mbuf;
532 	(*paddr) = seg->ds_addr;
533 	return (0);
534 }
535 
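/*
 * Allocate the descriptor ring in DMA-coherent memory and link it into
 * a circular chain (each descriptor points to the next, the last one
 * back to the first), then create a busdma tag and per-descriptor maps
 * for the attached mbufs.
 */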
536 static int
537 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
538     uint32_t size, bus_dma_tag_t *buffer_tag)
539 {
540 	struct mge_desc_wrapper *dw;
541 	bus_addr_t desc_paddr;
542 	int i, error;
543 
544 	desc_paddr = 0;
545 	for (i = size - 1; i >= 0; i--) {
546 		dw = &(tab[i]);
547 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
548 		    (void**)&(dw->mge_desc),
549 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
550 		    &(dw->desc_dmap));
551 
552 		if (error) {
553 			if_printf(sc->ifp, "failed to allocate DMA memory\n");
554 			dw->mge_desc = NULL;
555 			return (ENXIO);
556 		}
557 
558 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
559 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
560 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
561 
562 		if (error) {
563 			if_printf(sc->ifp, "can't load descriptor\n");
564 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
565 			    dw->desc_dmap);
566 			dw->mge_desc = NULL;
567 			return (ENXIO);
568 		}
569 
570 		/* Chain descriptors */
571 		dw->mge_desc->next_desc = desc_paddr;
572 		desc_paddr = dw->mge_desc_paddr;
573 	}
574 	tab[size - 1].mge_desc->next_desc = desc_paddr;
575 
576 	/* Allocate a busdma tag for mbufs. */
577 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
578 	    1, 0,				/* alignment, boundary */
579 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
580 	    BUS_SPACE_MAXADDR,			/* highaddr */
581 	    NULL, NULL,				/* filtfunc, filtfuncarg */
582 	    MCLBYTES, 1,			/* maxsize, nsegments */
583 	    MCLBYTES, 0,			/* maxsegsz, flags */
584 	    NULL, NULL,				/* lockfunc, lockfuncarg */
585 	    buffer_tag);			/* dmat */
586 	if (error) {
587 		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
588 		return (ENXIO);
589 	}
590 
591 	/* Create TX busdma maps */
592 	for (i = 0; i < size; i++) {
593 		dw = &(tab[i]);
594 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
595 		if (error) {
596 			if_printf(sc->ifp, "failed to create map for mbuf\n");
597 			return (ENXIO);
598 		}
599 
600 		dw->buffer = (struct mbuf*)NULL;
601 		dw->mge_desc->buffer = (bus_addr_t)NULL;
602 	}
603 
604 	return (0);
605 }
606 
607 static int
608 mge_allocate_dma(struct mge_softc *sc)
609 {
610 	struct mge_desc_wrapper *dw;
611 	int i;
612 
613 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
614 	bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
615 	    16, 0,				/* alignment, boundary */
616 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
617 	    BUS_SPACE_MAXADDR,			/* highaddr */
618 	    NULL, NULL,				/* filtfunc, filtfuncarg */
619 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
620 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
621 	    NULL, NULL,				/* lockfunc, lockfuncarg */
622 	    &sc->mge_desc_dtag);		/* dmat */
623 
624 
625 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
626 	    &sc->mge_tx_dtag);
627 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
628 	    &sc->mge_rx_dtag);
629 
630 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
631 		dw = &(sc->mge_rx_desc[i]);
632 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
633 		    &dw->mge_desc->buffer);
634 	}
635 
636 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
637 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
638 
639 	return (0);
640 }
641 
642 static void
643 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
644     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
645 {
646 	struct mge_desc_wrapper *dw;
647 	int i;
648 
649 	for (i = 0; i < size; i++) {
650 		/* Free RX mbuf */
651 		dw = &(tab[i]);
652 
653 		if (dw->buffer_dmap) {
654 			if (free_mbufs) {
655 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
656 				    BUS_DMASYNC_POSTREAD);
657 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
658 			}
659 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
660 			if (free_mbufs)
661 				m_freem(dw->buffer);
662 		}
663 		/* Free RX descriptors */
664 		if (dw->desc_dmap) {
665 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
666 			    BUS_DMASYNC_POSTREAD);
667 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
668 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
669 			    dw->desc_dmap);
670 		}
671 	}
672 }
673 
674 static void
675 mge_free_dma(struct mge_softc *sc)
676 {
677 
678 	/* Free descriptors and mbufs */
679 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
680 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
681 
682 	/* Destroy mbuf dma tag */
683 	bus_dma_tag_destroy(sc->mge_tx_dtag);
684 	bus_dma_tag_destroy(sc->mge_rx_dtag);
685 	/* Destroy descriptors tag */
686 	bus_dma_tag_destroy(sc->mge_desc_dtag);
687 }
688 
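/*
 * Rebuild the RX ring from scratch after a resource error: free the old
 * descriptors and mbufs, allocate new ones and re-enable the RX queue.
 */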
689 static void
690 mge_reinit_rx(struct mge_softc *sc)
691 {
692 	struct mge_desc_wrapper *dw;
693 	int i;
694 
695 	MGE_RECEIVE_LOCK_ASSERT(sc);
696 
697 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
698 
699 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
700 	    &sc->mge_rx_dtag);
701 
702 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
703 		dw = &(sc->mge_rx_desc[i]);
704 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
705 		    &dw->mge_desc->buffer);
706 	}
707 
708 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
709 	sc->rx_desc_curr = 0;
710 
711 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
712 	    sc->rx_desc_start);
713 
714 	/* Enable RX queue */
715 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
716 }
717 
718 #ifdef DEVICE_POLLING
719 static poll_handler_t mge_poll;
720 
721 static int
722 mge_poll(if_t ifp, enum poll_cmd cmd, int count)
723 {
724 	struct mge_softc *sc = if_getsoftc(ifp);
725 	uint32_t int_cause, int_cause_ext;
726 	int rx_npkts = 0;
727 
728 	MGE_RECEIVE_LOCK(sc);
729 
730 	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
731 		MGE_RECEIVE_UNLOCK(sc);
732 		return (rx_npkts);
733 	}
734 
735 	if (cmd == POLL_AND_CHECK_STATUS) {
736 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
737 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
738 
739 		/* Check for resource error */
740 		if (int_cause & MGE_PORT_INT_RXERRQ0)
741 			mge_reinit_rx(sc);
742 
743 		if (int_cause || int_cause_ext) {
744 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
745 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
746 		}
747 	}
748 
749 
750 	rx_npkts = mge_intr_rx_locked(sc, count);
751 
752 	MGE_RECEIVE_UNLOCK(sc);
753 	MGE_TRANSMIT_LOCK(sc);
754 	mge_intr_tx_locked(sc);
755 	MGE_TRANSMIT_UNLOCK(sc);
756 	return (rx_npkts);
757 }
758 #endif /* DEVICE_POLLING */
759 
760 static int
761 mge_attach(device_t dev)
762 {
763 	struct mge_softc *sc;
764 	struct mii_softc *miisc;
765 	if_t ifp;
766 	uint8_t hwaddr[ETHER_ADDR_LEN];
767 	int i, error, phy;
768 
769 	sc = device_get_softc(dev);
770 	sc->dev = dev;
771 	sc->node = ofw_bus_get_node(dev);
772 	phy = 0;
773 
774 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
775 		device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
776 		    device_get_nameunit(sc->phy_sc->dev));
777 		sc->phy_attached = 1;
778 	} else {
779 		device_printf(dev, "PHY not attached.\n");
780 		sc->phy_attached = 0;
781 		sc->phy_sc = sc;
782 	}
783 
784 	if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
785 		device_printf(dev, "Switch attached.\n");
786 		sc->switch_attached = 1;
787 		/* additional variable available across instances */
788 		switch_attached = 1;
789 	} else {
790 		sc->switch_attached = 0;
791 	}
792 
793 	if (device_get_unit(dev) == 0) {
794 		sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
795 	}
796 
797 	/* Set chip version-dependent parameters */
798 	mge_ver_params(sc);
799 
800 	/* Initialize mutexes */
801 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
802 	    MTX_DEF);
803 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
804 	    MTX_DEF);
805 
806 	/* Allocate IO and IRQ resources */
807 	error = bus_alloc_resources(dev, res_spec, sc->res);
808 	if (error) {
809 		device_printf(dev, "could not allocate resources\n");
810 		mge_detach(dev);
811 		return (ENXIO);
812 	}
813 
814 	/* Allocate DMA, buffers, buffer descriptors */
815 	error = mge_allocate_dma(sc);
816 	if (error) {
817 		mge_detach(dev);
818 		return (ENXIO);
819 	}
820 
821 	sc->tx_desc_curr = 0;
822 	sc->rx_desc_curr = 0;
823 	sc->tx_desc_used_idx = 0;
824 	sc->tx_desc_used_count = 0;
825 
826 	/* Configure defaults for interrupts coalescing */
827 	sc->rx_ic_time = 768;
828 	sc->tx_ic_time = 768;
829 	mge_add_sysctls(sc);
830 
831 	/* Allocate network interface */
832 	ifp = sc->ifp = if_alloc(IFT_ETHER);
833 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
834 	if_setsoftc(ifp, sc);
835 	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
836 	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
837 	if (sc->mge_hw_csum) {
838 		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
839 		if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
840 	}
841 	if_setcapenable(ifp, if_getcapabilities(ifp));
842 
843 #ifdef DEVICE_POLLING
844 	/* Advertise that polling is supported */
845 	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
846 #endif
847 
848 	if_setinitfn(ifp, mge_init);
849 	if_setstartfn(ifp, mge_start);
850 	if_setioctlfn(ifp, mge_ioctl);
851 
852 	if_setsendqlen(ifp, MGE_TX_DESC_NUM - 1);
853 	if_setsendqready(ifp);
854 
855 	mge_get_mac_address(sc, hwaddr);
856 	ether_ifattach(ifp, hwaddr);
857 	callout_init(&sc->wd_callout, 1);
858 
859 	/* Attach PHY(s) */
860 	if (sc->phy_attached) {
861 		error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
862 		    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
863 		if (error) {
864 			device_printf(dev, "MII failed to find PHY\n");
865 			if_free(ifp);
866 			sc->ifp = NULL;
867 			mge_detach(dev);
868 			return (error);
869 		}
870 		sc->mii = device_get_softc(sc->miibus);
871 
872 		/* Tell the MAC where to find the PHY so autoneg works */
873 		miisc = LIST_FIRST(&sc->mii->mii_phys);
874 		MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
875 	} else {
876 		/* no PHY, so use hard-coded values */
877 		ifmedia_init(&sc->mge_ifmedia, 0,
878 		    mge_ifmedia_upd,
879 		    mge_ifmedia_sts);
880 		ifmedia_add(&sc->mge_ifmedia,
881 		    IFM_ETHER | IFM_1000_T | IFM_FDX,
882 		    0, NULL);
883 		ifmedia_set(&sc->mge_ifmedia,
884 		    IFM_ETHER | IFM_1000_T | IFM_FDX);
885 	}
886 
887 	/* Attach interrupt handlers */
888 	/* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
889 	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
890 		error = bus_setup_intr(dev, sc->res[i],
891 		    INTR_TYPE_NET | INTR_MPSAFE,
892 		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
893 		    sc, &sc->ih_cookie[i - 1]);
894 		if (error) {
895 			device_printf(dev, "could not setup %s\n",
896 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
897 			mge_detach(dev);
898 			return (error);
899 		}
900 	}
901 
902 	if (sc->switch_attached) {
903 		MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
904 		device_add_child(dev, "mdio", DEVICE_UNIT_ANY);
905 		bus_attach_children(dev);
906 	}
907 
908 	return (0);
909 }
910 
911 static int
912 mge_detach(device_t dev)
913 {
914 	struct mge_softc *sc;
915 	int error,i;
916 
917 	sc = device_get_softc(dev);
918 
919 	/* Stop controller and free TX queue */
920 	if (sc->ifp)
921 		mge_shutdown(dev);
922 
923 	/* Wait for stopping ticks */
924 	callout_drain(&sc->wd_callout);
925 
926 	/* Stop and release all interrupts */
927 	for (i = 0; i < sc->mge_intr_cnt; ++i) {
928 		if (!sc->ih_cookie[i])
929 			continue;
930 
931 		error = bus_teardown_intr(dev, sc->res[1 + i],
932 		    sc->ih_cookie[i]);
933 		if (error)
934 			device_printf(dev, "could not release %s\n",
935 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
936 	}
937 
938 	/* Detach network interface */
939 	if (sc->ifp) {
940 		ether_ifdetach(sc->ifp);
941 		if_free(sc->ifp);
942 	}
943 
944 	/* Free DMA resources */
945 	mge_free_dma(sc);
946 
947 	/* Free IO memory handler */
948 	bus_release_resources(dev, res_spec, sc->res);
949 
950 	/* Destroy mutexes */
951 	mtx_destroy(&sc->receive_lock);
952 	mtx_destroy(&sc->transmit_lock);
953 
954 	if (device_get_unit(dev) == 0)
955 		sx_destroy(&sx_smi);
956 
957 	return (0);
958 }
959 
960 static void
961 mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
962 {
963 	struct mge_softc *sc;
964 	struct mii_data *mii;
965 
966 	sc = if_getsoftc(ifp);
967 	MGE_GLOBAL_LOCK(sc);
968 
969 	if (!sc->phy_attached) {
970 		ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
971 		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
972 		goto out_unlock;
973 	}
974 
975 	mii = sc->mii;
976 	mii_pollstat(mii);
977 
978 	ifmr->ifm_active = mii->mii_media_active;
979 	ifmr->ifm_status = mii->mii_media_status;
980 
981 out_unlock:
982 	MGE_GLOBAL_UNLOCK(sc);
983 }
984 
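/*
 * Translate an ifmedia word into the port serial control register value
 * (speed, autonegotiation and duplex bits).
 */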
985 static uint32_t
986 mge_set_port_serial_control(uint32_t media)
987 {
988 	uint32_t port_config;
989 
990 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
991 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
992 
993 	if (IFM_TYPE(media) == IFM_ETHER) {
994 		switch(IFM_SUBTYPE(media)) {
995 			case IFM_AUTO:
996 				break;
997 			case IFM_1000_T:
998 				port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
999 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1000 				    | PORT_SERIAL_SPEED_AUTONEG);
1001 				break;
1002 			case IFM_100_TX:
1003 				port_config  |= (PORT_SERIAL_MII_SPEED_100 |
1004 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1005 				    | PORT_SERIAL_SPEED_AUTONEG);
1006 				break;
1007 			case IFM_10_T:
1008 				port_config  |= (PORT_SERIAL_AUTONEG |
1009 				    PORT_SERIAL_AUTONEG_FC |
1010 				    PORT_SERIAL_SPEED_AUTONEG);
1011 				break;
1012 		}
1013 		if (media & IFM_FDX)
1014 			port_config |= PORT_SERIAL_FULL_DUPLEX;
1015 	}
1016 	return (port_config);
1017 }
1018 
1019 static int
1020 mge_ifmedia_upd(if_t ifp)
1021 {
1022 	struct mge_softc *sc = if_getsoftc(ifp);
1023 
1024 	/*
1025 	 * Do not do anything for switch here, as updating media between
1026 	 * MGE MAC and switch MAC is hardcoded in PCB. Changing it here would
1027 	 * break the link.
1028 	 */
1029 	if (sc->phy_attached) {
1030 		MGE_GLOBAL_LOCK(sc);
1031 		if (if_getflags(ifp) & IFF_UP) {
1032 			sc->mge_media_status = sc->mii->mii_media.ifm_media;
1033 			mii_mediachg(sc->mii);
1034 
1035 			/* MGE MAC needs to be reinitialized. */
1036 			mge_init_locked(sc);
1037 
1038 		}
1039 		MGE_GLOBAL_UNLOCK(sc);
1040 	}
1041 
1042 	return (0);
1043 }
1044 
1045 static void
1046 mge_init(void *arg)
1047 {
1048 	struct mge_softc *sc;
1049 
1050 	sc = arg;
1051 	MGE_GLOBAL_LOCK(sc);
1052 
1053 	mge_init_locked(arg);
1054 
1055 	MGE_GLOBAL_UNLOCK(sc);
1056 }
1057 
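/*
 * Full (re)initialization of the controller, called with the global
 * lock held: stop the port, program the MAC address and filters, the TX
 * queue, SDMA and port serial control registers, rearm the RX ring,
 * wait for link-up and finally re-enable interrupt coalescing and
 * interrupts (unless polling is active).
 */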
1058 static void
1059 mge_init_locked(void *arg)
1060 {
1061 	struct mge_softc *sc = arg;
1062 	struct mge_desc_wrapper *dw;
1063 	volatile uint32_t reg_val;
1064 	int i, count;
1065 	uint32_t media_status;
1066 
1067 
1068 	MGE_GLOBAL_LOCK_ASSERT(sc);
1069 
1070 	/* Stop interface */
1071 	mge_stop(sc);
1072 
1073 	/* Disable interrupts */
1074 	mge_intrs_ctrl(sc, 0);
1075 
1076 	/* Set MAC address */
1077 	mge_set_mac_address(sc);
1078 
1079 	/* Setup multicast filters */
1080 	mge_setup_multicast(sc);
1081 
1082 	if (sc->mge_ver == 2) {
1083 		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
1084 		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
1085 	}
1086 
1087 	/* Initialize TX queue configuration registers */
1088 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
1089 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
1090 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
1091 
1092 	/* Clear TX queue configuration registers for unused queues */
1093 	for (i = 1; i < 7; i++) {
1094 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
1095 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
1096 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
1097 	}
1098 
1099 	/* Set default MTU */
1100 	MGE_WRITE(sc, sc->mge_mtu, 0);
1101 
1102 	/* Port configuration */
1103 	MGE_WRITE(sc, MGE_PORT_CONFIG,
1104 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
1105 	    PORT_CONFIG_ARO_RXQ(0));
1106 	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
1107 
1108 	/* Configure promisc mode */
1109 	mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);
1110 
1111 	media_status = sc->mge_media_status;
1112 	if (sc->switch_attached) {
1113 		media_status &= ~IFM_TMASK;
1114 		media_status |= IFM_1000_T;
1115 	}
1116 
1117 	/* Setup port configuration */
1118 	reg_val = mge_set_port_serial_control(media_status);
1119 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1120 
1121 	/* Setup SDMA configuration */
1122 	MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
1123 	    MGE_SDMA_TX_BYTE_SWAP |
1124 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
1125 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
1126 
1127 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
1128 
1129 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
1130 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
1131 	    sc->rx_desc_start);
1132 
1133 	/* Reset descriptor indexes */
1134 	sc->tx_desc_curr = 0;
1135 	sc->rx_desc_curr = 0;
1136 	sc->tx_desc_used_idx = 0;
1137 	sc->tx_desc_used_count = 0;
1138 
1139 	/* Enable RX descriptors */
1140 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
1141 		dw = &sc->mge_rx_desc[i];
1142 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1143 		dw->mge_desc->buff_size = MCLBYTES;
1144 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1145 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1146 	}
1147 
1148 	/* Enable RX queue */
1149 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
1150 
1151 	/* Enable port */
1152 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1153 	reg_val |= PORT_SERIAL_ENABLE;
1154 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1155 	count = 0x100000;
1156 	for (;;) {
1157 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1158 		if (reg_val & MGE_STATUS_LINKUP)
1159 			break;
1160 		DELAY(100);
1161 		if (--count == 0) {
1162 			if_printf(sc->ifp, "Timeout on link-up\n");
1163 			break;
1164 		}
1165 	}
1166 
1167 	/* Setup interrupts coalescing */
1168 	mge_set_rxic(sc);
1169 	mge_set_txic(sc);
1170 
1171 	/* Enable interrupts */
1172 #ifdef DEVICE_POLLING
1173 	/*
1174 	 * ...only if polling is not turned on. Disable interrupts explicitly
1175 	 * if polling is enabled.
1176 	 */
1177 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1178 		mge_intrs_ctrl(sc, 0);
1179 	else
1180 #endif /* DEVICE_POLLING */
1181 	mge_intrs_ctrl(sc, 1);
1182 
1183 	/* Activate network interface */
1184 	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0);
1185 	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
1186 	sc->wd_timer = 0;
1187 
1188 	/* Schedule watchdog timeout */
1189 	if (sc->phy_attached)
1190 		callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1191 }
1192 
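/*
 * Aggregated RX/TX interrupt handler; installed when only a single
 * combined interrupt line is set up (see the handler selection in
 * mge_attach()).
 */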
1193 static void
1194 mge_intr_rxtx(void *arg)
1195 {
1196 	struct mge_softc *sc;
1197 	uint32_t int_cause, int_cause_ext;
1198 
1199 	sc = arg;
1200 	MGE_GLOBAL_LOCK(sc);
1201 
1202 #ifdef DEVICE_POLLING
1203 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1204 		MGE_GLOBAL_UNLOCK(sc);
1205 		return;
1206 	}
1207 #endif
1208 
1209 	/* Get interrupt cause */
1210 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1211 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1212 
1213 	/* Check for Transmit interrupt */
1214 	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1215 	    MGE_PORT_INT_EXT_TXUR)) {
1216 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1217 		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1218 		mge_intr_tx_locked(sc);
1219 	}
1220 
1221 	MGE_TRANSMIT_UNLOCK(sc);
1222 
1223 	/* Check for Receive interrupt */
1224 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1225 
1226 	MGE_RECEIVE_UNLOCK(sc);
1227 }
1228 
1229 static void
1230 mge_intr_err(void *arg)
1231 {
1232 	struct mge_softc *sc;
1233 	if_t ifp;
1234 
1235 	sc = arg;
1236 	ifp = sc->ifp;
1237 	if_printf(ifp, "%s\n", __FUNCTION__);
1238 }
1239 
1240 static void
1241 mge_intr_misc(void *arg)
1242 {
1243 	struct mge_softc *sc;
1244 	if_t ifp;
1245 
1246 	sc = arg;
1247 	ifp = sc->ifp;
1248 	if_printf(ifp, "%s\n", __FUNCTION__);
1249 }
1250 
1251 static void
1252 mge_intr_rx(void *arg) {
1253 	struct mge_softc *sc;
1254 	uint32_t int_cause, int_cause_ext;
1255 
1256 	sc = arg;
1257 	MGE_RECEIVE_LOCK(sc);
1258 
1259 #ifdef DEVICE_POLLING
1260 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1261 		MGE_RECEIVE_UNLOCK(sc);
1262 		return;
1263 	}
1264 #endif
1265 
1266 	/* Get interrupt cause */
1267 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1268 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1269 
1270 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1271 
1272 	MGE_RECEIVE_UNLOCK(sc);
1273 }
1274 
1275 static void
1276 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1277     uint32_t int_cause_ext)
1278 {
1279 	/* Check for resource error */
1280 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
1281 		mge_reinit_rx(sc);
1282 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1283 		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
1284 	}
1285 
1286 	int_cause &= MGE_PORT_INT_RXQ0;
1287 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1288 
1289 	if (int_cause || int_cause_ext) {
1290 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1291 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1292 		mge_intr_rx_locked(sc, -1);
1293 	}
1294 }
1295 
1296 static int
1297 mge_intr_rx_locked(struct mge_softc *sc, int count)
1298 {
1299 	if_t ifp = sc->ifp;
1300 	uint32_t status;
1301 	uint16_t bufsize;
1302 	struct mge_desc_wrapper* dw;
1303 	struct mbuf *mb;
1304 	int rx_npkts = 0;
1305 
1306 	MGE_RECEIVE_LOCK_ASSERT(sc);
1307 
1308 	while (count != 0) {
1309 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1310 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1311 		    BUS_DMASYNC_POSTREAD);
1312 
1313 		/* Get status */
1314 		status = dw->mge_desc->cmd_status;
1315 		bufsize = dw->mge_desc->buff_size;
1316 		if ((status & MGE_DMA_OWNED) != 0)
1317 			break;
1318 
1319 		if (dw->mge_desc->byte_count &&
1320 		    !(status & MGE_ERR_SUMMARY)) {
1321 
1322 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1323 			    BUS_DMASYNC_POSTREAD);
1324 
1325 			mb = m_devget(dw->buffer->m_data,
1326 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1327 			    0, ifp, NULL);
1328 
1329 			if (mb == NULL)
1330 				/* Give up if no mbufs */
1331 				break;
1332 
1333 			mb->m_len -= 2;
1334 			mb->m_pkthdr.len -= 2;
1335 			mb->m_data += 2;
1336 
1337 			mb->m_pkthdr.rcvif = ifp;
1338 
1339 			mge_offload_process_frame(ifp, mb, status,
1340 			    bufsize);
1341 
1342 			MGE_RECEIVE_UNLOCK(sc);
1343 			if_input(ifp, mb);
1344 			MGE_RECEIVE_LOCK(sc);
1345 			rx_npkts++;
1346 		}
1347 
1348 		dw->mge_desc->byte_count = 0;
1349 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1350 		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
1351 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1352 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1353 
1354 		if (count > 0)
1355 			count -= 1;
1356 	}
1357 
1358 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
1359 
1360 	return (rx_npkts);
1361 }
1362 
1363 static void
1364 mge_intr_sum(void *arg)
1365 {
1366 	struct mge_softc *sc = arg;
1367 	if_t ifp;
1368 
1369 	ifp = sc->ifp;
1370 	if_printf(ifp, "%s\n", __FUNCTION__);
1371 }
1372 
1373 static void
1374 mge_intr_tx(void *arg)
1375 {
1376 	struct mge_softc *sc = arg;
1377 	uint32_t int_cause_ext;
1378 
1379 	MGE_TRANSMIT_LOCK(sc);
1380 
1381 #ifdef DEVICE_POLLING
1382 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1383 		MGE_TRANSMIT_UNLOCK(sc);
1384 		return;
1385 	}
1386 #endif
1387 
1388 	/* Ack the interrupt */
1389 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1390 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1391 	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1392 
1393 	mge_intr_tx_locked(sc);
1394 
1395 	MGE_TRANSMIT_UNLOCK(sc);
1396 }
1397 
1398 static void
1399 mge_intr_tx_locked(struct mge_softc *sc)
1400 {
1401 	if_t ifp = sc->ifp;
1402 	struct mge_desc_wrapper *dw;
1403 	struct mge_desc *desc;
1404 	uint32_t status;
1405 	int send = 0;
1406 
1407 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1408 
1409 	/* Disable watchdog */
1410 	sc->wd_timer = 0;
1411 
1412 	while (sc->tx_desc_used_count) {
1413 		/* Get the descriptor */
1414 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1415 		desc = dw->mge_desc;
1416 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1417 		    BUS_DMASYNC_POSTREAD);
1418 
1419 		/* Get descriptor status */
1420 		status = desc->cmd_status;
1421 
1422 		if (status & MGE_DMA_OWNED)
1423 			break;
1424 
1425 		sc->tx_desc_used_idx =
1426 			(sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1427 		sc->tx_desc_used_count--;
1428 
1429 		/* Update collision statistics */
1430 		if (status & MGE_ERR_SUMMARY) {
1431 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1432 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1433 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1434 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
1435 		}
1436 
1437 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1438 		    BUS_DMASYNC_POSTWRITE);
1439 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1440 		m_freem(dw->buffer);
1441 		dw->buffer = (struct mbuf*)NULL;
1442 		send++;
1443 
1444 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1445 	}
1446 
1447 	if (send) {
1448 		/* Now send anything that was pending */
1449 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1450 		mge_start_locked(ifp);
1451 	}
1452 }
1453 static int
1454 mge_ioctl(if_t ifp, u_long command, caddr_t data)
1455 {
1456 	struct mge_softc *sc = if_getsoftc(ifp);
1457 	struct ifreq *ifr = (struct ifreq *)data;
1458 	int mask, error;
1459 	uint32_t flags;
1460 
1461 	error = 0;
1462 
1463 	switch (command) {
1464 	case SIOCSIFFLAGS:
1465 		MGE_GLOBAL_LOCK(sc);
1466 
1467 		if (if_getflags(ifp) & IFF_UP) {
1468 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1469 				flags = if_getflags(ifp) ^ sc->mge_if_flags;
1470 				if (flags & IFF_PROMISC)
1471 					mge_set_prom_mode(sc,
1472 					    MGE_RX_DEFAULT_QUEUE);
1473 
1474 				if (flags & IFF_ALLMULTI)
1475 					mge_setup_multicast(sc);
1476 			} else
1477 				mge_init_locked(sc);
1478 		}
1479 		else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1480 			mge_stop(sc);
1481 
1482 		sc->mge_if_flags = if_getflags(ifp);
1483 		MGE_GLOBAL_UNLOCK(sc);
1484 		break;
1485 	case SIOCADDMULTI:
1486 	case SIOCDELMULTI:
1487 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1488 			MGE_GLOBAL_LOCK(sc);
1489 			mge_setup_multicast(sc);
1490 			MGE_GLOBAL_UNLOCK(sc);
1491 		}
1492 		break;
1493 	case SIOCSIFCAP:
1494 		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
1495 		if (mask & IFCAP_HWCSUM) {
1496 			if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
1497 			if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, 0);
1498 			if (if_getcapenable(ifp) & IFCAP_TXCSUM)
1499 				if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
1500 			else
1501 				if_sethwassist(ifp, 0);
1502 		}
1503 #ifdef DEVICE_POLLING
1504 		if (mask & IFCAP_POLLING) {
1505 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1506 				error = ether_poll_register(mge_poll, ifp);
1507 				if (error)
1508 					return(error);
1509 
1510 				MGE_GLOBAL_LOCK(sc);
1511 				mge_intrs_ctrl(sc, 0);
1512 				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1513 				MGE_GLOBAL_UNLOCK(sc);
1514 			} else {
1515 				error = ether_poll_deregister(ifp);
1516 				MGE_GLOBAL_LOCK(sc);
1517 				mge_intrs_ctrl(sc, 1);
1518 				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1519 				MGE_GLOBAL_UNLOCK(sc);
1520 			}
1521 		}
1522 #endif
1523 		break;
1524 	case SIOCGIFMEDIA: /* fall through */
1525 	case SIOCSIFMEDIA:
1526 		/*
1527 		 * Setting up media type via ioctls is *not* supported for MAC
1528 		 * which is connected to switch. Use etherswitchcfg.
1529 		 */
1530 		if (!sc->phy_attached && (command == SIOCSIFMEDIA))
1531 			return (0);
1532 		else if (!sc->phy_attached) {
1533 			error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
1534 			    command);
1535 			break;
1536 		}
1537 
1538 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1539 		    && !(ifr->ifr_media & IFM_FDX)) {
1540 			device_printf(sc->dev,
1541 			    "1000baseT half-duplex unsupported\n");
1542 			return (0);
1543 		}
1544 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1545 		break;
1546 	default:
1547 		error = ether_ioctl(ifp, command, data);
1548 	}
1549 	return (error);
1550 }
1551 
1552 static int
1553 mge_miibus_readreg(device_t dev, int phy, int reg)
1554 {
1555 
1556 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1557 
1558 	return (mv_read_ext_phy(dev, phy, reg));
1559 }
1560 
1561 static int
1562 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1563 {
1564 
1565 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1566 
1567 	mv_write_ext_phy(dev, phy, reg, value);
1568 
1569 	return (0);
1570 }
1571 
1572 static int
1573 mge_probe(device_t dev)
1574 {
1575 
1576 	if (!ofw_bus_status_okay(dev))
1577 		return (ENXIO);
1578 
1579 	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1580 		return (ENXIO);
1581 
1582 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1583 	return (BUS_PROBE_DEFAULT);
1584 }
1585 
1586 static int
1587 mge_resume(device_t dev)
1588 {
1589 
1590 	device_printf(dev, "%s\n", __FUNCTION__);
1591 	return (0);
1592 }
1593 
1594 static int
1595 mge_shutdown(device_t dev)
1596 {
1597 	struct mge_softc *sc = device_get_softc(dev);
1598 
1599 	MGE_GLOBAL_LOCK(sc);
1600 
1601 #ifdef DEVICE_POLLING
1602 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1603 		ether_poll_deregister(sc->ifp);
1604 #endif
1605 
1606 	mge_stop(sc);
1607 
1608 	MGE_GLOBAL_UNLOCK(sc);
1609 
1610 	return (0);
1611 }
1612 
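/*
 * Map a packet for transmission and fill in a single TX descriptor.
 * Only single-fragment mbufs are handled; the caller defragments the
 * chain first and holds the transmit lock.
 */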
1613 static int
1614 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1615 {
1616 	struct mge_desc_wrapper *dw = NULL;
1617 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1618 	bus_dmamap_t mapp;
1619 	int error;
1620 	int seg, nsegs;
1621 	int desc_no;
1622 
1623 	/* Fetch unused map */
1624 	desc_no = sc->tx_desc_curr;
1625 	dw = &sc->mge_tx_desc[desc_no];
1626 	mapp = dw->buffer_dmap;
1627 
1628 	/* Create mapping in DMA memory */
1629 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1630 	    BUS_DMA_NOWAIT);
1631 	if (error != 0) {
1632 		m_freem(m0);
1633 		return (error);
1634 	}
1635 
1636 	/* Only one segment is supported. */
1637 	if (nsegs != 1) {
1638 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1639 		m_freem(m0);
1640 		return (-1);
1641 	}
1642 
1643 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1644 
1645 	/* Everything is ok, now we can send buffers */
1646 	for (seg = 0; seg < nsegs; seg++) {
1647 		dw->mge_desc->byte_count = segs[seg].ds_len;
1648 		dw->mge_desc->buffer = segs[seg].ds_addr;
1649 		dw->buffer = m0;
1650 		dw->mge_desc->cmd_status = 0;
1651 		if (seg == 0)
1652 			mge_offload_setup_descriptor(sc, dw);
1653 		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1654 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1655 		    MGE_DMA_OWNED;
1656 	}
1657 
1658 	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1659 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1660 
1661 	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1662 	sc->tx_desc_used_count++;
1663 	return (0);
1664 }
1665 
1666 static void
1667 mge_tick(void *msc)
1668 {
1669 	struct mge_softc *sc = msc;
1670 
1671 	KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));
1672 
1673 	MGE_GLOBAL_LOCK(sc);
1674 
1675 	/* Check for TX timeout */
1676 	mge_watchdog(sc);
1677 
1678 	mii_tick(sc->mii);
1679 
1680 	/* Check for media type change */
1681 	if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1682 		mge_ifmedia_upd(sc->ifp);
1683 
1684 	MGE_GLOBAL_UNLOCK(sc);
1685 
1686 	/* Schedule another timeout one second from now */
1687 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1688 
1689 	return;
1690 }
1691 
1692 static void
1693 mge_watchdog(struct mge_softc *sc)
1694 {
1695 	if_t ifp;
1696 
1697 	ifp = sc->ifp;
1698 
1699 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1700 		return;
1701 	}
1702 
1703 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1704 	if_printf(ifp, "watchdog timeout\n");
1705 
1706 	mge_stop(sc);
1707 	mge_init_locked(sc);
1708 }
1709 
1710 static void
1711 mge_start(if_t ifp)
1712 {
1713 	struct mge_softc *sc = if_getsoftc(ifp);
1714 
1715 	MGE_TRANSMIT_LOCK(sc);
1716 
1717 	mge_start_locked(ifp);
1718 
1719 	MGE_TRANSMIT_UNLOCK(sc);
1720 }
1721 
1722 static void
1723 mge_start_locked(if_t ifp)
1724 {
1725 	struct mge_softc *sc;
1726 	struct mbuf *m0, *mtmp;
1727 	uint32_t reg_val, queued = 0;
1728 
1729 	sc = if_getsoftc(ifp);
1730 
1731 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1732 
1733 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1734 	    IFF_DRV_RUNNING)
1735 		return;
1736 
1737 	for (;;) {
1738 		/* Get packet from the queue */
1739 		m0 = if_dequeue(ifp);
1740 		if (m0 == NULL)
1741 			break;
1742 
1743 		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1744 		    m0->m_flags & M_VLANTAG) {
1745 			if (M_WRITABLE(m0) == 0) {
1746 				mtmp = m_dup(m0, M_NOWAIT);
1747 				m_freem(m0);
1748 				if (mtmp == NULL)
1749 					continue;
1750 				m0 = mtmp;
1751 			}
1752 		}
1753 		/* The driver support only one DMA fragment. */
1754 		if (m0->m_next != NULL) {
1755 			mtmp = m_defrag(m0, M_NOWAIT);
1756 			if (mtmp != NULL)
1757 				m0 = mtmp;
1758 		}
1759 
1760 		/* Check for free descriptors */
1761 		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1762 			if_sendq_prepend(ifp, m0);
1763 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1764 			break;
1765 		}
1766 
1767 		if (mge_encap(sc, m0) != 0)
1768 			break;
1769 
1770 		queued++;
1771 		BPF_MTAP(ifp, m0);
1772 	}
1773 
1774 	if (queued) {
1775 		/* Enable transmitter and watchdog timer */
1776 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1777 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1778 		sc->wd_timer = 5;
1779 	}
1780 }
1781 
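/*
 * Stop the port: halt the TX/RX queues, reclaim and free any pending TX
 * mbufs, wait for the transmit FIFO to drain and finally clear
 * PORT_SERIAL_ENABLE.
 */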
1782 static void
1783 mge_stop(struct mge_softc *sc)
1784 {
1785 	if_t ifp;
1786 	volatile uint32_t reg_val, status;
1787 	struct mge_desc_wrapper *dw;
1788 	struct mge_desc *desc;
1789 	int count;
1790 
1791 	ifp = sc->ifp;
1792 
1793 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
1794 		return;
1795 
1796 	/* Stop tick engine */
1797 	callout_stop(&sc->wd_callout);
1798 
1799 	/* Disable interface */
1800 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
1801 	sc->wd_timer = 0;
1802 
1803 	/* Disable interrupts */
1804 	mge_intrs_ctrl(sc, 0);
1805 
1806 	/* Disable Rx and Tx */
1807 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1808 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1809 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1810 
1811 	/* Remove pending data from TX queue */
1812 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1813 	    sc->tx_desc_used_count) {
1814 		/* Get the descriptor */
1815 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1816 		desc = dw->mge_desc;
1817 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1818 		    BUS_DMASYNC_POSTREAD);
1819 
1820 		/* Get descriptor status */
1821 		status = desc->cmd_status;
1822 
1823 		if (status & MGE_DMA_OWNED)
1824 			break;
1825 
1826 		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1827 		    MGE_TX_DESC_NUM;
1828 		sc->tx_desc_used_count--;
1829 
1830 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1831 		    BUS_DMASYNC_POSTWRITE);
1832 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1833 
1834 		m_freem(dw->buffer);
1835 		dw->buffer = (struct mbuf*)NULL;
1836 	}
1837 
1838 	/* Wait for end of transmission */
1839 	count = 0x100000;
1840 	while (count--) {
1841 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1842 		if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1843 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1844 			break;
1845 		DELAY(100);
1846 	}
1847 
1848 	if (count == 0)
1849 		if_printf(ifp,
1850 		    "%s: timeout while waiting for end of transmission\n",
1851 		    __FUNCTION__);
1852 
1853 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1854 	reg_val &= ~(PORT_SERIAL_ENABLE);
1855 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
1856 }
1857 
1858 static int
1859 mge_suspend(device_t dev)
1860 {
1861 
1862 	device_printf(dev, "%s\n", __FUNCTION__);
1863 	return (0);
1864 }
1865 
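/*
 * Convert RX descriptor status bits into mbuf checksum flags when RX
 * checksum offload is enabled.
 */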
1866 static void
1867 mge_offload_process_frame(if_t ifp, struct mbuf *frame,
1868     uint32_t status, uint16_t bufsize)
1869 {
1870 	int csum_flags = 0;
1871 
1872 	if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
1873 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1874 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1875 
1876 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1877 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1878 		    (status & MGE_RX_L4_CSUM_OK)) {
1879 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1880 			frame->m_pkthdr.csum_data = 0xFFFF;
1881 		}
1882 
1883 		frame->m_pkthdr.csum_flags = csum_flags;
1884 	}
1885 }
1886 
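/*
 * Translate the mbuf checksum-offload flags into TX descriptor command
 * bits (IP header length, IP and L4 checksum generation, VLAN tagging).
 */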
1887 static void
1888 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1889 {
1890 	struct mbuf *m0 = dw->buffer;
1891 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1892 	int csum_flags = m0->m_pkthdr.csum_flags;
1893 	int cmd_status = 0;
1894 	struct ip *ip;
1895 	int ehlen, etype;
1896 
1897 	if (csum_flags != 0) {
1898 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1899 			etype = ntohs(eh->evl_proto);
1900 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1901 			csum_flags |= MGE_TX_VLAN_TAGGED;
1902 		} else {
1903 			etype = ntohs(eh->evl_encap_proto);
1904 			ehlen = ETHER_HDR_LEN;
1905 		}
1906 
1907 		if (etype != ETHERTYPE_IP) {
1908 			if_printf(sc->ifp,
1909 			    "TCP/IP Offload enabled for unsupported "
1910 			    "protocol!\n");
1911 			return;
1912 		}
1913 
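		/*
		 * Record the IP header length (ip_hl, in 32-bit words) in the
		 * TX descriptor and mark the frame as not fragmented so the
		 * controller can generate the requested checksums.
		 */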
1914 		ip = (struct ip *)(m0->m_data + ehlen);
1915 		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1916 		cmd_status |= MGE_TX_NOT_FRAGMENT;
1917 	}
1918 
1919 	if (csum_flags & CSUM_IP)
1920 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1921 
1922 	if (csum_flags & CSUM_TCP)
1923 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1924 
1925 	if (csum_flags & CSUM_UDP)
1926 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1927 
1928 	dw->mge_desc->cmd_status |= cmd_status;
1929 }
1930 
1931 static void
1932 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1933 {
1934 
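	/*
	 * Enabling unmasks the RX, RX-error and extended interrupt sources;
	 * disabling clears both the cause and mask registers so that no
	 * stale events can fire once interrupts are turned back on.
	 */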
1935 	if (enable) {
1936 		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
1937 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1938 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
1939 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1940 		    MGE_PORT_INT_EXT_TXBUF0);
1941 	} else {
1942 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1943 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1944 
1945 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1946 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1947 
1948 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1949 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1950 	}
1951 }
1952 
1953 static uint8_t
1954 mge_crc8(uint8_t *data, int size)
1955 {
1956 	uint8_t crc = 0;
1957 	static const uint8_t ct[256] = {
1958 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1959 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1960 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1961 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1962 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1963 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1964 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1965 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1966 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1967 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1968 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1969 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1970 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1971 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1972 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1973 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1974 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1975 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1976 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1977 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1978 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1979 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1980 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1981 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1982 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1983 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1984 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1985 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1986 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1987 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1988 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1989 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
1990 	};
1991 
1992 	while (size--)
1993 		crc = ct[crc ^ *(data++)];
1994 
1995 	return (crc);
1996 }
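/*
 * For illustration only: the lookup table above implements a byte-wise
 * CRC-8 with polynomial 0x07 (x^8 + x^2 + x + 1) and an initial value of
 * zero.  The bitwise sketch below is a hypothetical helper, not part of
 * this driver; it computes the same value as mge_crc8() one bit at a time.
 */
static uint8_t
mge_crc8_bitwise(const uint8_t *data, int size)
{
	uint8_t crc = 0;
	int bit;

	while (size--) {
		crc ^= *(data++);
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : (crc << 1);
	}

	return (crc);
}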
1997 
1998 struct mge_hash_maddr_ctx {
1999 	uint32_t smt[MGE_MCAST_REG_NUMBER];
2000 	uint32_t omt[MGE_MCAST_REG_NUMBER];
2001 };
2002 
2003 static u_int
2004 mge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2005 {
2006 	static const uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
2007 	struct mge_hash_maddr_ctx *ctx = arg;
2008 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2009 	uint8_t *mac;
2010 	int i;
2011 
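	/*
	 * Addresses of the form 01:00:5E:00:00:xx are placed in the special
	 * multicast table, indexed directly by the last address byte; all
	 * other addresses are hashed with CRC-8 into the "other" multicast
	 * table.  Each entry is one byte, (queue << 1) | 1, and
	 * v << ((i & 0x03) << 3) selects its lane within the 32-bit word
	 * smt[i >> 2]/omt[i >> 2] (four entries per register).
	 */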
2012 	mac = LLADDR(sdl);
2013 	if (memcmp(mac, special, sizeof(special)) == 0) {
2014 		i = mac[5];
2015 		ctx->smt[i >> 2] |= v << ((i & 0x03) << 3);
2016 	} else {
2017 		i = mge_crc8(mac, ETHER_ADDR_LEN);
2018 		ctx->omt[i >> 2] |= v << ((i & 0x03) << 3);
2019 	}
2020 	return (1);
2021 }
2022 
2023 static void
2024 mge_setup_multicast(struct mge_softc *sc)
2025 {
2026 	struct mge_hash_maddr_ctx ctx;
2027 	if_t ifp = sc->ifp;
2028 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2029 	int i;
2030 
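	/*
	 * With IFF_ALLMULTI every entry of both filter tables is filled with
	 * the default entry, accepting all multicast traffic; otherwise the
	 * tables are rebuilt from the interface's current multicast list.
	 */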
2031 	if (if_getflags(ifp) & IFF_ALLMULTI) {
2032 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
2033 			ctx.smt[i] = ctx.omt[i] =
2034 			    (v << 24) | (v << 16) | (v << 8) | v;
2035 	} else {
2036 		memset(&ctx, 0, sizeof(ctx));
2037 		if_foreach_llmaddr(ifp, mge_hash_maddr, &ctx);
2038 	}
2039 
2040 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
2041 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), ctx.smt[i]);
2042 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), ctx.omt[i]);
2043 	}
2044 }
2045 
2046 static void
2047 mge_set_rxic(struct mge_softc *sc)
2048 {
2049 	uint32_t reg;
2050 
2051 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
2052 		sc->rx_ic_time = sc->mge_rx_ipg_max;
2053 
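	/*
	 * The IPG field is cleared using the encoding of its maximum value
	 * before the (clamped) coalescing time is inserted, so no bits from
	 * a previous setting survive the read-modify-write.
	 */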
2054 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
2055 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
2056 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
2057 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
2058 }
2059 
2060 static void
2061 mge_set_txic(struct mge_softc *sc)
2062 {
2063 	uint32_t reg;
2064 
2065 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
2066 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
2067 
2068 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
2069 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
2070 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
2071 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
2072 }
2073 
2074 static int
2075 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
2076 {
2077 	struct mge_softc *sc = (struct mge_softc *)arg1;
2078 	uint32_t time;
2079 	int error;
2080 
2081 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
2082 	error = sysctl_handle_int(oidp, &time, 0, req);
2083 	if (error != 0)
2084 		return (error);
2085 
2086 	MGE_GLOBAL_LOCK(sc);
2087 	if (arg2 == MGE_IC_RX) {
2088 		sc->rx_ic_time = time;
2089 		mge_set_rxic(sc);
2090 	} else {
2091 		sc->tx_ic_time = time;
2092 		mge_set_txic(sc);
2093 	}
2094 	MGE_GLOBAL_UNLOCK(sc);
2095 
2096 	return (0);
2097 }
2098 
2099 static void
2100 mge_add_sysctls(struct mge_softc *sc)
2101 {
2102 	struct sysctl_ctx_list *ctx;
2103 	struct sysctl_oid_list *children;
2104 	struct sysctl_oid *tree;
2105 
2106 	ctx = device_get_sysctl_ctx(sc->dev);
2107 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2108 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
2109 	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "MGE Interrupts coalescing");
2110 	children = SYSCTL_CHILDREN(tree);
2111 
2112 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
2113 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_RX,
2114 	    mge_sysctl_ic, "I", "IC RX time threshold");
2115 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
2116 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_TX,
2117 	    mge_sysctl_ic, "I", "IC TX time threshold");
2118 }
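/*
 * Note: the nodes above should show up under the device's sysctl tree,
 * e.g. "dev.mge.0.int_coal.rx_time" for unit 0 (exact path assumed from
 * the standard newbus device sysctl layout).  Writes go through
 * mge_sysctl_ic(), which stores the value under the driver lock and
 * reprograms the controller via mge_set_rxic()/mge_set_txic(), where it
 * is clamped to the hardware maximum.
 */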
2119 
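/*
 * mdio(4) interface methods: register access is delegated to the shared
 * Marvell SMI helpers mv_write_ge_smi() and mv_read_ge_smi().
 */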
2120 static int
2121 mge_mdio_writereg(device_t dev, int phy, int reg, int value)
2122 {
2123 
2124 	mv_write_ge_smi(dev, phy, reg, value);
2125 
2126 	return (0);
2127 }
2128 
2129 
2130 static int
2131 mge_mdio_readreg(device_t dev, int phy, int reg)
2132 {
2133 	int ret;
2134 
2135 	ret = mv_read_ge_smi(dev, phy, reg);
2136 
2137 	return (ret);
2138 }
2139