xref: /freebsd/sys/dev/tsec/if_tsec.c (revision 02f27f1cfa619cdf9509c65366f55f7c8803de5c)
/*-
 * Copyright (C) 2006-2008 Semihalf
 * All rights reserved.
 *
 * Written by: Piotr Kruszynski <ppk@semihalf.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>
#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <net/ethernet.h>
#include <net/if_arp.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/ocpbus.h>

#include <dev/tsec/if_tsec.h>
#include <dev/tsec/if_tsecreg.h>

#include "miibus_if.h"

#define TSEC_DEBUG

#ifdef TSEC_DEBUG
#define PDEBUG(a) do {							\
	printf("%s:%d: ", __func__, __LINE__);				\
	printf a;							\
	printf("\n");							\
} while (0)
#else
#define PDEBUG(a) /* nop */
#endif
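
/*
 * Note: PDEBUG takes a parenthesized printf(9)-style argument list,
 * e.g. PDEBUG(("RX error, flags 0x%x", flags)).
 */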

static int	tsec_probe(device_t dev);
static int	tsec_attach(device_t dev);
static int	tsec_setup_intr(device_t dev, struct resource **ires,
    void **ihand, int *irid, driver_intr_t handler, const char *iname);
static void	tsec_release_intr(device_t dev, struct resource *ires,
    void *ihand, int irid, const char *iname);
static void	tsec_free_dma(struct tsec_softc *sc);
static int	tsec_detach(device_t dev);
static void	tsec_shutdown(device_t dev);
static int	tsec_suspend(device_t dev); /* XXX */
static int	tsec_resume(device_t dev); /* XXX */

static void	tsec_init(void *xsc);
static void	tsec_init_locked(struct tsec_softc *sc);
static void	tsec_set_mac_address(struct tsec_softc *sc);
static void	tsec_dma_ctl(struct tsec_softc *sc, int state);
static void	tsec_intrs_ctl(struct tsec_softc *sc, int state);
static void	tsec_reset_mac(struct tsec_softc *sc);

static void	tsec_watchdog(struct ifnet *ifp);
static void	tsec_start(struct ifnet *ifp);
static void	tsec_start_locked(struct ifnet *ifp);
static int	tsec_encap(struct tsec_softc *sc,
    struct mbuf *m_head);
static void	tsec_setfilter(struct tsec_softc *sc);
static int	tsec_ioctl(struct ifnet *ifp, u_long command,
    caddr_t data);
static int	tsec_ifmedia_upd(struct ifnet *ifp);
static void	tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, uint32_t *paddr);
static void	tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static int	tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
    bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
    const char *dname);
static void	tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap,
    void *vaddr);

static void	tsec_stop(struct tsec_softc *sc);

static void	tsec_receive_intr(void *arg);
static void	tsec_transmit_intr(void *arg);
static void	tsec_error_intr(void *arg);

static void	tsec_tick(void *arg);
static int	tsec_miibus_readreg(device_t dev, int phy, int reg);
static void	tsec_miibus_writereg(device_t dev, int phy, int reg, int value);
static void	tsec_miibus_statchg(device_t dev);

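/*
 * All MII management accesses go through the registers of the first TSEC
 * instance (see tsec_miibus_readreg() and tsec_miibus_writereg()), so a
 * pointer to it is stashed here at attach time.
 */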
static struct tsec_softc *tsec0_sc = NULL; /* XXX ugly hack! */

static device_method_t tsec_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		tsec_probe),
	DEVMETHOD(device_attach,	tsec_attach),
	DEVMETHOD(device_detach,	tsec_detach),
	DEVMETHOD(device_shutdown,	tsec_shutdown),
	DEVMETHOD(device_suspend,	tsec_suspend),
	DEVMETHOD(device_resume,	tsec_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	tsec_miibus_readreg),
	DEVMETHOD(miibus_writereg,	tsec_miibus_writereg),
	DEVMETHOD(miibus_statchg,	tsec_miibus_statchg),
	{ 0, 0 }
};

static driver_t tsec_driver = {
	"tsec",
	tsec_methods,
	sizeof(struct tsec_softc),
};

static devclass_t tsec_devclass;

DRIVER_MODULE(tsec, ocpbus, tsec_driver, tsec_devclass, 0, 0);
DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);

static void
tsec_get_hwaddr(struct tsec_softc *sc, uint8_t *addr)
{
	union {
		uint32_t reg[2];
		uint8_t addr[6];
	} curmac;
	uint32_t a[6];
	int count, i;
	char *cp;

	/* Use the currently programmed MAC address by default. */
	curmac.reg[0] = TSEC_READ(sc, TSEC_REG_MACSTNADDR1);
	curmac.reg[1] = TSEC_READ(sc, TSEC_REG_MACSTNADDR2);
	for (i = 0; i < 6; i++)
		a[5-i] = curmac.addr[i];

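	/*
	 * The default may be overridden with the "ethaddr" environment
	 * variable, given as six colon-separated hex bytes
	 * (e.g. "00:e0:0c:12:34:56").
	 */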
	cp = getenv("ethaddr");
	if (cp != NULL) {
		count = sscanf(cp, "%x:%x:%x:%x:%x:%x", &a[0], &a[1], &a[2],
		    &a[3], &a[4], &a[5]);
		freeenv(cp);
		if (count != 6) {
			/* Restore the default on a malformed override. */
			for (i = 0; i < 6; i++)
				a[5-i] = curmac.addr[i];
		}
	}

	addr[0] = a[0];
	addr[1] = a[1];
	addr[2] = a[2];
	addr[3] = a[3];
	addr[4] = a[4];
	addr[5] = a[5];
}

static void
tsec_init(void *xsc)
{
	struct tsec_softc *sc = xsc;

	TSEC_GLOBAL_LOCK(sc);
	tsec_init_locked(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}

static void
tsec_init_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t timeout;
	uint32_t val;
	uint32_t i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	tsec_stop(sc);

	/*
	 * These steps follow the MPC8555E PowerQUICC III Reference Manual,
	 * 14.7 Initialization/Application Information.
	 */

	/* Step 1: soft reset MAC */
	tsec_reset_mac(sc);

	/* Step 2: Initialize MACCFG2 */
	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
	);

	/*
	 * Step 3: Initialize ECNTRL.
	 * While the documentation states that R100M is ignored if RPM is
	 * not set, it does seem to be needed to get the orange boxes to
	 * work (which have a Marvell 88E1111 PHY). Go figure.
	 */

	/*
	 * XXX kludge - use circumstantial evidence to program ECNTRL
	 * correctly. Ideally we need some board information to guide
	 * us here.
	 */
	i = TSEC_READ(sc, TSEC_REG_ID2);
	val = (i & 0xffff)
	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

	/* Step 4: Initialize MAC station address */
	tsec_set_mac_address(sc);

	/*
	 * Step 5: Assign a physical address to the TBI so as not to conflict
	 * with the external PHY physical address
	 */
	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

	/* Step 6: Reset the management interface */
	TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

	/* Step 7: Setup the MII Mgmt clock speed */
	TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
	timeout = TSEC_READ_RETRY;
	while (--timeout && (TSEC_READ(tsec0_sc, TSEC_REG_MIIMIND) &
	    TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);
	if (timeout == 0) {
		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
		return;
	}

	/* Step 9: Setup the MII Mgmt */
	mii_mediachg(sc->tsec_mii);

	/* Step 10: Clear IEVENT register */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);

	/* Step 11: Initialize IMASK */
	tsec_intrs_ctl(sc, 1);

	/* Step 12: Initialize IADDRn */
	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

	/* Step 13: Initialize GADDRn */
	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

	/* Step 14: Initialize RCTRL */
	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

	/* Step 15: Initialize DMACTRL */
	tsec_dma_ctl(sc, 1);

	/* Step 16: Initialize FIFO_PAUSE_CTRL */
	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

	/*
	 * Step 17: Initialize transmit/receive descriptor rings.
	 * Initialize TBASE and RBASE.
	 */
	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);

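	/*
	 * The last descriptor in each ring is marked with the wrap bit,
	 * so the controller returns to the start of the ring after
	 * processing it.
	 */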
	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		tx_desc[i].bufptr = 0;
		tx_desc[i].length = 0;
		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC-1) ? TSEC_TXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		rx_desc[i].bufptr = sc->rx_data[i].paddr;
		rx_desc[i].length = 0;
		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
		    ((i == TSEC_RX_NUM_DESC-1) ? TSEC_RXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Step 18: Initialize the maximum and minimum receive buffer length */
	TSEC_WRITE(sc, TSEC_REG_MRBLR, TSEC_DEFAULT_MAX_RX_BUFFER_SIZE);
	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_DEFAULT_MIN_RX_BUFFER_SIZE);

	/* Step 19: Enable Rx and RxBD data snooping */
	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

	/* Step 20: Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Step 21: Mask all CAM interrupts */
	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

	/* Step 22: Enable Rx and Tx */
	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

	/* Step 23: Reset TSEC counters for Tx and Rx rings */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Step 24: Activate timer for PHY */
	callout_reset(&sc->tsec_tick_ch, hz, tsec_tick, sc);

	/* Step 25: Activate network interface */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_timer = 0;
	sc->tsec_if_flags = ifp->if_flags;
}

static void
tsec_set_mac_address(struct tsec_softc *sc)
{
	uint32_t macbuf[2] = { 0, 0 };
	int i;
	char *macbufp;
	char *curmac;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
	    ("tsec_set_mac_address: (%d <= %zd)",
	    ETHER_ADDR_LEN, sizeof(macbuf)));

	macbufp = (char *)macbuf;
	curmac = (char *)IF_LLADDR(sc->tsec_ifp);

	/* Correct order of MAC address bytes */
	for (i = 1; i <= ETHER_ADDR_LEN; i++)
		macbufp[ETHER_ADDR_LEN-i] = curmac[i-1];

	/* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]);
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]);
}

/*
 * DMA control function, if argument state is:
 * 0 - DMA engine will be disabled
 * 1 - DMA engine will be enabled
 * (The function calls itself with an internal state of 1000 to clear
 * the graceful stop bits without restarting transmission.)
 */
static void
tsec_dma_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;
	uint32_t dma_flags;
	uint32_t timeout;

	dev = sc->dev;

	dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);

	switch (state) {
	case 0:
		/* Temporarily clear the graceful stop bits. */
		tsec_dma_ctl(sc, 1000);

		/* Set them again */
		dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	case 1000:
	case 1:
		/* Set write with response (WWR), wait (WOP) and snoop bits */
		dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
		    DMACTRL_WWR | DMACTRL_WOP);

		/* Clear graceful stop bits */
		dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	default:
		device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
		    state);
	}

	TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);

	switch (state) {
	case 0:
		/* Wait for DMA stop */
		timeout = TSEC_READ_RETRY;
		while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
		    (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
			DELAY(TSEC_READ_DELAY);

		if (timeout == 0)
			device_printf(dev, "tsec_dma_ctl(): timeout!\n");
		break;
	case 1:
		/* Restart transmission function */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
}

/*
 * Interrupt control function, if argument state is:
 * 0 - all TSEC interrupts will be masked
 * 1 - all TSEC interrupts will be unmasked
 */
static void
tsec_intrs_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;

	dev = sc->dev;

	switch (state) {
	case 0:
		TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
		break;
	case 1:
		TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN |
		    TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN |
		    TSEC_IMASK_EBERREN | TSEC_IMASK_BTEN |
		    TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN |
		    TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN |
		    TSEC_IMASK_RXFEN);
		break;
	default:
		device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
		    state);
	}
}

static void
tsec_reset_mac(struct tsec_softc *sc)
{
	uint32_t maccfg1_flags;

	/* Set soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);

	/* Clear soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
}

static void
tsec_watchdog(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;

	TSEC_GLOBAL_LOCK(sc);

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	tsec_stop(sc);
	tsec_init_locked(sc);

	TSEC_GLOBAL_UNLOCK(sc);
}

static void
tsec_start(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;

	TSEC_TRANSMIT_LOCK(sc);
	tsec_start_locked(ifp);
	TSEC_TRANSMIT_UNLOCK(sc);
}

static void
tsec_start_locked(struct ifnet *ifp)
{
	struct tsec_softc *sc;
	struct mbuf *m0;
	struct mbuf *mtmp;
	unsigned int queued = 0;

	sc = ifp->if_softc;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (sc->tsec_link == 0)
		return;

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

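		/*
		 * Defragment the chain so it fits in as few DMA segments as
		 * possible; if m_defrag() fails, the original chain is used
		 * unchanged.
		 */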
		mtmp = m_defrag(m0, M_DONTWAIT);
		if (mtmp)
			m0 = mtmp;

		if (tsec_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (queued) {
		/* Enable transmitter and watchdog timer */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
		ifp->if_timer = 5;
	}
}

static int
tsec_encap(struct tsec_softc *sc, struct mbuf *m0)
{
	struct tsec_desc *tx_desc = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[TSEC_TX_NUM_DESC];
	bus_dmamap_t *mapp;
	int error;
	int seg, nsegs;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	if (TSEC_FREE_TX_DESC(sc) == 0) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	mapp = TSEC_ALLOC_TX_MAP(sc);

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
	    *mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0 || nsegs > TSEC_FREE_TX_DESC(sc) || nsegs <= 0) {
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
		TSEC_FREE_TX_MAP(sc, mapp);
		return ((error != 0) ? error : -1);
	}
	bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_PREWRITE);

	if ((ifp->if_flags & IFF_DEBUG) && (nsegs > 1))
		if_printf(ifp, "TX buffer has %d segments\n", nsegs);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		tx_desc = TSEC_GET_CUR_TX_DESC(sc);

		tx_desc->length = segs[seg].ds_len;
		tx_desc->bufptr = segs[seg].ds_addr;

		tx_desc->flags =
		    (tx_desc->flags & TSEC_TXBD_W) |	/* wrap */
		    TSEC_TXBD_I |		/* interrupt */
		    TSEC_TXBD_R |		/* ready to send */
		    TSEC_TXBD_TC |		/* transmit the CRC sequence
						 * after the last data byte */
		    ((seg == nsegs-1) ? TSEC_TXBD_L : 0); /* last in frame */
	}

	/* Save mbuf and DMA mapping for release at later stage */
	TSEC_PUT_TX_MBUF(sc, m0);
	TSEC_PUT_TX_MAP(sc, mapp);

	return (0);
}

static void
tsec_setfilter(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	uint32_t flags;

	ifp = sc->tsec_ifp;
	flags = TSEC_READ(sc, TSEC_REG_RCTRL);

	/* Promiscuous mode */
	if (ifp->if_flags & IFF_PROMISC)
		flags |= TSEC_RCTRL_PROM;
	else
		flags &= ~TSEC_RCTRL_PROM;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
}

static int
tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	device_t dev;
	int error = 0;

	dev = sc->dev;

	switch (command) {
	case SIOCSIFFLAGS:
		TSEC_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((sc->tsec_if_flags ^ ifp->if_flags) &
				    IFF_PROMISC)
					tsec_setfilter(sc);
			} else
				tsec_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				tsec_stop(sc);
		}
		sc->tsec_if_flags = ifp->if_flags;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media,
		    command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	/* Try to (re)start transmission if the interface is up */
	if (ifp->if_flags & IFF_UP)
		tsec_start(ifp);
	return (error);
}

static int
tsec_ifmedia_upd(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_mediachg(mii);

	TSEC_TRANSMIT_UNLOCK(sc);
	return (0);
}

static void
tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	TSEC_TRANSMIT_UNLOCK(sc);
}

static int
tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    uint32_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

#if 0
	if (error) {
		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
			error);
		m_freem(new_mbuf);
		return (ENOBUFS);
	}
#endif

#if 0
	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
		("Wrong alignment of RX buffer!"));
#endif
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

static void
tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

static int
tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
{
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    dsize, 1,				/* maxsize, nsegments */
	    dsize, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    dtag);				/* dmat */

	if (error) {
		device_printf(dev, "failed to allocate busdma %s tag\n", dname);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "failed to allocate %s DMA safe memory\n",
		    dname);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize, tsec_map_dma_addr,
	    raddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get address of the %s descriptors\n",
		    dname);
		bus_dmamem_free(*dtag, *vaddr, *dmap);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	return (0);
}

static void
tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
{

	if (vaddr == NULL)
		return;

	/* Unmap descriptors from DMA memory */
	bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dtag, dmap);

	/* Free descriptor memory */
	bus_dmamem_free(dtag, vaddr, dmap);

	/* Destroy descriptor tag */
	bus_dma_tag_destroy(dtag);
}

static int
tsec_probe(device_t dev)
{
	struct tsec_softc *sc;
	device_t parent;
	uintptr_t devtype;
	int error;
	uint32_t id;

	parent = device_get_parent(dev);

	error = BUS_READ_IVAR(parent, dev, OCPBUS_IVAR_DEVTYPE, &devtype);
	if (error)
		return (error);
	if (devtype != OCPBUS_DEVTYPE_TSEC)
		return (ENXIO);

	sc = device_get_softc(dev);

	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    0ul, ~0ul, TSEC_IO_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL)
		return (ENXIO);

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* Check that we actually have a TSEC at this address */
	id = TSEC_READ(sc, TSEC_REG_ID) | TSEC_READ(sc, TSEC_REG_ID2);

	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);

	if (id == 0)
		return (ENXIO);

	device_set_desc(dev, "Three-Speed Ethernet Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
tsec_attach(device_t dev)
{
	uint8_t hwaddr[ETHER_ADDR_LEN];
	struct tsec_softc *sc;
	struct ifnet *ifp;
	bus_dmamap_t *map_ptr;
	bus_dmamap_t **map_pptr;
	int error = 0;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (device_get_unit(dev) == 0)
		tsec0_sc = sc; /* XXX */

	callout_init(&sc->tsec_tick_ch, 1);
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "TSEC TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "TSEC RX lock",
	    MTX_DEF);

	/* Reset all TSEC counters */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Allocate IO memory for TSEC registers */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    0ul, ~0ul, TSEC_IO_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate IO memory range!\n");
		tsec_detach(dev);
		return (ENXIO);
	}
	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* Stop DMA engine if enabled by firmware */
	tsec_dma_ctl(sc, 0);

	/* Reset MAC */
	tsec_reset_mac(sc);

	/* Disable interrupts for now */
	tsec_intrs_ctl(sc, 0);

	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
	error = tsec_alloc_dma_desc(dev, &sc->tsec_tx_dtag, &sc->tsec_tx_dmap,
	    sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");
	if (error) {
		tsec_detach(dev);
		return (ENXIO);
	}

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = tsec_alloc_dma_desc(dev, &sc->tsec_rx_dtag, &sc->tsec_rx_dmap,
	    sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
	if (error) {
		tsec_detach(dev);
		return (ENXIO);
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_TXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES * (TSEC_TX_NUM_DESC - 1),	/* maxsize */
	    TSEC_TX_NUM_DESC - 1,		/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_tx_mtag);			/* dmat */
	if (error) {
		device_printf(dev, "failed to allocate busdma tag(tx mbufs)\n");
		tsec_detach(dev);
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_RXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_rx_mtag);			/* dmat */
	if (error) {
		device_printf(dev, "failed to allocate busdma tag(rx mbufs)\n");
		tsec_detach(dev);
		return (ENXIO);
	}

	/* Create TX busdma maps */
	map_ptr = sc->tx_map_data;
	map_pptr = sc->tx_map_unused_data;

	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		map_pptr[i] = &map_ptr[i];
		error = bus_dmamap_create(sc->tsec_tx_mtag, 0, map_pptr[i]);
		if (error) {
			device_printf(dev, "failed to init TX ring\n");
			tsec_detach(dev);
			return (ENXIO);
		}
	}

	/* Create RX busdma maps and zero mbuf handlers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
		    &sc->rx_data[i].map);
		if (error) {
			device_printf(dev, "failed to init RX ring\n");
			tsec_detach(dev);
			return (ENXIO);
		}
		sc->rx_data[i].mbuf = NULL;
	}

	/* Create mbufs for RX buffers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
		if (error) {
			device_printf(dev, "can't load rx DMA map %d, error = "
			    "%d\n", i, error);
			tsec_detach(dev);
			return (error);
		}
	}

	/* Create network interface for upper layers */
	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		tsec_detach(dev);
		return (ENOMEM);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
	ifp->if_init = tsec_init;
	ifp->if_start = tsec_start;
	ifp->if_watchdog = tsec_watchdog;
	ifp->if_ioctl = tsec_ioctl;

	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/* XXX No special features of TSEC are supported currently */
	ifp->if_capabilities = 0;
	ifp->if_capenable = ifp->if_capabilities;

	/* Probe PHY(s) */
	error = mii_phy_probe(dev, &sc->tsec_miibus, tsec_ifmedia_upd,
	    tsec_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII failed to find PHY!\n");
		if_free(ifp);
		sc->tsec_ifp = NULL;
		tsec_detach(dev);
		return (error);
	}
	sc->tsec_mii = device_get_softc(sc->tsec_miibus);

	tsec_get_hwaddr(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);

	/* Interrupts configuration (TX/RX/ERR) */
	sc->sc_transmit_irid = OCP_TSEC_RID_TXIRQ;
	error = tsec_setup_intr(dev, &sc->sc_transmit_ires,
	    &sc->sc_transmit_ihand, &sc->sc_transmit_irid,
	    tsec_transmit_intr, "TX");
	if (error) {
		tsec_detach(dev);
		return (error);
	}

	sc->sc_receive_irid = OCP_TSEC_RID_RXIRQ;
	error = tsec_setup_intr(dev, &sc->sc_receive_ires,
	    &sc->sc_receive_ihand, &sc->sc_receive_irid,
	    tsec_receive_intr, "RX");
	if (error) {
		tsec_detach(dev);
		return (error);
	}

	sc->sc_error_irid = OCP_TSEC_RID_ERRIRQ;
	error = tsec_setup_intr(dev, &sc->sc_error_ires,
	    &sc->sc_error_ihand, &sc->sc_error_irid,
	    tsec_error_intr, "ERR");
	if (error) {
		tsec_detach(dev);
		return (error);
	}

	return (0);
}

static int
tsec_setup_intr(device_t dev, struct resource **ires, void **ihand, int *irid,
    driver_intr_t handler, const char *iname)
{
	struct tsec_softc *sc;
	int error;

	sc = device_get_softc(dev);

	(*ires) = bus_alloc_resource_any(dev, SYS_RES_IRQ, irid, RF_ACTIVE);
	if ((*ires) == NULL) {
		device_printf(dev, "could not allocate %s IRQ\n", iname);
		return (ENXIO);
	}
	error = bus_setup_intr(dev, *ires, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, handler, sc, ihand);
	if (error) {
		device_printf(dev, "failed to set up %s IRQ\n", iname);
		if (bus_release_resource(dev, SYS_RES_IRQ, *irid, *ires))
			device_printf(dev, "could not release %s IRQ\n", iname);
		(*ires) = NULL;
		return (error);
	}
	return (0);
}

static void
tsec_release_intr(device_t dev, struct resource *ires, void *ihand, int irid,
    const char *iname)
{
	int error;

	if (ires == NULL)
		return;

	error = bus_teardown_intr(dev, ires, ihand);
	if (error)
		device_printf(dev, "bus_teardown_intr() failed for %s intr"
		    ", error %d\n", iname, error);

	error = bus_release_resource(dev, SYS_RES_IRQ, irid, ires);
	if (error)
		device_printf(dev, "bus_release_resource() failed for %s intr"
		    ", error %d\n", iname, error);
}

static void
tsec_free_dma(struct tsec_softc *sc)
{
	int i;

	/* Free TX maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
		if (sc->tx_map_data[i] != NULL)
			bus_dmamap_destroy(sc->tsec_tx_mtag,
			    sc->tx_map_data[i]);
	/* Destroy tag for Tx mbufs */
	bus_dma_tag_destroy(sc->tsec_tx_mtag);

	/* Free RX mbufs and maps */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		if (sc->rx_data[i].mbuf) {
			/* Unload buffer from DMA */
			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->tsec_rx_mtag, sc->rx_data[i].map);

			/* Free buffer */
			m_freem(sc->rx_data[i].mbuf);
		}
		/* Destroy map for this buffer */
		if (sc->rx_data[i].map != NULL)
			bus_dmamap_destroy(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);
	}
	/* Destroy tag for Rx mbufs */
	bus_dma_tag_destroy(sc->tsec_rx_mtag);

	/* Unload TX/RX descriptors */
	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    sc->tsec_tx_vaddr);
	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    sc->tsec_rx_vaddr);
}

static int
tsec_detach(device_t dev)
{
	struct tsec_softc *sc;
	int error;

	sc = device_get_softc(dev);

	/* Stop TSEC controller and free TX queue */
	if (sc->sc_rres && sc->tsec_ifp)
		tsec_shutdown(dev);

	/* Wait for the TSEC tick callout to finish */
	callout_drain(&sc->tsec_tick_ch);

	/* Stop and release all interrupts */
	tsec_release_intr(dev, sc->sc_transmit_ires, sc->sc_transmit_ihand,
	    sc->sc_transmit_irid, "TX");
	tsec_release_intr(dev, sc->sc_receive_ires, sc->sc_receive_ihand,
	    sc->sc_receive_irid, "RX");
	tsec_release_intr(dev, sc->sc_error_ires, sc->sc_error_ihand,
	    sc->sc_error_irid, "ERR");

	/* Detach network interface */
	if (sc->tsec_ifp) {
		ether_ifdetach(sc->tsec_ifp);
		if_free(sc->tsec_ifp);
		sc->tsec_ifp = NULL;
	}

	/* Free DMA resources */
	tsec_free_dma(sc);

	/* Free IO memory handler */
	if (sc->sc_rres) {
		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
		    sc->sc_rres);
		if (error)
			device_printf(dev, "bus_release_resource() failed for"
			    " IO memory, error %d\n", error);
	}

	/* Destroy locks */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);
	return (0);
}

static void
tsec_shutdown(device_t dev)
{
	struct tsec_softc *sc;

	sc = device_get_softc(dev);

	TSEC_GLOBAL_LOCK(sc);
	tsec_stop(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}

static int
tsec_suspend(device_t dev)
{

	/* TODO not implemented! */
	return (ENODEV);
}

static int
tsec_resume(device_t dev)
{

	/* TODO not implemented! */
	return (ENODEV);
}

static void
tsec_stop(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	uint32_t tmpval;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Stop PHY tick engine */
	callout_stop(&sc->tsec_tick_ch);

	/* Disable interface and watchdog timer */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	ifp->if_timer = 0;

	/* Disable all interrupts and stop DMA */
	tsec_intrs_ctl(sc, 0);
	tsec_dma_ctl(sc, 0);

	/* Remove pending data from TX queue */
	while (!TSEC_EMPTYQ_TX_MBUF(sc)) {
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);
	}

	/* Disable Rx and Tx */
	tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
	tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
	DELAY(10);
}

static void
tsec_receive_intr(void *arg)
{
	struct mbuf *rcv_mbufs[TSEC_RX_NUM_DESC];
	struct tsec_softc *sc = arg;
	struct tsec_desc *rx_desc;
	struct ifnet *ifp;
	struct rx_data_type *rx_data;
	struct mbuf *m;
	device_t dev;
	uint32_t i;
	int count;
	int c1 = 0;
	int c2;
	uint16_t flags;
	uint16_t length;

	ifp = sc->tsec_ifp;
	rx_data = sc->rx_data;
	dev = sc->dev;

	/* Acknowledge the interrupt by clearing the RX event bits */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);

	TSEC_RECEIVE_LOCK(sc);

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (count = 0; /* count < TSEC_RX_NUM_DESC */; count++) {
		rx_desc = TSEC_GET_CUR_RX_DESC(sc);
		flags = rx_desc->flags;

		/* Check if there is anything to receive */
		if ((flags & TSEC_RXBD_E) || (count >= TSEC_RX_NUM_DESC)) {
			/*
			 * Avoid generating another interrupt
			 */
			if (flags & TSEC_RXBD_E)
				TSEC_WRITE(sc, TSEC_REG_IEVENT,
				    TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
			/*
			 * We didn't consume the current descriptor and have
			 * to return it to the queue
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
		    TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {
			rx_desc->length = 0;
			rx_desc->flags = (rx_desc->flags &
			    ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I;
			continue;
		}

		if ((flags & TSEC_RXBD_L) == 0)
			device_printf(dev, "buf is not the last in frame!\n");

		/* Ok... process frame */
		length = rx_desc->length - ETHER_CRC_LEN;
		i = TSEC_GET_CUR_RX_DESC_CNT(sc);

		m = rx_data[i].mbuf;

		if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
		    &rx_data[i].mbuf, &rx_data[i].paddr)) {
			ifp->if_ierrors++;
			continue;
		}
		/* Attach new buffer to descriptor, and clear flags */
		rx_desc->bufptr = rx_data[i].paddr;
		rx_desc->length = 0;
		rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
		    TSEC_RXBD_E | TSEC_RXBD_I;

		/* Prepare buffer for upper layers */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = length;

		/* Save it for push */
		rcv_mbufs[c1++] = m;
	}

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	TSEC_RECEIVE_UNLOCK(sc);

	/* Pass the frames up the stack, outside the receive lock */
	for (c2 = 0; c2 < c1; c2++)
		(*ifp->if_input)(ifp, rcv_mbufs[c2]);
}

static void
tsec_transmit_intr(void *arg)
{
	struct tsec_softc *sc = arg;
	struct tsec_desc *tx_desc;
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	int send = 0;

	ifp = sc->tsec_ifp;

	/* Acknowledge the interrupt by clearing the TX event bits */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);

	TSEC_TRANSMIT_LOCK(sc);

	/* Update collision statistics */
	ifp->if_collisions += TSEC_READ(sc, TSEC_REG_MON_TNCL);

	/* Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (TSEC_CUR_DIFF_DIRTY_TX_DESC(sc)) {
		tx_desc = TSEC_GET_DIRTY_TX_DESC(sc);
		if (tx_desc->flags & TSEC_TXBD_R) {
			TSEC_BACK_DIRTY_TX_DESC(sc);
			break;
		}

		if ((tx_desc->flags & TSEC_TXBD_L) == 0)
			continue;

		/*
		 * This is the last buf in this packet, so unmap and free it.
		 */
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);

		ifp->if_opackets++;
		send = 1;
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		tsec_start_locked(ifp);

		/* Stop watchdog if all sent */
		if (TSEC_EMPTYQ_TX_MBUF(sc))
			ifp->if_timer = 0;
	}
	TSEC_TRANSMIT_UNLOCK(sc);
}

static void
tsec_error_intr(void *arg)
{
	struct tsec_softc *sc = arg;
	struct ifnet *ifp;
	uint32_t eflags;

	ifp = sc->tsec_ifp;

	eflags = TSEC_READ(sc, TSEC_REG_IEVENT);

	if (ifp->if_flags & IFF_DEBUG)
		if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n",
		    eflags);

	/* Clear event bits in hardware */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY |
	    TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT |
	    TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC |
	    TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN);

	if (eflags & TSEC_IEVENT_EBERR)
		if_printf(ifp, "System bus error occurred during"
		    " a DMA transaction (flags: 0x%x)\n", eflags);

	/* Check transmitter errors */
	if (eflags & TSEC_IEVENT_TXE) {
		ifp->if_oerrors++;

		if (eflags & TSEC_IEVENT_LC)
			ifp->if_collisions++;

		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
	if (eflags & TSEC_IEVENT_BABT)
		ifp->if_oerrors++;

	/* Check receiver errors */
	if (eflags & TSEC_IEVENT_BSY) {
		ifp->if_ierrors++;
		ifp->if_iqdrops++;

		/* Get data from RX buffers */
		tsec_receive_intr(arg);

		/* Make receiver active again */
		TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);
	}
	if (eflags & TSEC_IEVENT_BABR)
		ifp->if_ierrors++;
}

static void
tsec_tick(void *arg)
{
	struct tsec_softc *sc = arg;
	struct ifnet *ifp;
	int link;

	TSEC_TRANSMIT_LOCK(sc);

	ifp = sc->tsec_ifp;
	link = sc->tsec_link;

	mii_tick(sc->tsec_mii);

	if (link == 0 && sc->tsec_link == 1 &&
	    (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)))
		tsec_start_locked(ifp);

	callout_reset(&sc->tsec_tick_ch, hz, tsec_tick, sc);
	TSEC_TRANSMIT_UNLOCK(sc);
}

static int
tsec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct tsec_softc *sc;
	uint32_t timeout;

	sc = device_get_softc(dev);

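	/*
	 * Only the PHY at the address matching our unit number is attached
	 * here; reads from any other address return 0 (no device).
	 */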
	if (device_get_unit(dev) != phy)
		return (0);

	sc = tsec0_sc;

	TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
	TSEC_WRITE(sc, TSEC_REG_MIIMCOM, 0);
	TSEC_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE);

	timeout = TSEC_READ_RETRY;
	while (--timeout && TSEC_READ(sc, TSEC_REG_MIIMIND) &
	    (TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);

	if (timeout == 0)
		device_printf(dev, "Timeout while reading from PHY!\n");

	return (TSEC_READ(sc, TSEC_REG_MIIMSTAT));
}

static void
tsec_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct tsec_softc *sc;
	uint32_t timeout;

	sc = device_get_softc(dev);

	if (device_get_unit(dev) != phy)
		device_printf(dev, "Trying to write to an alien PHY(%d)\n",
		    phy);

	sc = tsec0_sc;

	TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
	TSEC_WRITE(sc, TSEC_REG_MIIMCON, value);

	timeout = TSEC_READ_RETRY;
	while (--timeout &&
	    (TSEC_READ(sc, TSEC_REG_MIIMIND) & TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);

	if (timeout == 0)
		device_printf(dev, "Timeout while writing to PHY!\n");
}

static void
tsec_miibus_statchg(device_t dev)
{
	struct tsec_softc *sc;
	struct mii_data *mii;
	uint32_t ecntrl, id, tmp;
	int link;

	sc = device_get_softc(dev);
	mii = sc->tsec_mii;
	link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0);

	tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		tmp |= TSEC_MACCFG2_FULLDUPLEX;
	else
		tmp &= ~TSEC_MACCFG2_FULLDUPLEX;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		tmp |= TSEC_MACCFG2_GMII;
		sc->tsec_link = link;
		break;
	case IFM_100_TX:
	case IFM_10_T:
		tmp |= TSEC_MACCFG2_MII;
		sc->tsec_link = link;
		break;
	case IFM_NONE:
		if (link)
			device_printf(dev, "No speed selected but link "
			    "active!\n");
		sc->tsec_link = 0;
		return;
	default:
		sc->tsec_link = 0;
		device_printf(dev, "Unknown speed (%d), link %s!\n",
		    IFM_SUBTYPE(mii->mii_media_active),
		    ((link) ? "up" : "down"));
		return;
	}
	TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);

	/* XXX kludge - use circumstantial evidence for reduced mode. */
	id = TSEC_READ(sc, TSEC_REG_ID2);
	if (id & 0xffff) {
		ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
		ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
		TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);
	}
}
1649