xref: /freebsd/sys/dev/tsec/if_tsec.c (revision d876124d6ae9d56da5b4ff4c6015efd1d0c9222a)
1 /*-
2  * Copyright (C) 2006-2008 Semihalf
3  * All rights reserved.
4  *
5  * Written by: Piotr Kruszynski <ppk@semihalf.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
21  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
23  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
24  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
25  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
26  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
32  */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/endian.h>
39 #include <sys/mbuf.h>
40 #include <sys/kernel.h>
41 #include <sys/module.h>
42 #include <sys/socket.h>
43 #include <sys/sysctl.h>
44 
45 #include <net/if.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 
49 #include <net/bpf.h>
50 #include <sys/sockio.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <sys/rman.h>
54 #include <machine/resource.h>
55 
56 #include <net/ethernet.h>
57 #include <net/if_arp.h>
58 
59 #include <net/if_types.h>
60 #include <net/if_vlan_var.h>
61 
62 #include <dev/mii/mii.h>
63 #include <dev/mii/miivar.h>
64 
65 #include <machine/ocpbus.h>
66 
67 #include <dev/tsec/if_tsec.h>
68 #include <dev/tsec/if_tsecreg.h>
69 
70 #include "miibus_if.h"
71 
72 #define TSEC_DEBUG
73 
#ifdef TSEC_DEBUG
/*
 * Debug trace macro: prefixes the message with the function name and
 * line number, e.g. PDEBUG(("foo %d", x)).  Wrapped in do { } while (0)
 * so the expansion acts as a single statement; the previous bare-brace
 * form would break an unbraced if/else around a PDEBUG(...); call.
 */
#define PDEBUG(a) do {							\
	printf("%s:%d: ", __func__, __LINE__);				\
	printf a;							\
	printf("\n");							\
} while (0)
#else
#define PDEBUG(a) /* nop */
#endif
79 
80 static int	tsec_probe(device_t dev);
81 static int	tsec_attach(device_t dev);
82 static int	tsec_setup_intr(device_t dev, struct resource **ires,
83     void **ihand, int *irid, driver_intr_t handler, const char *iname);
84 static void	tsec_release_intr(device_t dev, struct resource *ires,
85     void *ihand, int irid, const char *iname);
86 static void	tsec_free_dma(struct tsec_softc *sc);
87 static int	tsec_detach(device_t dev);
88 static void	tsec_shutdown(device_t dev);
89 static int	tsec_suspend(device_t dev); /* XXX */
90 static int	tsec_resume(device_t dev); /* XXX */
91 
92 static void	tsec_init(void *xsc);
93 static void	tsec_init_locked(struct tsec_softc *sc);
94 static void	tsec_set_mac_address(struct tsec_softc *sc);
95 static void	tsec_dma_ctl(struct tsec_softc *sc, int state);
96 static void	tsec_intrs_ctl(struct tsec_softc *sc, int state);
97 static void	tsec_reset_mac(struct tsec_softc *sc);
98 
99 static void	tsec_watchdog(struct tsec_softc *sc);
100 static void	tsec_start(struct ifnet *ifp);
101 static void	tsec_start_locked(struct ifnet *ifp);
102 static int	tsec_encap(struct tsec_softc *sc,
103     struct mbuf *m_head);
104 static void	tsec_setfilter(struct tsec_softc *sc);
105 static int	tsec_ioctl(struct ifnet *ifp, u_long command,
106     caddr_t data);
107 static int	tsec_ifmedia_upd(struct ifnet *ifp);
108 static void	tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
109 static int	tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
110     struct mbuf **mbufp, uint32_t *paddr);
111 static void	tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
112     int nseg, int error);
113 static int	tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
114     bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
115     const char *dname);
116 static void	tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap,
117     void *vaddr);
118 
119 static void	tsec_stop(struct tsec_softc *sc);
120 
121 static void	tsec_receive_intr(void *arg);
122 static void	tsec_transmit_intr(void *arg);
123 static void	tsec_error_intr(void *arg);
124 
125 static void	tsec_tick(void *arg);
126 static int	tsec_miibus_readreg(device_t dev, int phy, int reg);
127 static void	tsec_miibus_writereg(device_t dev, int phy, int reg, int value);
128 static void	tsec_miibus_statchg(device_t dev);
129 
/*
 * Softc of TSEC unit 0.  tsec_init_locked() programs the MII management
 * registers through this pointer regardless of which unit is being
 * initialized, so all instances apparently share unit 0's MIIM block.
 * NOTE(review): set in tsec_attach() for unit 0 only — confirm probe
 * order guarantees unit 0 attaches first.
 */
static struct tsec_softc *tsec0_sc = NULL; /* XXX ugly hack! */

/* newbus method dispatch table: device, bus and MII interfaces. */
static device_method_t tsec_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		tsec_probe),
	DEVMETHOD(device_attach,	tsec_attach),
	DEVMETHOD(device_detach,	tsec_detach),
	DEVMETHOD(device_shutdown,	tsec_shutdown),
	DEVMETHOD(device_suspend,	tsec_suspend),
	DEVMETHOD(device_resume,	tsec_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	tsec_miibus_readreg),
	DEVMETHOD(miibus_writereg,	tsec_miibus_writereg),
	DEVMETHOD(miibus_statchg,	tsec_miibus_statchg),
	{ 0, 0 }
};

static driver_t tsec_driver = {
	"tsec",
	tsec_methods,
	sizeof(struct tsec_softc),
};

static devclass_t tsec_devclass;

/* Attach to the OCP bus; hang a miibus (PHY layer) off each instance. */
DRIVER_MODULE(tsec, ocpbus, tsec_driver, tsec_devclass, 0, 0);
DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);
164 
165 static void
166 tsec_get_hwaddr(struct tsec_softc *sc, uint8_t *addr)
167 {
168 	union {
169 		uint32_t reg[2];
170 		uint8_t addr[6];
171 	} curmac;
172 	uint32_t a[6];
173 	device_t parent;
174 	uintptr_t macaddr;
175 	int i;
176 
177 	parent = device_get_parent(sc->dev);
178 	if (BUS_READ_IVAR(parent, sc->dev, OCPBUS_IVAR_MACADDR,
179 	    &macaddr) == 0) {
180 		bcopy((uint8_t *)macaddr, addr, 6);
181 		return;
182 	}
183 
184 	/*
185 	 * Fall back -- use the currently programmed address in the hope that
186 	 * it was set be firmware...
187 	 */
188 	curmac.reg[0] = TSEC_READ(sc, TSEC_REG_MACSTNADDR1);
189 	curmac.reg[1] = TSEC_READ(sc, TSEC_REG_MACSTNADDR2);
190 	for (i = 0; i < 6; i++)
191 		a[5-i] = curmac.addr[i];
192 
193 	addr[0] = a[0];
194 	addr[1] = a[1];
195 	addr[2] = a[2];
196 	addr[3] = a[3];
197 	addr[4] = a[4];
198 	addr[5] = a[5];
199 }
200 
/*
 * if_init entry point: run the full hardware initialization under the
 * global (TX + RX) lock.
 */
static void
tsec_init(void *xsc)
{
	struct tsec_softc *sc;

	sc = xsc;
	TSEC_GLOBAL_LOCK(sc);
	tsec_init_locked(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}
210 
/*
 * Bring the controller fully up: soft-reset the MAC, walk the MPC8555E
 * reference manual initialization sequence, rebuild the TX/RX descriptor
 * rings and mark the interface running.  Must be called with the global
 * (TX + RX) lock held; any traffic in flight is lost.
 */
static void
tsec_init_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t timeout;
	uint32_t val;
	uint32_t i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	tsec_stop(sc);

	/*
	 * These steps are according to the MPC8555E PowerQUICCIII RM:
	 * 14.7 Initialization/Application Information
	 */

	/* Step 1: soft reset MAC */
	tsec_reset_mac(sc);

	/* Step 2: Initialize MACCFG2 */
	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
	);

	/* Step 3: Initialize ECNTRL
	 * While the documentation states that R100M is ignored if RPM is
	 * not set, it does seem to be needed to get the orange boxes to
	 * work (which have a Marvell 88E1111 PHY). Go figure.
	 */

	/*
	 * XXX kludge - use circumstancial evidence to program ECNTRL
	 * correctly. Ideally we need some board information to guide
	 * us here.
	 */
	i = TSEC_READ(sc, TSEC_REG_ID2);
	val = (i & 0xffff)
	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

	/* Step 4: Initialize MAC station address */
	tsec_set_mac_address(sc);

	/*
	 * Step 5: Assign a Physical address to the TBI so as to not conflict
	 * with the external PHY physical address
	 */
	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

	/*
	 * NOTE(review): steps 6-8 program the MIIM registers of unit 0
	 * (tsec0_sc) rather than this unit, presumably because the MII
	 * management interface is shared between TSECs — confirm against
	 * the reference manual.
	 */

	/* Step 6: Reset the management interface */
	TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

	/* Step 7: Setup the MII Mgmt clock speed */
	TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
	timeout = TSEC_READ_RETRY;
	while (--timeout && (TSEC_READ(tsec0_sc, TSEC_REG_MIIMIND) &
	    TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);
	if (timeout == 0) {
		/* Bail out: interface is left stopped (not RUNNING). */
		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
		return;
	}

	/* Step 9: Setup the MII Mgmt */
	mii_mediachg(sc->tsec_mii);

	/* Step 10: Clear IEVENT register */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);

	/* Step 11: Initialize IMASK */
	tsec_intrs_ctl(sc, 1);

	/* Step 12: Initialize IADDRn */
	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

	/* Step 13: Initialize GADDRn */
	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

	/* Step 14: Initialize RCTRL */
	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

	/* Step 15: Initialize DMACTRL */
	tsec_dma_ctl(sc, 1);

	/* Step 16: Initialize FIFO_PAUSE_CTRL */
	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

	/*
	 * Step 17: Initialize transmit/receive descriptor rings.
	 * Initialize TBASE and RBASE.
	 */
	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);

	/* Empty TX ring; only the last descriptor carries the wrap bit. */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		tx_desc[i].bufptr = 0;
		tx_desc[i].length = 0;
		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC-1) ? TSEC_TXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* RX ring: every descriptor owned by hardware (E) with IRQ (I). */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		rx_desc[i].bufptr = sc->rx_data[i].paddr;
		rx_desc[i].length = 0;
		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
		    ((i == TSEC_RX_NUM_DESC-1) ? TSEC_RXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* Step 18: Initialize the maximum and minimum receive buffer length */
	TSEC_WRITE(sc, TSEC_REG_MRBLR, TSEC_DEFAULT_MAX_RX_BUFFER_SIZE);
	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_DEFAULT_MIN_RX_BUFFER_SIZE);

	/* Step 19: Enable Rx and RxBD sdata snooping */
	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

	/* Step 20: Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Step 21: Mask all CAM interrupts */
	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

	/* Step 22: Enable Rx and Tx */
	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

	/* Step 23: Reset TSEC counters for Tx and Rx rings */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Step 24: Activate timer for PHY */
	callout_reset(&sc->tsec_tick_ch, hz, tsec_tick, sc);

	/* Step 25: Activate network interface */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->tsec_if_flags = ifp->if_flags;

	/*
	 * Schedule watchdog timeout.
	 * NOTE(review): this callout also dispatches tsec_tick (same
	 * routine as step 24); tsec_watchdog() is presumably driven from
	 * within tsec_tick — confirm this double scheduling is intended.
	 */
	callout_reset(&sc->wd_callout, hz, tsec_tick, sc);
}
382 
383 static void
384 tsec_set_mac_address(struct tsec_softc *sc)
385 {
386 	uint32_t macbuf[2] = { 0, 0 };
387 	int i;
388 	char *macbufp;
389 	char *curmac;
390 
391 	TSEC_GLOBAL_LOCK_ASSERT(sc);
392 
393 	KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
394 	    ("tsec_set_mac_address: (%d <= %d",
395 	    ETHER_ADDR_LEN, sizeof(macbuf)));
396 
397 	macbufp = (char *)macbuf;
398 	curmac = (char *)IF_LLADDR(sc->tsec_ifp);
399 
400 	/* Correct order of MAC address bytes */
401 	for (i = 1; i <= ETHER_ADDR_LEN; i++)
402 		macbufp[ETHER_ADDR_LEN-i] = curmac[i-1];
403 
404 	/* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */
405 	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]);
406 	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]);
407 }
408 
409 /*
410  * DMA control function, if argument state is:
411  * 0 - DMA engine will be disabled
412  * 1 - DMA engine will be enabled
413  */
static void
tsec_dma_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;
	uint32_t dma_flags;
	uint32_t timeout;

	dev = sc->dev;

	dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);

	switch (state) {
	case 0:
		/* Temporarily clear stop graceful stop bits. */
		tsec_dma_ctl(sc, 1000);

		/* Set it again */
		dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	/*
	 * State 1000 is internal only (used by the recursive call above):
	 * it performs the same register setup as enable (1), but the
	 * second switch below skips the TSTAT transmitter restart.
	 */
	case 1000:
	case 1:
		/* Set write with response (WWR), wait (WOP) and snoop bits */
		dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
		    DMACTRL_WWR | DMACTRL_WOP);

		/* Clear graceful stop bits */
		dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	default:
		device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
		    state);
	}

	/* On an unknown state this rewrites DMACTRL with its old value. */
	TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);

	switch (state) {
	case 0:
		/* Wait for DMA stop */
		timeout = TSEC_READ_RETRY;
		while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
		    (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
			DELAY(TSEC_READ_DELAY);

		if (timeout == 0)
			device_printf(dev, "tsec_dma_ctl(): timeout!\n");
		break;
	case 1:
		/* Restart transmission function */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
}
465 
466 /*
467  * Interrupts control function, if argument state is:
468  * 0 - all TSEC interrupts will be masked
469  * 1 - all TSEC interrupts will be unmasked
470  */
471 static void
472 tsec_intrs_ctl(struct tsec_softc *sc, int state)
473 {
474 	device_t dev;
475 
476 	dev = sc->dev;
477 
478 	switch (state) {
479 	case 0:
480 		TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
481 		break;
482 	case 1:
483 		TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN |
484 		    TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN |
485 		    TSEC_IMASK_EBERREN | TSEC_IMASK_BTEN |
486 		    TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN |
487 		    TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN |
488 		    TSEC_IMASK_RXFEN
489 		  );
490 		break;
491 	default:
492 		device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
493 		    state);
494 	}
495 }
496 
497 static void
498 tsec_reset_mac(struct tsec_softc *sc)
499 {
500 	uint32_t maccfg1_flags;
501 
502 	/* Set soft reset bit */
503 	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
504 	maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
505 	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
506 
507 	/* Clear soft reset bit */
508 	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
509 	maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
510 	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
511 }
512 
513 static void
514 tsec_watchdog(struct tsec_softc *sc)
515 {
516 	struct ifnet *ifp;
517 
518 	TSEC_GLOBAL_LOCK_ASSERT(sc);
519 
520 	if (sc->wd_timer == 0 || --sc->wd_timer > 0)
521 		return;
522 
523 	ifp = sc->tsec_ifp;
524 	ifp->if_oerrors++;
525 	if_printf(ifp, "watchdog timeout\n");
526 
527 	tsec_stop(sc);
528 	tsec_init_locked(sc);
529 }
530 
531 static void
532 tsec_start(struct ifnet *ifp)
533 {
534 	struct tsec_softc *sc = ifp->if_softc;
535 
536 	TSEC_TRANSMIT_LOCK(sc);
537 	tsec_start_locked(ifp);
538 	TSEC_TRANSMIT_UNLOCK(sc);
539 }
540 
/*
 * Drain the interface send queue into the TX descriptor ring: dequeue,
 * defragment, encapsulate and tap each packet; if anything was queued,
 * kick the transmitter and arm the watchdog.  Does nothing unless the
 * interface is RUNNING (and not OACTIVE) and link is up.  Called with
 * the transmit lock held.
 */
static void
tsec_start_locked(struct ifnet *ifp)
{
	struct tsec_softc *sc;
	struct mbuf *m0;
	struct mbuf *mtmp;
	unsigned int queued = 0;	/* packets handed to the ring */

	sc = ifp->if_softc;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (sc->tsec_link == 0)
		return;

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* Compact the chain; on failure keep the original chain. */
		mtmp = m_defrag(m0, M_DONTWAIT);
		if (mtmp)
			m0 = mtmp;

		if (tsec_encap(sc, m0)) {
			/* Ring full: requeue the packet and stall. */
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (queued) {
		/* Enable transmitter and watchdog timer */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
		sc->wd_timer = 5;
	}
}
590 
/*
 * Map mbuf chain m0 for DMA and fill one TX descriptor per segment.
 *
 * Returns 0 on success, -1 when the ring has no room (caller requeues
 * the packet) or the bus_dma error code on mapping failure.  On success
 * the mbuf and its DMA map are stashed for release by the TX-complete
 * interrupt path.  Called with the transmit lock held.
 */
static int
tsec_encap(struct tsec_softc *sc, struct mbuf *m0)
{
	struct tsec_desc *tx_desc = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[TSEC_TX_NUM_DESC];
	bus_dmamap_t *mapp;
	int error;
	int seg, nsegs;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	if (TSEC_FREE_TX_DESC(sc) == 0) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	mapp = TSEC_ALLOC_TX_MAP(sc);

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
	   *mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT);
	/* Also fail if the chain needs more descriptors than are free. */
	if (error != 0 || nsegs > TSEC_FREE_TX_DESC(sc) || nsegs <= 0) {
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
		TSEC_FREE_TX_MAP(sc, mapp);
		return ((error != 0) ? error : -1);
	}
	bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_PREWRITE);

	if ((ifp->if_flags & IFF_DEBUG) && (nsegs > 1))
		if_printf(ifp, "TX buffer has %d segments\n", nsegs);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		tx_desc = TSEC_GET_CUR_TX_DESC(sc);

		tx_desc->length = segs[seg].ds_len;
		tx_desc->bufptr = segs[seg].ds_addr;

		/* Keep only the ring wrap bit; rebuild the rest. */
		tx_desc->flags =
		    (tx_desc->flags & TSEC_TXBD_W) | /* wrap */
		    TSEC_TXBD_I |		/* interrupt */
		    TSEC_TXBD_R |		/* ready to send */
		    TSEC_TXBD_TC |		/* transmit the CRC sequence
						 * after the last data byte */
		    ((seg == nsegs-1) ? TSEC_TXBD_L : 0);/* last in frame */
	}

	/* Save mbuf and DMA mapping for release at later stage */
	TSEC_PUT_TX_MBUF(sc, m0);
	TSEC_PUT_TX_MAP(sc, mapp);

	return (0);
}
648 
649 static void
650 tsec_setfilter(struct tsec_softc *sc)
651 {
652 	struct ifnet *ifp;
653 	uint32_t flags;
654 
655 	ifp = sc->tsec_ifp;
656 	flags = TSEC_READ(sc, TSEC_REG_RCTRL);
657 
658 	/* Promiscuous mode */
659 	if (ifp->if_flags & IFF_PROMISC)
660 		flags |= TSEC_RCTRL_PROM;
661 	else
662 		flags &= ~TSEC_RCTRL_PROM;
663 
664 	TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
665 }
666 
/*
 * Interface ioctl handler.  SIOCSIFFLAGS drives up/down transitions and
 * promiscuous-mode changes; media ioctls are forwarded to the MII
 * layer, everything else to ether_ioctl().  On return the TX queue is
 * kicked whenever the interface is up.
 */
static int
tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	device_t dev;
	int error = 0;

	dev = sc->dev;

	switch (command) {
	case SIOCSIFFLAGS:
		TSEC_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Reprogram filter only if PROMISC toggled. */
				if ((sc->tsec_if_flags ^ ifp->if_flags) &
				    IFF_PROMISC)
					tsec_setfilter(sc);
			} else
				tsec_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				tsec_stop(sc);
		}
		/* Remember flags to detect future transitions. */
		sc->tsec_if_flags = ifp->if_flags;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media,
		    command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	/* Flush buffers if not empty */
	if (ifp->if_flags & IFF_UP)
		tsec_start(ifp);
	return (error);
}
708 
709 static int
710 tsec_ifmedia_upd(struct ifnet *ifp)
711 {
712 	struct tsec_softc *sc = ifp->if_softc;
713 	struct mii_data *mii;
714 
715 	TSEC_TRANSMIT_LOCK(sc);
716 
717 	mii = sc->tsec_mii;
718 	mii_mediachg(mii);
719 
720 	TSEC_TRANSMIT_UNLOCK(sc);
721 	return (0);
722 }
723 
724 static void
725 tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
726 {
727 	struct tsec_softc *sc = ifp->if_softc;
728 	struct mii_data *mii;
729 
730 	TSEC_TRANSMIT_LOCK(sc);
731 
732 	mii = sc->tsec_mii;
733 	mii_pollstat(mii);
734 
735 	ifmr->ifm_active = mii->mii_media_active;
736 	ifmr->ifm_status = mii->mii_media_status;
737 
738 	TSEC_TRANSMIT_UNLOCK(sc);
739 }
740 
741 static int
742 tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
743 	       uint32_t *paddr)
744 {
745 	struct mbuf *new_mbuf;
746 	bus_dma_segment_t seg[1];
747 	int error;
748 	int nsegs;
749 
750 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
751 
752 	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
753 	if (new_mbuf == NULL)
754 		return (ENOBUFS);
755 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
756 
757 	if (*mbufp) {
758 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
759 		bus_dmamap_unload(tag, map);
760 	}
761 
762 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
763 	    BUS_DMA_NOWAIT);
764 	KASSERT(nsegs == 1, ("Too many segments returned!"));
765 	if (nsegs != 1 || error)
766 		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
767 
768 #if 0
769 	if (error) {
770 		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
771 			error);
772 		m_freem(new_mbuf);
773 		return (ENOBUFS);
774 	}
775 #endif
776 
777 #if 0
778 	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
779 		("Wrong alignment of RX buffer!"));
780 #endif
781 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
782 
783 	(*mbufp) = new_mbuf;
784 	(*paddr) = seg->ds_addr;
785 	return (0);
786 }
787 
788 static void
789 tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
790 {
791 	u_int32_t *paddr;
792 
793 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
794 	paddr = arg;
795 	*paddr = segs->ds_addr;
796 }
797 
/*
 * Create a busdma tag, allocate DMA-safe, zeroed memory for a
 * descriptor ring and load it: the kernel virtual address is returned
 * through *vaddr and the bus address is written through raddr by the
 * tsec_map_dma_addr() callback.  dname is used only in diagnostics.
 * On failure everything acquired so far is torn down, *vaddr is reset
 * to NULL (so tsec_free_dma_desc() becomes a no-op) and ENXIO is
 * returned.
 */
static int
tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
{
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    dsize, 1,				/* maxsize, nsegments */
	    dsize, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    dtag);				/* dmat */

	if (error) {
		device_printf(dev, "failed to allocate busdma %s tag\n", dname);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "failed to allocate %s DMA safe memory\n",
		    dname);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize, tsec_map_dma_addr,
	    raddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get address of the %s descriptors\n",
		    dname);
		bus_dmamem_free(*dtag, *vaddr, *dmap);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	return (0);
}
844 
845 static void
846 tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
847 {
848 
849 	if (vaddr == NULL)
850 		return;
851 
852 	/* Unmap descriptors from DMA memory */
853 	bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
854 	bus_dmamap_unload(dtag, dmap);
855 
856 	/* Free descriptors memory */
857 	bus_dmamem_free(dtag, vaddr, dmap);
858 
859 	/* Destroy descriptors tag */
860 	bus_dma_tag_destroy(dtag);
861 }
862 
/*
 * Probe: verify the OCP device type, temporarily map the register
 * window and read the ID registers to confirm a TSEC is really present.
 * The resource acquired here is released again before returning;
 * tsec_attach() re-allocates it.
 */
static int
tsec_probe(device_t dev)
{
	struct tsec_softc *sc;
	device_t parent;
	uintptr_t devtype;
	int error;
	uint32_t id;

	parent = device_get_parent(dev);

	error = BUS_READ_IVAR(parent, dev, OCPBUS_IVAR_DEVTYPE, &devtype);
	if (error)
		return (error);
	if (devtype != OCPBUS_DEVTYPE_TSEC)
		return (ENXIO);

	sc = device_get_softc(dev);

	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    0ul, ~0ul, TSEC_IO_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL)
		return (ENXIO);

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* Check that we actually have a TSEC at this address */
	id = TSEC_READ(sc, TSEC_REG_ID) | TSEC_READ(sc, TSEC_REG_ID2);

	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);

	/* Both ID registers reading as zero means no device here. */
	if (id == 0)
		return (ENXIO);

	device_set_desc(dev, "Three-Speed Ethernet Controller");
	return (BUS_PROBE_DEFAULT);
}
902 
903 static int
904 tsec_attach(device_t dev)
905 {
906 	uint8_t hwaddr[ETHER_ADDR_LEN];
907 	struct tsec_softc *sc;
908 	struct ifnet *ifp;
909 	bus_dmamap_t *map_ptr;
910 	bus_dmamap_t **map_pptr;
911 	int error = 0;
912 	int i;
913 
914 	sc = device_get_softc(dev);
915 	sc->dev = dev;
916 
917 	if (device_get_unit(dev) == 0)
918 		tsec0_sc = sc; /* XXX */
919 
920 	callout_init(&sc->tsec_tick_ch, 1);
921 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "TSEC TX lock",
922 	    MTX_DEF);
923 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "TSEC RX lock",
924 	    MTX_DEF);
925 
926 	/* Reset all TSEC counters */
927 	TSEC_TX_RX_COUNTERS_INIT(sc);
928 
929 	/* Allocate IO memory for TSEC registers */
930 	sc->sc_rrid = 0;
931 	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid,
932 	    0ul, ~0ul, TSEC_IO_SIZE, RF_ACTIVE);
933 	if (sc->sc_rres == NULL) {
934 		device_printf(dev, "could not allocate IO memory range!\n");
935 		tsec_detach(dev);
936 		return (ENXIO);
937 	}
938 	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
939 	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
940 
941 	/* Stop DMA engine if enabled by firmware */
942 	tsec_dma_ctl(sc, 0);
943 
944 	/* Reset MAC */
945 	tsec_reset_mac(sc);
946 
947 	/* Disable interrupts for now */
948 	tsec_intrs_ctl(sc, 0);
949 
950 	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
951 	error = tsec_alloc_dma_desc(dev, &sc->tsec_tx_dtag, &sc->tsec_tx_dmap,
952 	    sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
953 	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");
954 	if (error) {
955 		tsec_detach(dev);
956 		return (ENXIO);
957 	}
958 
959 	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
960 	error = tsec_alloc_dma_desc(dev, &sc->tsec_rx_dtag, &sc->tsec_rx_dmap,
961 	    sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
962 	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
963 	if (error) {
964 		tsec_detach(dev);
965 		return (ENXIO);
966 	}
967 
968 	/* Allocate a busdma tag for TX mbufs. */
969 	error = bus_dma_tag_create(NULL,	/* parent */
970 	    TSEC_TXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
971 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
972 	    BUS_SPACE_MAXADDR,			/* highaddr */
973 	    NULL, NULL,				/* filtfunc, filtfuncarg */
974 	    MCLBYTES * (TSEC_TX_NUM_DESC - 1),	/* maxsize */
975 	    TSEC_TX_NUM_DESC - 1,		/* nsegments */
976 	    MCLBYTES, 0,			/* maxsegsz, flags */
977 	    NULL, NULL,				/* lockfunc, lockfuncarg */
978 	    &sc->tsec_tx_mtag);			/* dmat */
979 	if (error) {
980 		device_printf(dev, "failed to allocate busdma tag(tx mbufs)\n");
981 		tsec_detach(dev);
982 		return (ENXIO);
983 	}
984 
985 	/* Allocate a busdma tag for RX mbufs. */
986 	error = bus_dma_tag_create(NULL,	/* parent */
987 	    TSEC_RXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
988 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
989 	    BUS_SPACE_MAXADDR,			/* highaddr */
990 	    NULL, NULL,				/* filtfunc, filtfuncarg */
991 	    MCLBYTES,				/* maxsize */
992 	    1,					/* nsegments */
993 	    MCLBYTES, 0,				/* maxsegsz, flags */
994 	    NULL, NULL,			/* lockfunc, lockfuncarg */
995 	    &sc->tsec_rx_mtag);			/* dmat */
996 	if (error) {
997 		device_printf(dev, "failed to allocate busdma tag(rx mbufs)\n");
998 		tsec_detach(dev);
999 		return (ENXIO);
1000 	}
1001 
1002 	/* Create TX busdma maps */
1003 	map_ptr = sc->tx_map_data;
1004 	map_pptr = sc->tx_map_unused_data;
1005 
1006 	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
1007 		map_pptr[i] = &map_ptr[i];
1008 		error = bus_dmamap_create(sc->tsec_tx_mtag, 0,
1009 		    map_pptr[i]);
1010 		if (error) {
1011 			device_printf(dev, "failed to init TX ring\n");
1012 			tsec_detach(dev);
1013 			return (ENXIO);
1014 		}
1015 	}
1016 
1017 	/* Create RX busdma maps and zero mbuf handlers */
1018 	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
1019 		error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
1020 		    &sc->rx_data[i].map);
1021 		if (error) {
1022 			device_printf(dev, "failed to init RX ring\n");
1023 			tsec_detach(dev);
1024 			return (ENXIO);
1025 		}
1026 		sc->rx_data[i].mbuf = NULL;
1027 	}
1028 
1029 	/* Create mbufs for RX buffers */
1030 	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
1031 		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
1032 		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
1033 		if (error) {
1034 			device_printf(dev, "can't load rx DMA map %d, error = "
1035 			    "%d\n", i, error);
1036 			tsec_detach(dev);
1037 			return (error);
1038 		}
1039 	}
1040 
1041 	/* Create network interface for upper layers */
1042 	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
1043 	if (ifp == NULL) {
1044 		device_printf(dev, "if_alloc() failed\n");
1045 		tsec_detach(dev);
1046 		return (ENOMEM);
1047 	}
1048 
1049 	ifp->if_softc = sc;
1050 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1051 	ifp->if_mtu = ETHERMTU;
1052 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
1053 	ifp->if_init = tsec_init;
1054 	ifp->if_start = tsec_start;
1055 	ifp->if_ioctl = tsec_ioctl;
1056 
1057 	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
1058 	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
1059 	IFQ_SET_READY(&ifp->if_snd);
1060 
1061 	/* XXX No special features of TSEC are supported currently */
1062 	ifp->if_capabilities = 0;
1063 	ifp->if_capenable = ifp->if_capabilities;
1064 
1065 	/* Probe PHY(s) */
1066 	error = mii_phy_probe(dev, &sc->tsec_miibus, tsec_ifmedia_upd,
1067 	    tsec_ifmedia_sts);
1068 	if (error) {
1069 		device_printf(dev, "MII failed to find PHY!\n");
1070 		if_free(ifp);
1071 		sc->tsec_ifp = NULL;
1072 		tsec_detach(dev);
1073 		return (error);
1074 	}
1075 	sc->tsec_mii = device_get_softc(sc->tsec_miibus);
1076 
1077 	tsec_get_hwaddr(sc, hwaddr);
1078 	ether_ifattach(ifp, hwaddr);
1079 	callout_init(&sc->wd_callout, 0);
1080 
1081 	/* Interrupts configuration (TX/RX/ERR) */
1082 	sc->sc_transmit_irid = OCP_TSEC_RID_TXIRQ;
1083 	error = tsec_setup_intr(dev, &sc->sc_transmit_ires,
1084 	    &sc->sc_transmit_ihand, &sc->sc_transmit_irid,
1085 	    tsec_transmit_intr, "TX");
1086 	if (error) {
1087 		tsec_detach(dev);
1088 		return (error);
1089 	}
1090 
1091 	sc->sc_receive_irid = OCP_TSEC_RID_RXIRQ;
1092 	error = tsec_setup_intr(dev, &sc->sc_receive_ires,
1093 	    &sc->sc_receive_ihand, &sc->sc_receive_irid,
1094 	    tsec_receive_intr, "RX");
1095 	if (error) {
1096 		tsec_detach(dev);
1097 		return (error);
1098 	}
1099 
1100 	sc->sc_error_irid = OCP_TSEC_RID_ERRIRQ;
1101 	error = tsec_setup_intr(dev, &sc->sc_error_ires,
1102 	    &sc->sc_error_ihand, &sc->sc_error_irid,
1103 	    tsec_error_intr, "ERR");
1104 	if (error) {
1105 		tsec_detach(dev);
1106 		return (error);
1107 	}
1108 
1109 	return (0);
1110 }
1111 
1112 static int
1113 tsec_setup_intr(device_t dev, struct resource **ires, void **ihand, int *irid,
1114     driver_intr_t handler, const char *iname)
1115 {
1116 	struct tsec_softc *sc;
1117 	int error;
1118 
1119 	sc = device_get_softc(dev);
1120 
1121 	(*ires) = bus_alloc_resource_any(dev, SYS_RES_IRQ, irid, RF_ACTIVE);
1122 	if ((*ires) == NULL) {
1123 		device_printf(dev, "could not allocate %s IRQ\n", iname);
1124 		return (ENXIO);
1125 	}
1126 	error = bus_setup_intr(dev, *ires, INTR_TYPE_NET | INTR_MPSAFE,
1127 	    NULL, handler, sc, ihand);
1128 	if (error) {
1129 		device_printf(dev, "failed to set up %s IRQ\n", iname);
1130 		if (bus_release_resource(dev, SYS_RES_IRQ, *irid, *ires))
1131 			device_printf(dev, "could not release %s IRQ\n", iname);
1132 		(*ires) = NULL;
1133 		return (error);
1134 	}
1135 	return (0);
1136 }
1137 
1138 static void
1139 tsec_release_intr(device_t dev, struct resource *ires, void *ihand, int irid,
1140     const char *iname)
1141 {
1142 	int error;
1143 
1144 	if (ires == NULL)
1145 		return;
1146 
1147 	error = bus_teardown_intr(dev, ires, ihand);
1148 	if (error)
1149 		device_printf(dev, "bus_teardown_intr() failed for %s intr"
1150 		    ", error %d\n", iname, error);
1151 
1152 	error = bus_release_resource(dev, SYS_RES_IRQ, irid, ires);
1153 	if (error)
1154 		device_printf(dev, "bus_release_resource() failed for %s intr"
1155 		    ", error %d\n", iname, error);
1156 }
1157 
/*
 * Release all busdma state owned by the driver: the per-descriptor TX
 * maps, the RX mbufs together with their maps, both mbuf DMA tags and
 * finally the TX/RX descriptor rings.
 *
 * Called from tsec_detach(), which also serves as the attach error
 * unwind path, so maps/mbufs that were never created are skipped via
 * the NULL checks below.
 *
 * NOTE(review): the two bus_dma_tag_destroy() calls are unconditional;
 * this assumes they tolerate a tag that was never created when attach
 * failed early -- verify.
 */
static void
tsec_free_dma(struct tsec_softc *sc)
{
	int i;

	/* Free TX maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
		if (sc->tx_map_data[i] != NULL)
			bus_dmamap_destroy(sc->tsec_tx_mtag,
			    sc->tx_map_data[i]);
	/* Destroy tag for Tx mbufs */
	bus_dma_tag_destroy(sc->tsec_tx_mtag);

	/* Free RX mbufs and maps */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		if (sc->rx_data[i].mbuf) {
			/* Unload buffer from DMA */
			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->tsec_rx_mtag, sc->rx_data[i].map);

			/* Free buffer */
			m_freem(sc->rx_data[i].mbuf);
		}
		/* Destroy map for this buffer */
		if (sc->rx_data[i].map != NULL)
			bus_dmamap_destroy(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);
	}
	/* Destroy tag for Rx mbufs */
	bus_dma_tag_destroy(sc->tsec_rx_mtag);

	/* Unload TX/RX descriptors */
	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    sc->tsec_tx_vaddr);
	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    sc->tsec_rx_vaddr);
}
1196 
/*
 * Detach method: stop the controller, release interrupts, detach the
 * network interface and free DMA and bus resources.
 *
 * This function is also called from the attach path to unwind a
 * partially completed attach, so every step must cope with resources
 * that were never allocated (hence the NULL checks).
 */
static int
tsec_detach(device_t dev)
{
	struct tsec_softc *sc;
	int error;

	sc = device_get_softc(dev);

	/* Stop TSEC controller and free TX queue */
	if (sc->sc_rres && sc->tsec_ifp)
		tsec_shutdown(dev);

	/* Wait for stopping TSEC ticks */
	/*
	 * NOTE(review): tsec_tick() re-arms itself on sc->wd_callout while
	 * this drains sc->tsec_tick_ch -- confirm both callouts are really
	 * quiesced before resources are torn down below.
	 */
	callout_drain(&sc->tsec_tick_ch);

	/* Stop and release all interrupts */
	tsec_release_intr(dev, sc->sc_transmit_ires, sc->sc_transmit_ihand,
	    sc->sc_transmit_irid, "TX");
	tsec_release_intr(dev, sc->sc_receive_ires, sc->sc_receive_ihand,
	    sc->sc_receive_irid, "RX");
	tsec_release_intr(dev, sc->sc_error_ires, sc->sc_error_ihand,
	    sc->sc_error_irid, "ERR");

	/* Detach network interface */
	if (sc->tsec_ifp) {
		ether_ifdetach(sc->tsec_ifp);
		if_free(sc->tsec_ifp);
		sc->tsec_ifp = NULL;
	}

	/* Free DMA resources */
	tsec_free_dma(sc);

	/* Free IO memory handler */
	if (sc->sc_rres) {
		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
		    sc->sc_rres);
		if (error)
			device_printf(dev, "bus_release_resource() failed for"
			    " IO memory, error %d\n", error);
	}

	/* Destroy locks */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);
	return (0);
}
1244 
1245 static void
1246 tsec_shutdown(device_t dev)
1247 {
1248 	struct tsec_softc *sc;
1249 
1250 	sc = device_get_softc(dev);
1251 
1252 	TSEC_GLOBAL_LOCK(sc);
1253 	tsec_stop(sc);
1254 	TSEC_GLOBAL_UNLOCK(sc);
1255 }
1256 
/*
 * Suspend method stub: power management is not implemented for this
 * controller, so signal ENODEV to the caller.
 */
static int
tsec_suspend(device_t dev)
{

	/* TODO not implemented! */
	return (ENODEV);
}
1264 
/*
 * Resume method stub: power management is not implemented for this
 * controller, so signal ENODEV to the caller.
 */
static int
tsec_resume(device_t dev)
{

	/* TODO not implemented! */
	return (ENODEV);
}
1272 
/*
 * Quiesce the controller: stop the periodic tick, disable interrupts
 * and DMA, flush the pending software TX queue (unloading and freeing
 * every queued mbuf) and finally switch off the MAC RX/TX engines.
 *
 * Both the transmit and receive locks must be held by the caller, as
 * asserted by TSEC_GLOBAL_LOCK_ASSERT() below.
 */
static void
tsec_stop(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	uint32_t tmpval;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Stop PHY tick engine */
	/*
	 * NOTE(review): tsec_tick() reschedules itself on sc->wd_callout,
	 * yet sc->tsec_tick_ch is stopped here -- confirm the periodic tick
	 * is actually cancelled by this call.
	 */
	callout_stop(&sc->tsec_tick_ch);

	/* Disable interface and watchdog timer */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable all interrupts and stop DMA */
	tsec_intrs_ctl(sc, 0);
	tsec_dma_ctl(sc, 0);

	/* Remove pending data from TX queue */
	while (!TSEC_EMPTYQ_TX_MBUF(sc)) {
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);
	}

	/* Disable Rx and Tx */
	tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
	tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
	DELAY(10);
}
1314 
/*
 * RX interrupt handler: acknowledge the interrupt, then walk the RX
 * descriptor ring harvesting completed frames.  Each received mbuf is
 * replaced by a freshly allocated one (tsec_new_rxbuf()) so the
 * descriptor can be re-armed immediately; the harvested mbufs are
 * collected in rcv_mbufs[] and only pushed to the stack via if_input
 * after the receive lock has been dropped, to avoid calling into the
 * network stack with the driver lock held.
 */
static void
tsec_receive_intr(void *arg)
{
	struct mbuf *rcv_mbufs[TSEC_RX_NUM_DESC];
	struct tsec_softc *sc = arg;
	struct tsec_desc *rx_desc;
	struct ifnet *ifp;
	struct rx_data_type *rx_data;
	struct mbuf *m;
	device_t dev;
	uint32_t i;
	int count;
	int c1 = 0;	/* number of frames collected for if_input */
	int c2;
	uint16_t flags;
	uint16_t length;

	ifp = sc->tsec_ifp;
	rx_data = sc->rx_data;
	dev = sc->dev;

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);

	TSEC_RECEIVE_LOCK(sc);

	/* Make the descriptor ring coherent before the CPU reads it. */
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	/* Bounded walk: at most one full ring's worth of descriptors. */
	for (count = 0; /* count < TSEC_RX_NUM_DESC */; count++) {
		rx_desc = TSEC_GET_CUR_RX_DESC(sc);
		flags = rx_desc->flags;

		/* Check if there is anything to receive */
		if ((flags & TSEC_RXBD_E) || (count >= TSEC_RX_NUM_DESC)) {
			/*
			 * Avoid generating another interrupt
			 */
			if (flags & TSEC_RXBD_E)
				TSEC_WRITE(sc, TSEC_REG_IEVENT,
				    TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
			/*
			 * We didn't consume current descriptor and have to
			 * return it to the queue
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		/* Error frame (too long/short, CRC, overrun...): recycle it. */
		if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
		    TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {
			rx_desc->length = 0;
			rx_desc->flags = (rx_desc->flags &
			    ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I;
			continue;
		}

		if ((flags & TSEC_RXBD_L) == 0)
			device_printf(dev, "buf is not the last in frame!\n");

		/* Ok... process frame */
		length = rx_desc->length - ETHER_CRC_LEN;
		i = TSEC_GET_CUR_RX_DESC_CNT(sc);

		m = rx_data[i].mbuf;

		/*
		 * NOTE(review): on allocation failure the descriptor is left
		 * without TSEC_RXBD_E set -- verify tsec_new_rxbuf() leaves
		 * the old buffer loaded so the slot is not lost.
		 */
		if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
		    &rx_data[i].mbuf, &rx_data[i].paddr)) {
			ifp->if_ierrors++;
			continue;
		}
		/* Attach new buffer to descriptor, and clear flags */
		rx_desc->bufptr = rx_data[i].paddr;
		rx_desc->length = 0;
		rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
		    TSEC_RXBD_E | TSEC_RXBD_I;

		/* Prepare buffer for upper layers */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = length;

		/* Save it for push */
		rcv_mbufs[c1++] = m;
	}

	/* Hand the updated ring back to the hardware. */
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	TSEC_RECEIVE_UNLOCK(sc);

	/* Push it now */
	for (c2 = 0; c2 < c1; c2++)
		(*ifp->if_input)(ifp, rcv_mbufs[c2]);
}
1409 
/*
 * TX completion interrupt handler: acknowledge the interrupt, fold the
 * hardware collision counters into the interface statistics, reclaim
 * all descriptors the hardware has finished with (unmapping and
 * freeing the mbuf of each fully transmitted frame) and, if anything
 * was reclaimed, clear OACTIVE and restart transmission of queued
 * packets.
 */
static void
tsec_transmit_intr(void *arg)
{
	struct tsec_softc *sc = arg;
	struct tsec_desc *tx_desc;
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	int send = 0;	/* set when at least one frame completed */

	ifp = sc->tsec_ifp;

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);

	TSEC_TRANSMIT_LOCK(sc);

	/* Update collision statistics */
	ifp->if_collisions += TSEC_READ(sc, TSEC_REG_MON_TNCL);

	/* Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Make the descriptor ring coherent before the CPU reads it. */
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	/* Walk dirty descriptors until one still owned by the hardware. */
	while (TSEC_CUR_DIFF_DIRTY_TX_DESC(sc)) {
		tx_desc = TSEC_GET_DIRTY_TX_DESC(sc);
		if (tx_desc->flags & TSEC_TXBD_R) {
			/* Still pending in hardware: put it back and stop. */
			TSEC_BACK_DIRTY_TX_DESC(sc);
			break;
		}

		/* Intermediate buffer of a multi-descriptor frame: skip. */
		if ((tx_desc->flags & TSEC_TXBD_L) == 0)
			continue;

		/*
		 * This is the last buf in this packet, so unmap and free it.
		 */
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);

		ifp->if_opackets++;
		send = 1;
	}
	/* Hand the updated ring back to the hardware. */
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		tsec_start_locked(ifp);

		/* Stop watchdog if all sent */
		if (TSEC_EMPTYQ_TX_MBUF(sc))
			sc->wd_timer = 0;
	}
	TSEC_TRANSMIT_UNLOCK(sc);
}
1479 
/*
 * Error interrupt handler: read and acknowledge the error event bits,
 * account transmitter/receiver errors in the interface statistics and
 * un-halt the TX/RX engines where the hardware stopped them (THLT on
 * transmit error, QHLT after a busy/overrun condition).
 */
static void
tsec_error_intr(void *arg)
{
	struct tsec_softc *sc = arg;
	struct ifnet *ifp;
	uint32_t eflags;

	ifp = sc->tsec_ifp;

	eflags = TSEC_READ(sc, TSEC_REG_IEVENT);

	if (ifp->if_flags & IFF_DEBUG)
		if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", eflags);

	/* Clear events bits in hardware */
	/*
	 * NOTE(review): TSEC_IEVENT_BABR is tested below but is not part of
	 * this clear mask -- confirm it is acknowledged elsewhere, otherwise
	 * a babbling receiver could re-raise this interrupt indefinitely.
	 */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY |
	    TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT |
	    TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC |
	    TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN);

	if (eflags & TSEC_IEVENT_EBERR)
		if_printf(ifp, "System bus error occurred during"
		    " a DMA transaction (flags: 0x%x)\n", eflags);

	/* Check transmitter errors */
	if (eflags & TSEC_IEVENT_TXE) {
		ifp->if_oerrors++;

		if (eflags & TSEC_IEVENT_LC)
			ifp->if_collisions++;

		/* Clear the transmit halt condition. */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
	if (eflags & TSEC_IEVENT_BABT)
		ifp->if_oerrors++;

	/* Check receiver errors */
	if (eflags & TSEC_IEVENT_BSY) {
		ifp->if_ierrors++;
		ifp->if_iqdrops++;

		/* Get data from RX buffers */
		tsec_receive_intr(arg);

		/* Make receiver again active */
		TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);
	}
	if (eflags & TSEC_IEVENT_BABR)
		ifp->if_ierrors++;
}
1530 
1531 static void
1532 tsec_tick(void *xsc)
1533 {
1534 	struct tsec_softc *sc = xsc;
1535 	struct ifnet *ifp;
1536 	int link;
1537 
1538 	TSEC_GLOBAL_LOCK(sc);
1539 
1540 	tsec_watchdog(sc);
1541 
1542 	ifp = sc->tsec_ifp;
1543 	link = sc->tsec_link;
1544 
1545 	mii_tick(sc->tsec_mii);
1546 
1547 	if (link == 0 && sc->tsec_link == 1 && (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)))
1548 		tsec_start_locked(ifp);
1549 
1550 	/* Schedule another timeout one second from now. */
1551 	callout_reset(&sc->wd_callout, hz, tsec_tick, sc);
1552 
1553 	TSEC_GLOBAL_UNLOCK(sc);
1554 }
1555 
1556 static int
1557 tsec_miibus_readreg(device_t dev, int phy, int reg)
1558 {
1559 	struct tsec_softc *sc;
1560 	uint32_t timeout;
1561 
1562 	sc = device_get_softc(dev);
1563 
1564 	if (device_get_unit(dev) != phy)
1565 		return (0);
1566 
1567 	sc = tsec0_sc;
1568 
1569 	TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
1570 	TSEC_WRITE(sc, TSEC_REG_MIIMCOM, 0);
1571 	TSEC_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE);
1572 
1573 	timeout = TSEC_READ_RETRY;
1574 	while (--timeout && TSEC_READ(sc, TSEC_REG_MIIMIND) &
1575 	    (TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY))
1576 		DELAY(TSEC_READ_DELAY);
1577 
1578 	if (timeout == 0)
1579 		device_printf(dev, "Timeout while reading from PHY!\n");
1580 
1581 	return (TSEC_READ(sc, TSEC_REG_MIIMSTAT));
1582 }
1583 
1584 static void
1585 tsec_miibus_writereg(device_t dev, int phy, int reg, int value)
1586 {
1587 	struct tsec_softc *sc;
1588 	uint32_t timeout;
1589 
1590 	sc = device_get_softc(dev);
1591 
1592 	if (device_get_unit(dev) != phy)
1593 		device_printf(dev, "Trying to write to an alien PHY(%d)\n", phy);
1594 
1595 	sc = tsec0_sc;
1596 
1597 	TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
1598 	TSEC_WRITE(sc, TSEC_REG_MIIMCON, value);
1599 
1600 	timeout = TSEC_READ_RETRY;
1601 	while (--timeout && (TSEC_READ(sc, TSEC_REG_MIIMIND) & TSEC_MIIMIND_BUSY))
1602 		DELAY(TSEC_READ_DELAY);
1603 
1604 	if (timeout == 0)
1605 		device_printf(dev, "Timeout while writing to PHY!\n");
1606 }
1607 
/*
 * MII status-change callback: program MACCFG2 for the duplex and
 * interface mode (GMII vs MII) matching the media the PHY negotiated,
 * and track link state in sc->tsec_link.  Unknown or absent media
 * clears the link and leaves the MAC configuration untouched.
 */
static void
tsec_miibus_statchg(device_t dev)
{
	struct tsec_softc *sc;
	struct mii_data *mii;
	uint32_t ecntrl, id, tmp;
	int link;

	sc = device_get_softc(dev);
	mii = sc->tsec_mii;
	link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0);

	/* Start from MACCFG2 with the interface-mode field cleared. */
	tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		tmp |= TSEC_MACCFG2_FULLDUPLEX;
	else
		tmp &= ~TSEC_MACCFG2_FULLDUPLEX;

	/* Select the MAC interface mode for the negotiated speed. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		tmp |= TSEC_MACCFG2_GMII;
		sc->tsec_link = link;
		break;
	case IFM_100_TX:
	case IFM_10_T:
		tmp |= TSEC_MACCFG2_MII;
		sc->tsec_link = link;
		break;
	case IFM_NONE:
		if (link)
			device_printf(dev, "No speed selected but link active!\n");
		sc->tsec_link = 0;
		return;
	default:
		sc->tsec_link = 0;
		device_printf(dev, "Unknown speed (%d), link %s!\n",
		    IFM_SUBTYPE(mii->mii_media_active),
		    ((link) ? "up" : "down"));
		return;
	}
	TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);

	/* XXX kludge - use circumstantial evidence for reduced mode. */
	id = TSEC_READ(sc, TSEC_REG_ID2);
	if (id & 0xffff) {
		/*
		 * NOTE(review): presumably R100M selects the 10/100 clocking
		 * on reduced-pin (eTSEC) parts -- confirm against the
		 * controller reference manual.
		 */
		ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
		ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
		TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);
	}
}
1659 }
1660