/*-
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski <ppk@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/tsec/if_tsec.h>
#include <dev/tsec/if_tsecreg.h>

static int	tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
    bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
    const char *dname);
static void	tsec_dma_ctl(struct tsec_softc *sc, int state);
static int	tsec_encap(struct tsec_softc *sc, struct mbuf *m_head);
static void	tsec_free_dma(struct tsec_softc *sc);
static void	tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap,
    void *vaddr);
static int	tsec_ifmedia_upd(struct ifnet *ifp);
static void	tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, uint32_t *paddr);
static void	tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static void	tsec_intrs_ctl(struct tsec_softc *sc, int state);
static void	tsec_init(void *xsc);
static void	tsec_init_locked(struct tsec_softc *sc);
static int	tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void	tsec_reset_mac(struct tsec_softc *sc);
static void	tsec_setfilter(struct tsec_softc *sc);
static void	tsec_set_mac_address(struct tsec_softc *sc);
static void	tsec_start(struct ifnet *ifp);
static void	tsec_start_locked(struct ifnet *ifp);
static void	tsec_stop(struct tsec_softc *sc);
static void	tsec_tick(void *arg);
static void	tsec_watchdog(struct tsec_softc *sc);

struct tsec_softc *tsec0_sc = NULL; /* XXX ugly hack! */

devclass_t tsec_devclass;
DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);

int
tsec_attach(struct tsec_softc *sc)
{
	uint8_t hwaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	bus_dmamap_t *map_ptr;
	bus_dmamap_t **map_pptr;
	int error = 0;
	int i;

	/* Reset all TSEC counters */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Stop DMA engine if enabled by firmware */
	tsec_dma_ctl(sc, 0);

	/* Reset MAC */
	tsec_reset_mac(sc);

	/* Disable interrupts for now */
	tsec_intrs_ctl(sc, 0);

	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag, &sc->tsec_tx_dmap,
	    sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");
	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag, &sc->tsec_rx_dmap,
	    sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
		TSEC_TXBUFFER_ALIGNMENT, 0,	/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		MCLBYTES * (TSEC_TX_NUM_DESC - 1),/* maxsize */
		TSEC_TX_NUM_DESC - 1,		/* nsegments */
		MCLBYTES, 0,			/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&sc->tsec_tx_mtag);		/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag (TX mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
		TSEC_RXBUFFER_ALIGNMENT, 0,	/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		MCLBYTES,			/* maxsize */
		1,				/* nsegments */
		MCLBYTES, 0,			/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&sc->tsec_rx_mtag);		/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag (RX mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Create TX busdma maps */
	map_ptr = sc->tx_map_data;
	map_pptr = sc->tx_map_unused_data;

	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		map_pptr[i] = &map_ptr[i];
		error = bus_dmamap_create(sc->tsec_tx_mtag, 0, map_pptr[i]);
		if (error) {
			device_printf(sc->dev, "failed to init TX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
	}

	/* Create RX busdma maps and zero mbuf handlers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_rx_mtag, 0, &sc->rx_data[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init RX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->rx_data[i].mbuf = NULL;
	}

	/* Create mbufs for RX buffers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
		if (error) {
			device_printf(sc->dev, "can't load rx DMA map %d, error = "
			    "%d\n", i, error);
			tsec_detach(sc);
			return (error);
		}
	}

	/* Create network interface for upper layers */
	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->dev, "if_alloc() failed\n");
		tsec_detach(sc);
		return (ENOMEM);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
	ifp->if_init = tsec_init;
	ifp->if_start = tsec_start;
	ifp->if_ioctl = tsec_ioctl;

	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/* XXX No special features of TSEC are supported currently */
	ifp->if_capabilities = 0;
	ifp->if_capenable = ifp->if_capabilities;

	/* Probe PHY(s) */
	error = mii_phy_probe(sc->dev, &sc->tsec_miibus, tsec_ifmedia_upd,
	    tsec_ifmedia_sts);
	if (error) {
		device_printf(sc->dev, "MII failed to find PHY!\n");
		if_free(ifp);
		sc->tsec_ifp = NULL;
		tsec_detach(sc);
		return (error);
	}
	sc->tsec_mii = device_get_softc(sc->tsec_miibus);

	/* Set MAC address */
	tsec_get_hwaddr(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);

	return (0);
}

int
tsec_detach(struct tsec_softc *sc)
{

	/* Stop TSEC controller and free TX queue */
	if (sc->sc_rres && sc->tsec_ifp)
		tsec_shutdown(sc->dev);

	/* Detach network interface */
	if (sc->tsec_ifp) {
		ether_ifdetach(sc->tsec_ifp);
		if_free(sc->tsec_ifp);
		sc->tsec_ifp = NULL;
	}

	/* Free DMA resources */
	tsec_free_dma(sc);

	return (0);
}

void
tsec_shutdown(device_t dev)
{
	struct tsec_softc *sc;

	sc = device_get_softc(dev);

	TSEC_GLOBAL_LOCK(sc);
	tsec_stop(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}

int
tsec_suspend(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}

int
tsec_resume(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}

static void
tsec_init(void *xsc)
{
	struct tsec_softc *sc = xsc;

	TSEC_GLOBAL_LOCK(sc);
	tsec_init_locked(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}

static void
tsec_init_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t timeout;
	uint32_t val;
	uint32_t i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	tsec_stop(sc);

	/*
	 * These steps are according to the MPC8555E PowerQUICCIII RM:
	 * 14.7 Initialization/Application Information
	 */

	/* Step 1: soft reset MAC */
	tsec_reset_mac(sc);

	/* Step 2: Initialize MACCFG2 */
	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
	);

	/*
	 * Step 3: Initialize ECNTRL.
	 * While the documentation states that R100M is ignored if RPM is
	 * not set, it does seem to be needed to get the orange boxes to
	 * work (which have a Marvell 88E1111 PHY). Go figure.
	 */

	/*
	 * XXX kludge - use circumstantial evidence to program ECNTRL
	 * correctly. Ideally we need some board information to guide
	 * us here.
	 */
	i = TSEC_READ(sc, TSEC_REG_ID2);
	val = (i & 0xffff)
	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

	/* Step 4: Initialize MAC station address */
	tsec_set_mac_address(sc);

	/*
	 * Step 5: Assign a Physical address to the TBI so as to not conflict
	 * with the external PHY physical address
	 */
	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

	/* Step 6: Reset the management interface */
	TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

	/* Step 7: Setup the MII Mgmt clock speed */
	TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
	timeout = TSEC_READ_RETRY;
	while (--timeout && (TSEC_READ(tsec0_sc, TSEC_REG_MIIMIND) &
	    TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);
	if (timeout == 0) {
		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
		return;
	}

	/* Step 9: Setup the MII Mgmt */
	mii_mediachg(sc->tsec_mii);

	/* Step 10: Clear IEVENT register */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);

	/* Step 11: Initialize IMASK */
	tsec_intrs_ctl(sc, 1);

	/* Step 12: Initialize IADDRn */
	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

	/* Step 13: Initialize GADDRn */
	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

	/* Step 14: Initialize RCTRL */
	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

	/* Step 15: Initialize DMACTRL */
	tsec_dma_ctl(sc, 1);

	/* Step 16: Initialize FIFO_PAUSE_CTRL */
	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

	/*
	 * Step 17: Initialize transmit/receive descriptor rings.
	 * Initialize TBASE and RBASE.
	 */
	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);

	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		tx_desc[i].bufptr = 0;
		tx_desc[i].length = 0;
		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ? TSEC_TXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		rx_desc[i].bufptr = sc->rx_data[i].paddr;
		rx_desc[i].length = 0;
		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
		    ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* Step 18: Initialize the maximum and minimum receive buffer length */
	TSEC_WRITE(sc, TSEC_REG_MRBLR, TSEC_DEFAULT_MAX_RX_BUFFER_SIZE);
	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_DEFAULT_MIN_RX_BUFFER_SIZE);

	/* Step 19: Enable Rx data and RxBD snooping */
	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

	/* Step 20: Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Step 21: Mask all CAM interrupts */
	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

	/* Step 22: Enable Rx and Tx */
	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

	/* Step 23: Reset TSEC counters for Tx and Rx rings */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Step 24: Activate network interface */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->tsec_if_flags = ifp->if_flags;
	sc->tsec_watchdog = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
}

static void
tsec_set_mac_address(struct tsec_softc *sc)
{
	uint32_t macbuf[2] = { 0, 0 };
	char *macbufp;
	char *curmac;
	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
	    ("tsec_set_mac_address: (%d <= %d)", ETHER_ADDR_LEN,
	    (int)sizeof(macbuf)));

	macbufp = (char *)macbuf;
	curmac = (char *)IF_LLADDR(sc->tsec_ifp);

	/* Correct order of MAC address bytes */
	for (i = 1; i <= ETHER_ADDR_LEN; i++)
		macbufp[ETHER_ADDR_LEN-i] = curmac[i-1];

	/* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]);
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]);
}
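
/*
 * Worked example (illustrative only; the address below is made up,
 * not read from hardware): for a station address of 00:04:9f:01:02:03
 * the loop above stores the bytes reversed, so on this big-endian
 * part MACSTNADDR1 is written as 0x0302019f (the last four MAC bytes)
 * and MACSTNADDR2 as 0x04000000 (the first two MAC bytes in its
 * upper half).
 */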

/*
 * DMA control function. If the state argument is:
 * 0 - the DMA engine will be disabled
 * 1 - the DMA engine will be enabled
 * The value 1000 is used internally by the disable path to clear the
 * graceful stop bits without restarting transmission.
 */
static void
tsec_dma_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;
	uint32_t dma_flags;
	uint32_t timeout;

	dev = sc->dev;

	dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);

	switch (state) {
	case 0:
		/* Temporarily clear the graceful stop bits. */
		tsec_dma_ctl(sc, 1000);

		/* Set it again */
		dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	case 1000:
	case 1:
		/* Set write with response (WWR), wait (WOP) and snoop bits */
		dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
		    DMACTRL_WWR | DMACTRL_WOP);

		/* Clear graceful stop bits */
		dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	default:
		device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
		    state);
	}

	TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);

	switch (state) {
	case 0:
		/* Wait for DMA stop */
		timeout = TSEC_READ_RETRY;
		while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
		    (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
			DELAY(TSEC_READ_DELAY);

		if (timeout == 0)
			device_printf(dev, "tsec_dma_ctl(): timeout!\n");
		break;
	case 1:
		/* Restart transmission function */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
}

/*
 * Interrupt control function. If the state argument is:
 * 0 - all TSEC interrupts will be masked
 * 1 - all TSEC interrupts will be unmasked
 */
static void
tsec_intrs_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;

	dev = sc->dev;

	switch (state) {
	case 0:
		TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
		break;
	case 1:
		TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN | TSEC_IMASK_RXCEN |
		    TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN | TSEC_IMASK_BTEN |
		    TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN | TSEC_IMASK_TXFEN |
		    TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN);
		break;
	default:
		device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
		    state);
	}
}

static void
tsec_reset_mac(struct tsec_softc *sc)
{
	uint32_t maccfg1_flags;

	/* Set soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);

	/* Clear soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
}

static void
tsec_watchdog(struct tsec_softc *sc)
{
	struct ifnet *ifp;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0)
		return;

	ifp = sc->tsec_ifp;
	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	tsec_stop(sc);
	tsec_init_locked(sc);
}

static void
tsec_start(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;

	TSEC_TRANSMIT_LOCK(sc);
	tsec_start_locked(ifp);
	TSEC_TRANSMIT_UNLOCK(sc);
}

static void
tsec_start_locked(struct ifnet *ifp)
{
	struct tsec_softc *sc;
	struct mbuf *m0;
	struct mbuf *mtmp;
	unsigned int queued = 0;

	sc = ifp->if_softc;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (sc->tsec_link == 0)
		return;

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		mtmp = m_defrag(m0, M_DONTWAIT);
		if (mtmp)
			m0 = mtmp;

		if (tsec_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	if (queued) {
		/* Enable transmitter and watchdog timer */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
		sc->tsec_watchdog = 5;
	}
}

static int
tsec_encap(struct tsec_softc *sc, struct mbuf *m0)
{
	struct tsec_desc *tx_desc = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[TSEC_TX_NUM_DESC];
	bus_dmamap_t *mapp;
	int error;
	int seg, nsegs;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	if (TSEC_FREE_TX_DESC(sc) == 0) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	mapp = TSEC_ALLOC_TX_MAP(sc);

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
	   *mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0 || nsegs > TSEC_FREE_TX_DESC(sc) || nsegs <= 0) {
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
		TSEC_FREE_TX_MAP(sc, mapp);
		return ((error != 0) ? error : -1);
	}
	bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_PREWRITE);

	if ((ifp->if_flags & IFF_DEBUG) && (nsegs > 1))
		if_printf(ifp, "TX buffer has %d segments\n", nsegs);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		tx_desc = TSEC_GET_CUR_TX_DESC(sc);

		tx_desc->length = segs[seg].ds_len;
		tx_desc->bufptr = segs[seg].ds_addr;

		tx_desc->flags =
		    (tx_desc->flags & TSEC_TXBD_W) | /* wrap */
		    TSEC_TXBD_I |		/* interrupt */
		    TSEC_TXBD_R |		/* ready to send */
		    TSEC_TXBD_TC |		/* transmit the CRC sequence
						 * after the last data byte */
		    ((seg == nsegs-1) ? TSEC_TXBD_L : 0);/* last in frame */
	}
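
	/*
	 * Setting TSEC_TXBD_R above hands each descriptor over to the
	 * controller; the updated ring reaches the hardware once the
	 * caller (tsec_start_locked()) performs its BUS_DMASYNC_PREWRITE
	 * sync and writes TSEC_REG_TSTAT.
	 */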

	/* Save mbuf and DMA mapping for release at later stage */
	TSEC_PUT_TX_MBUF(sc, m0);
	TSEC_PUT_TX_MAP(sc, mapp);

	return (0);
}

static void
tsec_setfilter(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	uint32_t flags;

	ifp = sc->tsec_ifp;
	flags = TSEC_READ(sc, TSEC_REG_RCTRL);

	/* Promiscuous mode */
	if (ifp->if_flags & IFF_PROMISC)
		flags |= TSEC_RCTRL_PROM;
	else
		flags &= ~TSEC_RCTRL_PROM;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
}

static int
tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	device_t dev;
	int error = 0;

	dev = sc->dev;

	switch (command) {
	case SIOCSIFFLAGS:
		TSEC_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((sc->tsec_if_flags ^ ifp->if_flags) & IFF_PROMISC)
					tsec_setfilter(sc);
			} else
				tsec_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			tsec_stop(sc);

		sc->tsec_if_flags = ifp->if_flags;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	/* Flush buffers if not empty */
	if (ifp->if_flags & IFF_UP)
		tsec_start(ifp);
	return (error);
}

static int
tsec_ifmedia_upd(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_mediachg(mii);

	TSEC_TRANSMIT_UNLOCK(sc);
	return (0);
}

static void
tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	TSEC_TRANSMIT_UNLOCK(sc);
}

static int
tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    uint32_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
			BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

#if 0
	if (error) {
		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
			error);
		m_freem(new_mbuf);
		return (ENOBUFS);
	}
#endif

#if 0
	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
		("Wrong alignment of RX buffer!"));
#endif
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}
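
/*
 * The single-segment invariant asserted above holds because the RX
 * mbuf tag is created in tsec_attach() with nsegments = 1 and
 * maxsize = MCLBYTES, so loading a single mbuf cluster can never
 * produce more than one DMA segment.
 */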

static void
tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

static int
tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
{
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    dsize, 1,				/* maxsize, nsegments */
	    dsize, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    dtag);				/* dmat */

	if (error) {
		device_printf(dev, "failed to allocate busdma %s tag\n", dname);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
				dmap);
	if (error) {
		device_printf(dev, "failed to allocate %s DMA safe memory\n",
			dname);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize, tsec_map_dma_addr,
	    raddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get address of the %s descriptors\n",
		    dname);
		bus_dmamem_free(*dtag, *vaddr, *dmap);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	return (0);
}

static void
tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
{

	if (vaddr == NULL)
		return;

	/* Unmap descriptors from DMA memory */
	bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dtag, dmap);

	/* Free descriptors memory */
	bus_dmamem_free(dtag, vaddr, dmap);

	/* Destroy descriptors tag */
	bus_dma_tag_destroy(dtag);
}

static void
tsec_free_dma(struct tsec_softc *sc)
{
	int i;

	/* Free TX maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
		if (sc->tx_map_data[i] != NULL)
			bus_dmamap_destroy(sc->tsec_tx_mtag, sc->tx_map_data[i]);
	/* Destroy tag for Tx mbufs */
	bus_dma_tag_destroy(sc->tsec_tx_mtag);

	/* Free RX mbufs and maps */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		if (sc->rx_data[i].mbuf) {
			/* Unload buffer from DMA */
			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->tsec_rx_mtag, sc->rx_data[i].map);

			/* Free buffer */
			m_freem(sc->rx_data[i].mbuf);
		}
		/* Destroy map for this buffer */
		if (sc->rx_data[i].map != NULL)
			bus_dmamap_destroy(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);
	}
	/* Destroy tag for Rx mbufs */
	bus_dma_tag_destroy(sc->tsec_rx_mtag);

	/* Unload TX/RX descriptors */
	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    sc->tsec_tx_vaddr);
	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    sc->tsec_rx_vaddr);
}

static void
tsec_stop(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	uint32_t tmpval;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Stop tick engine */
	callout_stop(&sc->tsec_callout);

	/* Disable interface and watchdog timer */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->tsec_watchdog = 0;

	/* Disable all interrupts and stop DMA */
	tsec_intrs_ctl(sc, 0);
	tsec_dma_ctl(sc, 0);

	/* Remove pending data from TX queue */
	while (!TSEC_EMPTYQ_TX_MBUF(sc)) {
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);
	}

	/* Disable Rx and Tx */
	tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
	tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
	DELAY(10);
}

void
tsec_receive_intr(void *arg)
{
	struct mbuf *rcv_mbufs[TSEC_RX_NUM_DESC];
	struct tsec_softc *sc = arg;
	struct tsec_desc *rx_desc;
	struct ifnet *ifp;
	struct rx_data_type *rx_data;
	struct mbuf *m;
	device_t dev;
	uint32_t i;
	int count;
	int c1 = 0;
	int c2;
	uint16_t flags;
	uint16_t length;

	ifp = sc->tsec_ifp;
	rx_data = sc->rx_data;
	dev = sc->dev;

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);

	TSEC_RECEIVE_LOCK(sc);

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	for (count = 0; /* count < TSEC_RX_NUM_DESC */; count++) {
		rx_desc = TSEC_GET_CUR_RX_DESC(sc);
		flags = rx_desc->flags;

		/* Check if there is anything to receive */
		if ((flags & TSEC_RXBD_E) || (count >= TSEC_RX_NUM_DESC)) {
			/*
			 * Avoid generating another interrupt
			 */
			if (flags & TSEC_RXBD_E)
				TSEC_WRITE(sc, TSEC_REG_IEVENT,
				    TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
			/*
			 * We didn't consume current descriptor and have to
			 * return it to the queue
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
		    TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {

			rx_desc->length = 0;
			rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
			    TSEC_RXBD_E | TSEC_RXBD_I;
			continue;
		}

		if ((flags & TSEC_RXBD_L) == 0)
			device_printf(dev, "buf is not the last in frame!\n");

		/* Ok... process frame */
		length = rx_desc->length - ETHER_CRC_LEN;
		i = TSEC_GET_CUR_RX_DESC_CNT(sc);

		m = rx_data[i].mbuf;

		if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
		    &rx_data[i].mbuf, &rx_data[i].paddr)) {
			ifp->if_ierrors++;
			continue;
		}
		/* Attach new buffer to descriptor, and clear flags */
		rx_desc->bufptr = rx_data[i].paddr;
		rx_desc->length = 0;
		rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
		    TSEC_RXBD_E | TSEC_RXBD_I;

		/* Prepare buffer for upper layers */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = length;

		/* Save it for push */
		rcv_mbufs[c1++] = m;
	}

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	TSEC_RECEIVE_UNLOCK(sc);

	/* Push it now */
	for (c2 = 0; c2 < c1; c2++)
		(*ifp->if_input)(ifp, rcv_mbufs[c2]);
}

void
tsec_transmit_intr(void *arg)
{
	struct tsec_softc *sc = arg;
	struct tsec_desc *tx_desc;
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	int send = 0;

	ifp = sc->tsec_ifp;

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);

	TSEC_TRANSMIT_LOCK(sc);

	/* Update collision statistics */
	ifp->if_collisions += TSEC_READ(sc, TSEC_REG_MON_TNCL);

	/* Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (TSEC_CUR_DIFF_DIRTY_TX_DESC(sc)) {
		tx_desc = TSEC_GET_DIRTY_TX_DESC(sc);
		if (tx_desc->flags & TSEC_TXBD_R) {
			TSEC_BACK_DIRTY_TX_DESC(sc);
			break;
		}

		if ((tx_desc->flags & TSEC_TXBD_L) == 0)
			continue;

		/*
		 * This is the last buf in this packet, so unmap and free it.
		 */
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);

		ifp->if_opackets++;
		send = 1;
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		tsec_start_locked(ifp);

		/* Stop watchdog if all sent */
		if (TSEC_EMPTYQ_TX_MBUF(sc))
			sc->tsec_watchdog = 0;
	}
	TSEC_TRANSMIT_UNLOCK(sc);
}

void
tsec_error_intr(void *arg)
{
	struct tsec_softc *sc = arg;
	struct ifnet *ifp;
	uint32_t eflags;

	ifp = sc->tsec_ifp;

	eflags = TSEC_READ(sc, TSEC_REG_IEVENT);

	if (ifp->if_flags & IFF_DEBUG)
		if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", eflags);

	/* Clear events bits in hardware */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY |
	    TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT |
	    TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC |
	    TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN);

	if (eflags & TSEC_IEVENT_EBERR)
		if_printf(ifp, "System bus error occurred during"
		    " a DMA transaction (flags: 0x%x)\n", eflags);

	/* Check transmitter errors */
	if (eflags & TSEC_IEVENT_TXE) {
		ifp->if_oerrors++;

		if (eflags & TSEC_IEVENT_LC)
			ifp->if_collisions++;

		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
	if (eflags & TSEC_IEVENT_BABT)
		ifp->if_oerrors++;

	/* Check receiver errors */
	if (eflags & TSEC_IEVENT_BSY) {
		ifp->if_ierrors++;
		ifp->if_iqdrops++;

		/* Get data from RX buffers */
		tsec_receive_intr(arg);

		/* Make receiver again active */
		TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);
	}
	if (eflags & TSEC_IEVENT_BABR)
		ifp->if_ierrors++;
}

static void
tsec_tick(void *xsc)
{
	struct tsec_softc *sc = xsc;
	struct ifnet *ifp;
	int link;

	TSEC_GLOBAL_LOCK(sc);

	tsec_watchdog(sc);

	ifp = sc->tsec_ifp;
	link = sc->tsec_link;

	mii_tick(sc->tsec_mii);

	if (link == 0 && sc->tsec_link == 1 && (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)))
		tsec_start_locked(ifp);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);

	TSEC_GLOBAL_UNLOCK(sc);
}

int
tsec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct tsec_softc *sc;
	uint32_t timeout;

	sc = device_get_softc(dev);

	if (device_get_unit(dev) != phy)
		return (0);

	sc = tsec0_sc;

	TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
	TSEC_WRITE(sc, TSEC_REG_MIIMCOM, 0);
	TSEC_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE);

	timeout = TSEC_READ_RETRY;
	while (--timeout && TSEC_READ(sc, TSEC_REG_MIIMIND) &
	    (TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);

	if (timeout == 0)
		device_printf(dev, "Timeout while reading from PHY!\n");

	return (TSEC_READ(sc, TSEC_REG_MIIMSTAT));
}
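
/*
 * Usage sketch (hypothetical, for illustration only): the MII layer
 * calls this method via miibus, e.g. to poll link status:
 *
 *	int bmsr = tsec_miibus_readreg(dev, phy, MII_BMSR);
 *	if (bmsr & BMSR_LINK)
 *		...			(link is up)
 *
 * MII_BMSR and BMSR_LINK are the standard definitions from
 * <dev/mii/mii.h>, which is already included above.
 */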

void
tsec_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct tsec_softc *sc;
	uint32_t timeout;

	sc = device_get_softc(dev);

	if (device_get_unit(dev) != phy)
		device_printf(dev, "Trying to write to an alien PHY (%d)\n", phy);

	sc = tsec0_sc;

	TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
	TSEC_WRITE(sc, TSEC_REG_MIIMCON, value);

	timeout = TSEC_READ_RETRY;
	while (--timeout && (TSEC_READ(sc, TSEC_REG_MIIMIND) & TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);

	if (timeout == 0)
		device_printf(dev, "Timeout while writing to PHY!\n");
}

void
tsec_miibus_statchg(device_t dev)
{
	struct tsec_softc *sc;
	struct mii_data *mii;
	uint32_t ecntrl, id, tmp;
	int link;

	sc = device_get_softc(dev);
	mii = sc->tsec_mii;
	link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0);

	tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		tmp |= TSEC_MACCFG2_FULLDUPLEX;
	else
		tmp &= ~TSEC_MACCFG2_FULLDUPLEX;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		tmp |= TSEC_MACCFG2_GMII;
		sc->tsec_link = link;
		break;
	case IFM_100_TX:
	case IFM_10_T:
		tmp |= TSEC_MACCFG2_MII;
		sc->tsec_link = link;
		break;
	case IFM_NONE:
		if (link)
			device_printf(dev, "No speed selected but link active!\n");
		sc->tsec_link = 0;
		return;
	default:
		sc->tsec_link = 0;
		device_printf(dev, "Unknown speed (%d), link %s!\n",
		    IFM_SUBTYPE(mii->mii_media_active),
		    ((link) ? "up" : "down"));
		return;
	}
	TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);

	/* XXX kludge - use circumstantial evidence for reduced mode. */
	id = TSEC_READ(sc, TSEC_REG_ID2);
	if (id & 0xffff) {
		ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
		ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
		TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);
	}
}