/*-
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski
 * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/tsec/if_tsec.h>
#include <dev/tsec/if_tsecreg.h>

static int	tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
    bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
    const char *dname);
static void	tsec_dma_ctl(struct tsec_softc *sc, int state);
static int	tsec_encap(struct tsec_softc *sc, struct mbuf *m_head,
    int fcb_inserted);
static void	tsec_free_dma(struct tsec_softc *sc);
static void	tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap,
    void *vaddr);
static int	tsec_ifmedia_upd(struct ifnet *ifp);
static void	tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, uint32_t *paddr);
static void	tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static void	tsec_intrs_ctl(struct tsec_softc *sc, int state);
static void	tsec_init(void *xsc);
static void	tsec_init_locked(struct tsec_softc *sc);
static int	tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void	tsec_reset_mac(struct tsec_softc *sc);
static void	tsec_setfilter(struct tsec_softc *sc);
static void	tsec_set_mac_address(struct tsec_softc *sc);
static void	tsec_start(struct ifnet *ifp);
static void	tsec_start_locked(struct ifnet *ifp);
static void	tsec_stop(struct tsec_softc *sc);
static void	tsec_tick(void *arg);
static void	tsec_watchdog(struct tsec_softc *sc);
static void	tsec_add_sysctls(struct tsec_softc *sc);
static int	tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS);
static int	tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS);
static void	tsec_set_rxic(struct tsec_softc *sc);
static void	tsec_set_txic(struct tsec_softc *sc);
static int	tsec_receive_intr_locked(struct tsec_softc *sc, int count);
static void	tsec_transmit_intr_locked(struct tsec_softc *sc);
static void	tsec_error_intr_locked(struct tsec_softc *sc, int count);
static void	tsec_offload_setup(struct tsec_softc *sc);
static void	tsec_offload_process_frame(struct tsec_softc *sc,
    struct mbuf *m);
static void	tsec_setup_multicast(struct tsec_softc *sc);
static int	tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu);

devclass_t tsec_devclass;
DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);

int
tsec_attach(struct tsec_softc *sc)
{
	uint8_t hwaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	bus_dmamap_t *map_ptr;
	bus_dmamap_t **map_pptr;
	int error = 0;
	int i;

	/* Reset all TSEC counters */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Stop DMA engine if enabled by firmware */
	tsec_dma_ctl(sc, 0);

	/* Reset MAC */
	tsec_reset_mac(sc);

	/* Disable interrupts for now */
	tsec_intrs_ctl(sc, 0);

	/* Configure defaults for interrupt coalescing */
	sc->rx_ic_time = 768;
	sc->rx_ic_count = 16;
	sc->tx_ic_time = 768;
	sc->tx_ic_count = 16;
	tsec_set_rxic(sc);
	tsec_set_txic(sc);
	tsec_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag,
	    &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");

	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag,
	    &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_TXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES * (TSEC_TX_NUM_DESC - 1),	/* maxsize */
	    TSEC_TX_NUM_DESC - 1,		/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_tx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(tx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_RXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_rx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(rx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Create TX busdma maps */
	map_ptr = sc->tx_map_data;
	map_pptr = sc->tx_map_unused_data;

	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		map_pptr[i] = &map_ptr[i];
		error = bus_dmamap_create(sc->tsec_tx_mtag, 0, map_pptr[i]);
		if (error) {
			device_printf(sc->dev, "failed to init TX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
	}

	/* Create RX busdma maps and zero mbuf handlers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
		    &sc->rx_data[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init RX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->rx_data[i].mbuf = NULL;
	}

	/* Create mbufs for RX buffers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
		if (error) {
			device_printf(sc->dev, "can't load rx DMA map %d, "
			    "error = %d\n", i, error);
			tsec_detach(sc);
			return (error);
		}
	}

	/* Create network interface for upper layers */
	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->dev, "if_alloc() failed\n");
		tsec_detach(sc);
		return (ENOMEM);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_init = tsec_init;
	ifp->if_start = tsec_start;
	ifp->if_ioctl = tsec_ioctl;

	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->is_etsec)
		ifp->if_capabilities |= IFCAP_HWCSUM;

	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Attach PHY(s) */
	error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd,
	    tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY,
	    0);
	if (error) {
		device_printf(sc->dev, "attaching PHYs failed\n");
		if_free(ifp);
		sc->tsec_ifp = NULL;
		tsec_detach(sc);
		return (error);
	}
	sc->tsec_mii = device_get_softc(sc->tsec_miibus);

	/* Set MAC address */
	tsec_get_hwaddr(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);

	return (0);
}

int
tsec_detach(struct tsec_softc *sc)
{

	if (sc->tsec_ifp != NULL) {
#ifdef DEVICE_POLLING
		if (sc->tsec_ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(sc->tsec_ifp);
#endif

		/* Stop TSEC controller and free TX queue */
		if (sc->sc_rres)
			tsec_shutdown(sc->dev);

		/* Detach network interface */
		ether_ifdetach(sc->tsec_ifp);
		if_free(sc->tsec_ifp);
		sc->tsec_ifp = NULL;
	}

	/* Free DMA resources */
	tsec_free_dma(sc);

	return (0);
}

int
tsec_shutdown(device_t dev)
{
	struct tsec_softc *sc;

	sc = device_get_softc(dev);

	TSEC_GLOBAL_LOCK(sc);
	tsec_stop(sc);
	TSEC_GLOBAL_UNLOCK(sc);
	return (0);
}

int
tsec_suspend(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}

int
tsec_resume(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}

static void
tsec_init(void *xsc)
{
	struct tsec_softc *sc = xsc;

	TSEC_GLOBAL_LOCK(sc);
	tsec_init_locked(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}

static void
tsec_init_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t timeout, val, i;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	tsec_stop(sc);

	/*
	 * These steps are according to the MPC8555E PowerQUICCIII RM:
	 * 14.7 Initialization/Application Information
	 */

	/* Step 1: soft reset MAC */
	tsec_reset_mac(sc);

	/* Step 2: Initialize MACCFG2 */
	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
	);

	/*
	 * Step 3: Initialize ECNTRL.
	 *
	 * While the documentation states that R100M is ignored if RPM is
	 * not set, it does seem to be needed to get the orange boxes to
	 * work (which have a Marvell 88E1111 PHY). Go figure.
	 */

	/*
	 * XXX kludge - use circumstantial evidence to program ECNTRL
	 * correctly. Ideally we need some board information to guide
	 * us here.
	 */
	i = TSEC_READ(sc, TSEC_REG_ID2);
	val = (i & 0xffff)
	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

	/* Step 4: Initialize MAC station address */
	tsec_set_mac_address(sc);

	/*
	 * Step 5: Assign a Physical address to the TBI so as to not conflict
	 * with the external PHY physical address
	 */
	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

	/* Step 6: Reset the management interface */
	TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

	/* Step 7: Setup the MII Mgmt clock speed */
	TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
	timeout = TSEC_READ_RETRY;
	while (--timeout && (TSEC_READ(sc->phy_sc, TSEC_REG_MIIMIND) &
	    TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);
	if (timeout == 0) {
		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
		return;
	}

	/* Step 9: Setup the MII Mgmt */
	mii_mediachg(sc->tsec_mii);

	/* Step 10: Clear IEVENT register */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);

	/* Step 11: Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		tsec_intrs_ctl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	tsec_intrs_ctl(sc, 1);

	/* Step 12: Initialize IADDRn */
	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

	/* Step 13: Initialize GADDRn */
	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

	/* Step 14: Initialize RCTRL */
	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

	/* Step 15: Initialize DMACTRL */
	tsec_dma_ctl(sc, 1);

	/* Step 16: Initialize FIFO_PAUSE_CTRL */
	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

	/*
	 * Step 17: Initialize transmit/receive descriptor rings.
	 * Initialize TBASE and RBASE.
	 */
	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);

	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		tx_desc[i].bufptr = 0;
		tx_desc[i].length = 0;
		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ?
		    TSEC_TXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		rx_desc[i].bufptr = sc->rx_data[i].paddr;
		rx_desc[i].length = 0;
		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
		    ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Step 18: Initialize the maximum receive buffer length */
	TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES);

	/* Step 19: Configure ethernet frame sizes */
	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE);
	tsec_set_mtu(sc, ifp->if_mtu);

	/* Step 20: Enable Rx and RxBD sdata snooping */
	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

	/* Step 21: Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Step 22: Mask all CAM interrupts */
	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

	/* Step 23: Enable Rx and Tx */
	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

	/* Step 24: Reset TSEC counters for Tx and Rx rings */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Step 25: Setup TCP/IP Off-Load engine */
	if (sc->is_etsec)
		tsec_offload_setup(sc);

	/* Step 26: Setup multicast filters */
	tsec_setup_multicast(sc);

	/* Step 27: Activate network interface */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->tsec_if_flags = ifp->if_flags;
	sc->tsec_watchdog = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
}

static void
tsec_set_mac_address(struct tsec_softc *sc)
{
	uint32_t macbuf[2] = { 0, 0 };
	char *macbufp, *curmac;
	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
	    ("tsec_set_mac_address: (%d <= %zd)", ETHER_ADDR_LEN,
	    sizeof(macbuf)));

	macbufp = (char *)macbuf;
	curmac = (char *)IF_LLADDR(sc->tsec_ifp);

	/* Reverse the order of the MAC address bytes */
	for (i = 1; i <= ETHER_ADDR_LEN; i++)
		macbufp[ETHER_ADDR_LEN-i] = curmac[i-1];
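	/*
	 * Worked example (assuming the usual big-endian layout of the
	 * PowerPC targets this driver runs on): the MAC address
	 * 00:11:22:33:44:55 yields macbuf[0] == 0x55443322 and
	 * macbuf[1] == 0x11000000, i.e. the octets in reverse order,
	 * which is the layout the station address registers expect.
	 */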

	/* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]);
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]);
}

/*
 * DMA control function. If the state argument is:
 * 0 - the DMA engine will be disabled
 * 1 - the DMA engine will be enabled
 */
static void
tsec_dma_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;
	uint32_t dma_flags, timeout;

	dev = sc->dev;

	dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);

	switch (state) {
	case 0:
		/* Temporarily clear the graceful stop bits. */
		tsec_dma_ctl(sc, 1000);

		/* Set it again */
		dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
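	/*
	 * Note: 1000 is an internal pseudo-state. It clears the graceful
	 * stop bits just like state 1, but is used only by the recursive
	 * call above and therefore skips the transmit restart in the
	 * second switch below.
	 */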
	case 1000:
	case 1:
		/* Set write with response (WWR), wait (WOP) and snoop bits */
		dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
		    DMACTRL_WWR | DMACTRL_WOP);

		/* Clear graceful stop bits */
		dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	default:
		device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
		    state);
	}

	TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);

	switch (state) {
	case 0:
		/* Wait for DMA stop */
		timeout = TSEC_READ_RETRY;
		while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
		    (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
			DELAY(TSEC_READ_DELAY);

		if (timeout == 0)
			device_printf(dev, "tsec_dma_ctl(): timeout!\n");
		break;
	case 1:
		/* Restart transmission function */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
}

/*
 * Interrupt control function. If the state argument is:
 * 0 - all TSEC interrupts will be masked
 * 1 - all TSEC interrupts will be unmasked
 */
static void
tsec_intrs_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;

	dev = sc->dev;

	switch (state) {
	case 0:
		TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
		break;
	case 1:
		TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN |
		    TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN |
		    TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN |
		    TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN);
		break;
	default:
		device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
		    state);
	}
}

static void
tsec_reset_mac(struct tsec_softc *sc)
{
	uint32_t maccfg1_flags;

	/* Set soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);

	/* Clear soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
}

static void
tsec_watchdog(struct tsec_softc *sc)
{
	struct ifnet *ifp;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

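	/*
	 * The counter is armed (set to 5) whenever tsec_start_locked()
	 * queues frames and cleared once the TX queue drains, so with
	 * tsec_tick() running every second this only fires after roughly
	 * five seconds without transmit progress.
	 */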
	if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0)
		return;

	ifp = sc->tsec_ifp;
	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	tsec_stop(sc);
	tsec_init_locked(sc);
}

static void
tsec_start(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;

	TSEC_TRANSMIT_LOCK(sc);
	tsec_start_locked(ifp);
	TSEC_TRANSMIT_UNLOCK(sc);
}

static void
tsec_start_locked(struct ifnet *ifp)
{
	struct tsec_softc *sc;
	struct mbuf *m0, *mtmp;
	struct tsec_tx_fcb *tx_fcb;
	unsigned int queued = 0;
	int csum_flags, fcb_inserted = 0;

	sc = ifp->if_softc;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (sc->tsec_link == 0)
		return;

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		/* Get packet from the queue */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* Insert TCP/IP Off-load frame control block */
		csum_flags = m0->m_pkthdr.csum_flags;
		if (csum_flags) {
			M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_NOWAIT);
			if (m0 == NULL)
				break;

			tx_fcb = mtod(m0, struct tsec_tx_fcb *);
			tx_fcb->flags = 0;
			tx_fcb->l3_offset = ETHER_HDR_LEN;
			tx_fcb->l4_offset = sizeof(struct ip);

			if (csum_flags & CSUM_IP)
				tx_fcb->flags |= TSEC_TX_FCB_IP4 |
				    TSEC_TX_FCB_CSUM_IP;

			if (csum_flags & CSUM_TCP)
				tx_fcb->flags |= TSEC_TX_FCB_TCP |
				    TSEC_TX_FCB_CSUM_TCP_UDP;

			if (csum_flags & CSUM_UDP)
				tx_fcb->flags |= TSEC_TX_FCB_UDP |
				    TSEC_TX_FCB_CSUM_TCP_UDP;

			fcb_inserted = 1;
		}

		mtmp = m_defrag(m0, M_NOWAIT);
		if (mtmp)
			m0 = mtmp;

		if (tsec_encap(sc, m0, fcb_inserted)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (queued) {
		/* Enable transmitter and watchdog timer */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
		sc->tsec_watchdog = 5;
	}
}

static int
tsec_encap(struct tsec_softc *sc, struct mbuf *m0, int fcb_inserted)
{
	struct tsec_desc *tx_desc = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[TSEC_TX_NUM_DESC];
	bus_dmamap_t *mapp;
	int csum_flag = 0, error, seg, nsegs;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	if (TSEC_FREE_TX_DESC(sc) == 0) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	mapp = TSEC_ALLOC_TX_MAP(sc);

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
	    *mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0 || nsegs > TSEC_FREE_TX_DESC(sc) || nsegs <= 0) {
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
		TSEC_FREE_TX_MAP(sc, mapp);
		return ((error != 0) ? error : -1);
	}
	bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_PREWRITE);

	if ((ifp->if_flags & IFF_DEBUG) && (nsegs > 1))
		if_printf(ifp, "TX buffer has %d segments\n", nsegs);

	if (fcb_inserted)
		csum_flag = TSEC_TXBD_TOE;

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		tx_desc = TSEC_GET_CUR_TX_DESC(sc);

		tx_desc->length = segs[seg].ds_len;
		tx_desc->bufptr = segs[seg].ds_addr;

		/*
		 * Set flags:
		 *   - wrap
		 *   - checksum
		 *   - ready to send
		 *   - transmit the CRC sequence after the last data byte
		 *   - interrupt after the last buffer
		 */
		tx_desc->flags =
		    (tx_desc->flags & TSEC_TXBD_W) |
		    ((seg == 0) ? csum_flag : 0) | TSEC_TXBD_R | TSEC_TXBD_TC |
		    ((seg == nsegs - 1) ? TSEC_TXBD_L | TSEC_TXBD_I : 0);
	}

	/* Save mbuf and DMA mapping for release at later stage */
	TSEC_PUT_TX_MBUF(sc, m0);
	TSEC_PUT_TX_MAP(sc, mapp);

	return (0);
}

static void
tsec_setfilter(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	uint32_t flags;

	ifp = sc->tsec_ifp;
	flags = TSEC_READ(sc, TSEC_REG_RCTRL);

	/* Promiscuous mode */
	if (ifp->if_flags & IFF_PROMISC)
		flags |= TSEC_RCTRL_PROM;
	else
		flags &= ~TSEC_RCTRL_PROM;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
}

#ifdef DEVICE_POLLING
static poll_handler_t tsec_poll;

static int
tsec_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	uint32_t ie;
	struct tsec_softc *sc = ifp->if_softc;
	int rx_npkts;

	rx_npkts = 0;

	TSEC_GLOBAL_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		TSEC_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		tsec_error_intr_locked(sc, count);

		/* Clear all events reported */
		ie = TSEC_READ(sc, TSEC_REG_IEVENT);
		TSEC_WRITE(sc, TSEC_REG_IEVENT, ie);
	}

	tsec_transmit_intr_locked(sc);

	TSEC_GLOBAL_TO_RECEIVE_LOCK(sc);

	rx_npkts = tsec_receive_intr_locked(sc, count);

	TSEC_RECEIVE_UNLOCK(sc);

	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	device_t dev;
	int mask, error = 0;

	dev = sc->dev;

	switch (command) {
	case SIOCSIFMTU:
		TSEC_GLOBAL_LOCK(sc);
		if (tsec_set_mtu(sc, ifr->ifr_mtu))
			ifp->if_mtu = ifr->ifr_mtu;
		else
			error = EINVAL;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		TSEC_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((sc->tsec_if_flags ^ ifp->if_flags) &
				    IFF_PROMISC)
					tsec_setfilter(sc);

				if ((sc->tsec_if_flags ^ ifp->if_flags) &
				    IFF_ALLMULTI)
					tsec_setup_multicast(sc);
			} else
				tsec_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			tsec_stop(sc);

		sc->tsec_if_flags = ifp->if_flags;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			TSEC_GLOBAL_LOCK(sc);
			tsec_setup_multicast(sc);
			TSEC_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media,
		    command);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if ((mask & IFCAP_HWCSUM) && sc->is_etsec) {
			TSEC_GLOBAL_LOCK(sc);
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			tsec_offload_setup(sc);
			TSEC_GLOBAL_UNLOCK(sc);
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(tsec_poll, ifp);
				if (error)
					return (error);

				TSEC_GLOBAL_LOCK(sc);
				/* Disable interrupts */
				tsec_intrs_ctl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				TSEC_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				TSEC_GLOBAL_LOCK(sc);
				/* Enable interrupts */
				tsec_intrs_ctl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				TSEC_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;

	default:
		error = ether_ioctl(ifp, command, data);
	}

	/* Flush buffers if not empty */
	if (ifp->if_flags & IFF_UP)
		tsec_start(ifp);
	return (error);
}

static int
tsec_ifmedia_upd(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_mediachg(mii);

	TSEC_TRANSMIT_UNLOCK(sc);
	return (0);
}

static void
tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	TSEC_TRANSMIT_UNLOCK(sc);
}

static int
tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    uint32_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error, nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
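	/*
	 * A multi-segment result should be impossible here: the RX tag is
	 * created with nsegments = 1 and maxsize = MCLBYTES, and the buffer
	 * is a single cluster, so any failure indicates a driver bug rather
	 * than a recoverable condition - hence the panic.
	 */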

#if 0
	if (error) {
		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
			error);
		m_freem(new_mbuf);
		return (ENOBUFS);
	}
#endif

#if 0
	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
		("Wrong alignment of RX buffer!"));
#endif
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

static void
tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

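/*
 * Allocate a DMA-able descriptor ring: create a page-aligned busdma tag,
 * allocate zeroed DMA safe memory for the ring, and load the map so that
 * the ring's physical address is returned through raddr (via the
 * tsec_map_dma_addr() callback above).
 */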
static int
tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
{
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    dsize, 1,				/* maxsize, nsegments */
	    dsize, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    dtag);				/* dmat */

	if (error) {
		device_printf(dev, "failed to allocate busdma %s tag\n",
		    dname);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "failed to allocate %s DMA safe memory\n",
		    dname);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize,
	    tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get address of the %s "
		    "descriptors\n", dname);
		bus_dmamem_free(*dtag, *vaddr, *dmap);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	return (0);
}

static void
tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
{

	if (vaddr == NULL)
		return;

	/* Unmap descriptors from DMA memory */
	bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dtag, dmap);

	/* Free descriptors memory */
	bus_dmamem_free(dtag, vaddr, dmap);

	/* Destroy descriptors tag */
	bus_dma_tag_destroy(dtag);
}

static void
tsec_free_dma(struct tsec_softc *sc)
{
	int i;

	/* Free TX maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
		if (sc->tx_map_data[i] != NULL)
			bus_dmamap_destroy(sc->tsec_tx_mtag,
			    sc->tx_map_data[i]);
	/* Destroy tag for TX mbufs */
	bus_dma_tag_destroy(sc->tsec_tx_mtag);

	/* Free RX mbufs and maps */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		if (sc->rx_data[i].mbuf) {
			/* Unload buffer from DMA */
			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);

			/* Free buffer */
			m_freem(sc->rx_data[i].mbuf);
		}
		/* Destroy map for this buffer */
		if (sc->rx_data[i].map != NULL)
			bus_dmamap_destroy(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);
	}
	/* Destroy tag for RX mbufs */
	bus_dma_tag_destroy(sc->tsec_rx_mtag);

	/* Unload TX/RX descriptors */
	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    sc->tsec_tx_vaddr);
	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    sc->tsec_rx_vaddr);
}

static void
tsec_stop(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	uint32_t tmpval;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Disable interface and watchdog timer */
	callout_stop(&sc->tsec_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->tsec_watchdog = 0;

	/* Disable all interrupts and stop DMA */
	tsec_intrs_ctl(sc, 0);
	tsec_dma_ctl(sc, 0);

	/* Remove pending data from TX queue */
	while (!TSEC_EMPTYQ_TX_MBUF(sc)) {
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);
	}

	/* Disable RX and TX */
	tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
	tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
	DELAY(10);
}

static void
tsec_tick(void *arg)
{
	struct tsec_softc *sc = arg;
	struct ifnet *ifp;
	int link;

	TSEC_GLOBAL_LOCK(sc);

	tsec_watchdog(sc);

	ifp = sc->tsec_ifp;
	link = sc->tsec_link;

	mii_tick(sc->tsec_mii);

	if (link == 0 && sc->tsec_link == 1 &&
	    (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)))
		tsec_start_locked(ifp);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);

	TSEC_GLOBAL_UNLOCK(sc);
}

/*
 *  This is the core RX routine. It replenishes mbufs in the descriptor ring
 *  and sends data that has been DMA'd into host memory to the upper layer.
 *
 *  Loops at most count times if count is > 0, or until done if count < 0.
 */
static int
tsec_receive_intr_locked(struct tsec_softc *sc, int count)
{
	struct tsec_desc *rx_desc;
	struct ifnet *ifp;
	struct rx_data_type *rx_data;
	struct mbuf *m;
	device_t dev;
	uint32_t i;
	int c, rx_npkts;
	uint16_t flags;

	TSEC_RECEIVE_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;
	rx_data = sc->rx_data;
	dev = sc->dev;
	rx_npkts = 0;

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (c = 0; ; c++) {
		if (count >= 0 && count-- == 0)
			break;

		rx_desc = TSEC_GET_CUR_RX_DESC(sc);
		flags = rx_desc->flags;

		/* Check if there is anything to receive */
		if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) {
			/*
			 * Avoid generating another interrupt
			 */
			if (flags & TSEC_RXBD_E)
				TSEC_WRITE(sc, TSEC_REG_IEVENT,
				    TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
			/*
			 * We didn't consume current descriptor and have to
			 * return it to the queue
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
		    TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {
			rx_desc->length = 0;
			rx_desc->flags = (rx_desc->flags &
			    ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I;

			if (sc->frame != NULL) {
				m_free(sc->frame);
				sc->frame = NULL;
			}

			continue;
		}

		/* Ok... process frame */
		i = TSEC_GET_CUR_RX_DESC_CNT(sc);
		m = rx_data[i].mbuf;
		m->m_len = rx_desc->length;
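
		/*
		 * A frame larger than one RX buffer arrives over several
		 * descriptors; partial chains accumulate in sc->frame until
		 * a descriptor with the L (last-in-frame) bit completes the
		 * packet. On that last descriptor the hardware reports the
		 * total frame length, hence the subtraction below.
		 */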

		if (sc->frame != NULL) {
			if ((flags & TSEC_RXBD_L) != 0)
				m->m_len -= m_length(sc->frame, NULL);

			m->m_flags &= ~M_PKTHDR;
			m_cat(sc->frame, m);
		} else {
			sc->frame = m;
		}

		m = NULL;

		if ((flags & TSEC_RXBD_L) != 0) {
			m = sc->frame;
			sc->frame = NULL;
		}

		if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
		    &rx_data[i].mbuf, &rx_data[i].paddr)) {
			ifp->if_ierrors++;
			/*
			 * We ran out of mbufs; didn't consume current
			 * descriptor and have to return it to the queue.
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		/* Attach new buffer to descriptor and clear flags */
		rx_desc->bufptr = rx_data[i].paddr;
		rx_desc->length = 0;
		rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
		    TSEC_RXBD_E | TSEC_RXBD_I;

		if (m != NULL) {
			m->m_pkthdr.rcvif = ifp;

			m_fixhdr(m);
			m_adj(m, -ETHER_CRC_LEN);

			if (sc->is_etsec)
				tsec_offload_process_frame(sc, m);

			TSEC_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			TSEC_RECEIVE_LOCK(sc);
			rx_npkts++;
		}
	}

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Make sure TSEC receiver is not halted.
	 *
	 * Various conditions can stop the TSEC receiver, but not all are
	 * signaled and handled by error interrupt, so make sure the receiver
	 * is running. Writing to TSEC_REG_RSTAT restarts the receiver when
	 * halted, and is harmless if already running.
	 */
	TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);
	return (rx_npkts);
}

void
tsec_receive_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) {
		TSEC_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
	tsec_receive_intr_locked(sc, -1);

	TSEC_RECEIVE_UNLOCK(sc);
}

static void
tsec_transmit_intr_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc;
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	int send = 0;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Update collision statistics */
	ifp->if_collisions += TSEC_READ(sc, TSEC_REG_MON_TNCL);

	/* Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (TSEC_CUR_DIFF_DIRTY_TX_DESC(sc)) {
		tx_desc = TSEC_GET_DIRTY_TX_DESC(sc);
		if (tx_desc->flags & TSEC_TXBD_R) {
			TSEC_BACK_DIRTY_TX_DESC(sc);
			break;
		}

		if ((tx_desc->flags & TSEC_TXBD_L) == 0)
			continue;

		/*
		 * This is the last buf in this packet, so unmap and free it.
		 */
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);

		ifp->if_opackets++;
		send = 1;
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		tsec_start_locked(ifp);

		/* Stop watchdog if all sent */
		if (TSEC_EMPTYQ_TX_MBUF(sc))
			sc->tsec_watchdog = 0;
	}
}

void
tsec_transmit_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) {
		TSEC_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif
	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);
	tsec_transmit_intr_locked(sc);

	TSEC_TRANSMIT_UNLOCK(sc);
}

static void
tsec_error_intr_locked(struct tsec_softc *sc, int count)
{
	struct ifnet *ifp;
	uint32_t eflags;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	eflags = TSEC_READ(sc, TSEC_REG_IEVENT);

	/* Clear events bits in hardware */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY |
	    TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT |
	    TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC |
	    TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN);

	/* Check transmitter errors */
	if (eflags & TSEC_IEVENT_TXE) {
		ifp->if_oerrors++;

		if (eflags & TSEC_IEVENT_LC)
			ifp->if_collisions++;

		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}

	/* Check receiver errors */
	if (eflags & TSEC_IEVENT_BSY) {
		ifp->if_ierrors++;
		ifp->if_iqdrops++;

		/* Get data from RX buffers */
		tsec_receive_intr_locked(sc, count);
	}

	if (ifp->if_flags & IFF_DEBUG)
		if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n",
		    eflags);

	if (eflags & TSEC_IEVENT_EBERR) {
		if_printf(ifp, "System bus error occurred during "
		    "DMA transaction (flags: 0x%x)\n", eflags);
		tsec_init_locked(sc);
	}

	if (eflags & TSEC_IEVENT_BABT)
		ifp->if_oerrors++;

	if (eflags & TSEC_IEVENT_BABR)
		ifp->if_ierrors++;
}

void
tsec_error_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_GLOBAL_LOCK(sc);
	tsec_error_intr_locked(sc, -1);
	TSEC_GLOBAL_UNLOCK(sc);
}

int
tsec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct tsec_softc *sc;
	uint32_t timeout;

	sc = device_get_softc(dev);

	TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
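	/*
	 * MIIMADD selects the target: the PHY address goes in the byte
	 * above the 5-bit register field, so e.g. PHY 2 / register 1
	 * (the MII BMSR) encodes as (2 << 8) | 1 == 0x0201.
	 */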
	TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCOM, 0);
	TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE);

	timeout = TSEC_READ_RETRY;
	while (--timeout && TSEC_READ(sc->phy_sc, TSEC_REG_MIIMIND) &
	    (TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);

	if (timeout == 0)
		device_printf(dev, "Timeout while reading from PHY!\n");

	return (TSEC_READ(sc->phy_sc, TSEC_REG_MIIMSTAT));
}

int
tsec_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct tsec_softc *sc;
	uint32_t timeout;

	sc = device_get_softc(dev);

	TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
	TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCON, value);

	timeout = TSEC_READ_RETRY;
	while (--timeout && (TSEC_READ(sc->phy_sc, TSEC_REG_MIIMIND) &
	    TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);

	if (timeout == 0)
		device_printf(dev, "Timeout while writing to PHY!\n");

	return (0);
}

void
tsec_miibus_statchg(device_t dev)
{
	struct tsec_softc *sc;
	struct mii_data *mii;
	uint32_t ecntrl, id, tmp;
	int link;

	sc = device_get_softc(dev);
	mii = sc->tsec_mii;
	link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0);

	tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		tmp |= TSEC_MACCFG2_FULLDUPLEX;
	else
		tmp &= ~TSEC_MACCFG2_FULLDUPLEX;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		tmp |= TSEC_MACCFG2_GMII;
		sc->tsec_link = link;
		break;
	case IFM_100_TX:
	case IFM_10_T:
		tmp |= TSEC_MACCFG2_MII;
		sc->tsec_link = link;
		break;
	case IFM_NONE:
		if (link)
			device_printf(dev, "No speed selected but link "
			    "active!\n");
		sc->tsec_link = 0;
		return;
	default:
		sc->tsec_link = 0;
		device_printf(dev, "Unknown speed (%d), link %s!\n",
		    IFM_SUBTYPE(mii->mii_media_active),
		    ((link) ? "up" : "down"));
		return;
	}
	TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);

	/* XXX kludge - use circumstantial evidence for reduced mode. */
	id = TSEC_READ(sc, TSEC_REG_ID2);
	if (id & 0xffff) {
		ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
		ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
		TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);
	}
}

static void
tsec_add_sysctls(struct tsec_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "TSEC interrupt coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_time,
	    "I", "IC RX time threshold (0-65535)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_count,
	    "I", "IC RX frame count threshold (0-255)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_time,
	    "I", "IC TX time threshold (0-65535)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_count,
	    "I", "IC TX frame count threshold (0-255)");
}

/*
 * With Interrupt Coalescing (IC) active, a transmit/receive frame
 * interrupt is raised when either:
 *
 * - a threshold-defined period of time has elapsed, or
 * - a threshold-defined number of frames has been received/transmitted,
 *
 * whichever occurs first.
 *
 * The following sysctls regulate IC behaviour (for TX/RX separately):
 *
 * dev.tsec.<unit>.int_coal.rx_time
 * dev.tsec.<unit>.int_coal.rx_count
 * dev.tsec.<unit>.int_coal.tx_time
 * dev.tsec.<unit>.int_coal.tx_count
 *
 * Values:
 *
 * - 0 for either time or count disables IC on the given TX/RX path
 *
 * - count: 1-255 (frame count; note that a value of 1 effectively
 *   disables IC)
 *
 * - time: 1-65535 (value corresponds to a real time period and is
 *   expressed in units equivalent to 64 TSEC interface clocks, i.e. one
 *   timer threshold unit is 25.6 us, 2.56 us, or 512 ns, corresponding to
 *   10 Mbps, 100 Mbps, or 1 Gbps, respectively; for a detailed discussion
 *   consult the TSEC reference manual)
 */
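
/*
 * Example with the driver defaults from tsec_attach() (rx_ic_time = 768,
 * rx_ic_count = 16): tsec_set_rxic() programs
 *
 *   RXIC = 0x80000000 | (16 << 21) | 768 == 0x82000300
 *
 * i.e. coalescing enabled, with an interrupt after 16 frames or after
 * 768 * 64 interface clocks, whichever comes first. The same values can
 * be set from userland with, e.g. for unit 0:
 *
 *   sysctl dev.tsec.0.int_coal.rx_count=16
 *   sysctl dev.tsec.0.int_coal.rx_time=768
 */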
static int
tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t time;
	struct tsec_softc *sc = (struct tsec_softc *)arg1;

	time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;

	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	if (time > 65535)
		return (EINVAL);

	TSEC_IC_LOCK(sc);
	if (arg2 == TSEC_IC_RX) {
		sc->rx_ic_time = time;
		tsec_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		tsec_set_txic(sc);
	}
	TSEC_IC_UNLOCK(sc);

	return (0);
}

static int
tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t count;
	struct tsec_softc *sc = (struct tsec_softc *)arg1;

	count = (arg2 == TSEC_IC_RX) ? sc->rx_ic_count : sc->tx_ic_count;

	error = sysctl_handle_int(oidp, &count, 0, req);
	if (error != 0)
		return (error);

	if (count > 255)
		return (EINVAL);

	TSEC_IC_LOCK(sc);
	if (arg2 == TSEC_IC_RX) {
		sc->rx_ic_count = count;
		tsec_set_rxic(sc);
	} else {
		sc->tx_ic_count = count;
		tsec_set_txic(sc);
	}
	TSEC_IC_UNLOCK(sc);

	return (0);
}

static void
tsec_set_rxic(struct tsec_softc *sc)
{
	uint32_t rxic_val;

	if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0)
		/* Disable RX IC */
		rxic_val = 0;
	else {
		rxic_val = 0x80000000;
		rxic_val |= (sc->rx_ic_count << 21);
		rxic_val |= sc->rx_ic_time;
	}

	TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val);
}

static void
tsec_set_txic(struct tsec_softc *sc)
{
	uint32_t txic_val;

	if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0)
		/* Disable TX IC */
		txic_val = 0;
	else {
		txic_val = 0x80000000;
		txic_val |= (sc->tx_ic_count << 21);
		txic_val |= sc->tx_ic_time;
	}

	TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val);
}

static void
tsec_offload_setup(struct tsec_softc *sc)
{
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t reg;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	reg = TSEC_READ(sc, TSEC_REG_TCTRL);
	reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = TSEC_CHECKSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	TSEC_WRITE(sc, TSEC_REG_TCTRL, reg);

	reg = TSEC_READ(sc, TSEC_REG_RCTRL);
	reg &= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP);
	reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN |
		    TSEC_RCTRL_PRSDEP_PARSE_L234;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, reg);
}

static void
tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m)
{
	struct tsec_rx_fcb rx_fcb;
	int csum_flags = 0;
	int protocol, flags;

	TSEC_RECEIVE_LOCK_ASSERT(sc);

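	/*
	 * With RX parsing enabled (see tsec_offload_setup()) the eTSEC
	 * prepends a frame control block (FCB) to every received frame.
	 * Copy it out, translate its checksum/VLAN results into mbuf
	 * metadata, and strip it before the frame is passed up.
	 */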
	m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb));
	flags = rx_fcb.flags;
	protocol = rx_fcb.protocol;

	if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) {
		csum_flags |= CSUM_IP_CHECKED;

		if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0)
			csum_flags |= CSUM_IP_VALID;
	}

	if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) &&
	    TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) &&
	    (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) {
		csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
	}

	m->m_pkthdr.csum_flags = csum_flags;

	if (flags & TSEC_RX_FCB_VLAN) {
		m->m_pkthdr.ether_vtag = rx_fcb.vlan;
		m->m_flags |= M_VLANTAG;
	}

	m_adj(m, sizeof(struct tsec_rx_fcb));
}

static void
tsec_setup_multicast(struct tsec_softc *sc)
{
	uint32_t hashtable[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	struct ifnet *ifp = sc->tsec_ifp;
	struct ifmultiaddr *ifma;
	uint32_t h;
	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < 8; i++)
			TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF);

		return;
	}

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = (ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 24) & 0xFF;

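		/*
		 * h is the top byte of the big-endian CRC32 of the MAC
		 * address. For example, h == 0x25 (37) selects the word
		 * hashtable[1] (GADDR1) and sets bit 1 << (0x1F - 5),
		 * i.e. the hash registers are filled MSB-first.
		 */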
		hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F));
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < 8; i++)
		TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]);
}

static int
tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu)
{

	mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
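	/*
	 * E.g. the default MTU of 1500 becomes a 1522-byte maximum frame:
	 * 14-byte Ethernet header + 4-byte VLAN tag + 4-byte CRC.
	 */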

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	if (mtu >= TSEC_MIN_FRAME_SIZE && mtu <= TSEC_MAX_FRAME_SIZE) {
		TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu);
		return (mtu);
	}

	return (0);
}