xref: /freebsd/sys/dev/usb/net/if_axe.c (revision 675be9115aae86ad6b3d877155d4fd7822892105)
1 /*-
2  * Copyright (c) 1997, 1998, 1999, 2000-2003
3  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * ASIX Electronics AX88172/AX88178/AX88772 USB 2.0 ethernet driver.
38  * Used in the LinkSys USB200M and various other adapters.
39  *
40  * Manuals available from:
41  * http://www.asix.com.tw/datasheet/mac/Ax88172.PDF
42  * Note: you need the manual for the AX88170 chip (USB 1.x ethernet
43  * controller) to find the definitions for the RX control register.
44  * http://www.asix.com.tw/datasheet/mac/Ax88170.PDF
45  *
46  * Written by Bill Paul <wpaul@windriver.com>
47  * Senior Engineer
48  * Wind River Systems
49  */
50 
51 /*
52  * The AX88172 provides USB ethernet support at 10 and 100Mbps.
53  * It uses an external PHY (reference designs use a RealTek chip),
54  * and has a 64-bit multicast hash filter. There is some information
55  * missing from the manual which one needs to know in order to make
56  * the chip function:
57  *
58  * - You must set bit 7 in the RX control register, otherwise the
59  *   chip won't receive any packets.
60  * - You must initialize all 3 IPG registers, or you won't be able
61  *   to send any packets.
62  *
63  * Note that this device appears to only support loading the station
64  * address via autoload from the EEPROM (i.e. there's no way to manually
65  * set it).
66  *
67  * (Adam Weinberger wanted me to name this driver if_gir.c.)
68  */
69 
70 /*
71  * Ax88178 and Ax88772 support backported from the OpenBSD driver.
72  * 2007/02/12, J.R. Oldroyd, fbsd@opal.com
73  *
74  * Manual here:
75  * http://www.asix.com.tw/FrootAttach/datasheet/AX88178_datasheet_Rev10.pdf
76  * http://www.asix.com.tw/FrootAttach/datasheet/AX88772_datasheet_Rev10.pdf
77  */
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/bus.h>
82 #include <sys/condvar.h>
83 #include <sys/endian.h>
84 #include <sys/kernel.h>
85 #include <sys/lock.h>
86 #include <sys/malloc.h>
87 #include <sys/mbuf.h>
88 #include <sys/module.h>
89 #include <sys/mutex.h>
90 #include <sys/socket.h>
91 #include <sys/sockio.h>
92 #include <sys/sysctl.h>
93 #include <sys/sx.h>
94 
95 #include <net/if.h>
96 #include <net/ethernet.h>
97 #include <net/if_types.h>
98 #include <net/if_media.h>
99 #include <net/if_vlan_var.h>
100 
101 #include <dev/mii/mii.h>
102 #include <dev/mii/miivar.h>
103 
104 #include <dev/usb/usb.h>
105 #include <dev/usb/usbdi.h>
106 #include <dev/usb/usbdi_util.h>
107 #include "usbdevs.h"
108 
109 #define	USB_DEBUG_VAR axe_debug
110 #include <dev/usb/usb_debug.h>
111 #include <dev/usb/usb_process.h>
112 
113 #include <dev/usb/net/usb_ethernet.h>
114 #include <dev/usb/net/if_axereg.h>
115 
116 /*
117  * AXE_178_MAX_FRAME_BURST
118  * max frame burst size for Ax88178 and Ax88772
119  *	0	2048 bytes
120  *	1	4096 bytes
121  *	2	8192 bytes
122  *	3	16384 bytes
123  * use the largest your system can handle without USB stalling.
124  *
125  * NB: 88772 parts appear to generate lots of input errors with
126  * a 2K rx buffer, and 8K is only slightly faster than 4K on an
127  * EHCI port on a T42, so change at your own risk.
128  */
129 #define AXE_178_MAX_FRAME_BURST	1
130 
131 #define	AXE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
132 
133 #ifdef USB_DEBUG
134 static int axe_debug = 0;
135 
136 static SYSCTL_NODE(_hw_usb, OID_AUTO, axe, CTLFLAG_RW, 0, "USB axe");
137 SYSCTL_INT(_hw_usb_axe, OID_AUTO, debug, CTLFLAG_RW, &axe_debug, 0,
138     "Debug level");
139 #endif
140 
141 /*
142  * Various supported device vendors/products.
143  */
144 static const STRUCT_USB_HOST_ID axe_devs[] = {
145 #define	AXE_DEV(v,p,i) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i) }
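	/*
	 * The third AXE_DEV() argument becomes the USB driver info and is
	 * read back in axe_attach() via USB_GET_DRIVER_INFO() to select
	 * the chip variant (AXE_FLAG_178/772/772A/772B).
	 */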
146 	AXE_DEV(ABOCOM, UF200, 0),
147 	AXE_DEV(ACERCM, EP1427X2, 0),
148 	AXE_DEV(APPLE, ETHERNET, AXE_FLAG_772),
149 	AXE_DEV(ASIX, AX88172, 0),
150 	AXE_DEV(ASIX, AX88178, AXE_FLAG_178),
151 	AXE_DEV(ASIX, AX88772, AXE_FLAG_772),
152 	AXE_DEV(ASIX, AX88772A, AXE_FLAG_772A),
153 	AXE_DEV(ASIX, AX88772B, AXE_FLAG_772B),
154 	AXE_DEV(ATEN, UC210T, 0),
155 	AXE_DEV(BELKIN, F5D5055, AXE_FLAG_178),
156 	AXE_DEV(BILLIONTON, USB2AR, 0),
157 	AXE_DEV(CISCOLINKSYS, USB200MV2, AXE_FLAG_772A),
158 	AXE_DEV(COREGA, FETHER_USB2_TX, 0),
159 	AXE_DEV(DLINK, DUBE100, 0),
160 	AXE_DEV(DLINK, DUBE100B1, AXE_FLAG_772),
161 	AXE_DEV(GOODWAY, GWUSB2E, 0),
162 	AXE_DEV(IODATA, ETGUS2, AXE_FLAG_178),
163 	AXE_DEV(JVC, MP_PRX1, 0),
164 	AXE_DEV(LINKSYS2, USB200M, 0),
165 	AXE_DEV(LINKSYS4, USB1000, AXE_FLAG_178),
166 	AXE_DEV(LOGITEC, LAN_GTJU2A, AXE_FLAG_178),
167 	AXE_DEV(MELCO, LUAU2KTX, 0),
168 	AXE_DEV(MELCO, LUA3U2AGT, AXE_FLAG_178),
169 	AXE_DEV(NETGEAR, FA120, 0),
170 	AXE_DEV(OQO, ETHER01PLUS, AXE_FLAG_772),
171 	AXE_DEV(PLANEX3, GU1000T, AXE_FLAG_178),
172 	AXE_DEV(SITECOM, LN029, 0),
173 	AXE_DEV(SITECOMEU, LN028, AXE_FLAG_178),
174 	AXE_DEV(SYSTEMTALKS, SGCX2UL, 0),
175 #undef AXE_DEV
176 };
177 
178 static device_probe_t axe_probe;
179 static device_attach_t axe_attach;
180 static device_detach_t axe_detach;
181 
182 static usb_callback_t axe_bulk_read_callback;
183 static usb_callback_t axe_bulk_write_callback;
184 
185 static miibus_readreg_t axe_miibus_readreg;
186 static miibus_writereg_t axe_miibus_writereg;
187 static miibus_statchg_t axe_miibus_statchg;
188 
189 static uether_fn_t axe_attach_post;
190 static uether_fn_t axe_init;
191 static uether_fn_t axe_stop;
192 static uether_fn_t axe_start;
193 static uether_fn_t axe_tick;
194 static uether_fn_t axe_setmulti;
195 static uether_fn_t axe_setpromisc;
196 
197 static int	axe_attach_post_sub(struct usb_ether *);
198 static int	axe_ifmedia_upd(struct ifnet *);
199 static void	axe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
200 static int	axe_cmd(struct axe_softc *, int, int, int, void *);
201 static void	axe_ax88178_init(struct axe_softc *);
202 static void	axe_ax88772_init(struct axe_softc *);
203 static void	axe_ax88772_phywake(struct axe_softc *);
204 static void	axe_ax88772a_init(struct axe_softc *);
205 static void	axe_ax88772b_init(struct axe_softc *);
206 static int	axe_get_phyno(struct axe_softc *, int);
207 static int	axe_ioctl(struct ifnet *, u_long, caddr_t);
208 static int	axe_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
209 static int	axe_rxeof(struct usb_ether *, struct usb_page_cache *,
210 		    unsigned int offset, unsigned int, struct axe_csum_hdr *);
211 static void	axe_csum_cfg(struct usb_ether *);
212 
213 static const struct usb_config axe_config[AXE_N_TRANSFER] = {
214 
215 	[AXE_BULK_DT_WR] = {
216 		.type = UE_BULK,
217 		.endpoint = UE_ADDR_ANY,
218 		.direction = UE_DIR_OUT,
219 		.frames = 16,
220 		.bufsize = 16 * MCLBYTES,
221 		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
222 		.callback = axe_bulk_write_callback,
223 		.timeout = 10000,	/* 10 seconds */
224 	},
225 
226 	[AXE_BULK_DT_RD] = {
227 		.type = UE_BULK,
228 		.endpoint = UE_ADDR_ANY,
229 		.direction = UE_DIR_IN,
230 		.bufsize = 16384,	/* bytes */
231 		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
232 		.callback = axe_bulk_read_callback,
233 		.timeout = 0,	/* no timeout */
234 	},
235 };
236 
237 static const struct ax88772b_mfb ax88772b_mfb_table[] = {
238 	{ 0x8000, 0x8001, 2048 },
239 	{ 0x8100, 0x8147, 4096 },
240 	{ 0x8200, 0x81EB, 6144 },
241 	{ 0x8300, 0x83D7, 8192 },
242 	{ 0x8400, 0x851E, 16384 },
243 	{ 0x8500, 0x8666, 20480 },
244 	{ 0x8600, 0x87AE, 24576 },
245 	{ 0x8700, 0x8A3D, 32768 }
246 };
247 
248 static device_method_t axe_methods[] = {
249 	/* Device interface */
250 	DEVMETHOD(device_probe, axe_probe),
251 	DEVMETHOD(device_attach, axe_attach),
252 	DEVMETHOD(device_detach, axe_detach),
253 
254 	/* MII interface */
255 	DEVMETHOD(miibus_readreg, axe_miibus_readreg),
256 	DEVMETHOD(miibus_writereg, axe_miibus_writereg),
257 	DEVMETHOD(miibus_statchg, axe_miibus_statchg),
258 
259 	DEVMETHOD_END
260 };
261 
262 static driver_t axe_driver = {
263 	.name = "axe",
264 	.methods = axe_methods,
265 	.size = sizeof(struct axe_softc),
266 };
267 
268 static devclass_t axe_devclass;
269 
270 DRIVER_MODULE(axe, uhub, axe_driver, axe_devclass, NULL, 0);
271 DRIVER_MODULE(miibus, axe, miibus_driver, miibus_devclass, 0, 0);
272 MODULE_DEPEND(axe, uether, 1, 1, 1);
273 MODULE_DEPEND(axe, usb, 1, 1, 1);
274 MODULE_DEPEND(axe, ether, 1, 1, 1);
275 MODULE_DEPEND(axe, miibus, 1, 1, 1);
276 MODULE_VERSION(axe, 1);
277 
278 static const struct usb_ether_methods axe_ue_methods = {
279 	.ue_attach_post = axe_attach_post,
280 	.ue_attach_post_sub = axe_attach_post_sub,
281 	.ue_start = axe_start,
282 	.ue_init = axe_init,
283 	.ue_stop = axe_stop,
284 	.ue_tick = axe_tick,
285 	.ue_setmulti = axe_setmulti,
286 	.ue_setpromisc = axe_setpromisc,
287 	.ue_mii_upd = axe_ifmedia_upd,
288 	.ue_mii_sts = axe_ifmedia_sts,
289 };
290 
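/*
 * Issue a vendor specific control request.  The transfer direction,
 * request code and transfer length are all encoded in 'cmd' and are
 * decoded with the AXE_CMD_IS_WRITE(), AXE_CMD_CMD() and AXE_CMD_LEN()
 * macros.
 */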
291 static int
292 axe_cmd(struct axe_softc *sc, int cmd, int index, int val, void *buf)
293 {
294 	struct usb_device_request req;
295 	usb_error_t err;
296 
297 	AXE_LOCK_ASSERT(sc, MA_OWNED);
298 
299 	req.bmRequestType = (AXE_CMD_IS_WRITE(cmd) ?
300 	    UT_WRITE_VENDOR_DEVICE :
301 	    UT_READ_VENDOR_DEVICE);
302 	req.bRequest = AXE_CMD_CMD(cmd);
303 	USETW(req.wValue, val);
304 	USETW(req.wIndex, index);
305 	USETW(req.wLength, AXE_CMD_LEN(cmd));
306 
307 	err = uether_do_request(&sc->sc_ue, &req, buf, 1000);
308 
309 	return (err);
310 }
311 
312 static int
313 axe_miibus_readreg(device_t dev, int phy, int reg)
314 {
315 	struct axe_softc *sc = device_get_softc(dev);
316 	uint16_t val;
317 	int locked;
318 
319 	locked = mtx_owned(&sc->sc_mtx);
320 	if (!locked)
321 		AXE_LOCK(sc);
322 
323 	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
324 	axe_cmd(sc, AXE_CMD_MII_READ_REG, reg, phy, &val);
325 	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
326 
327 	val = le16toh(val);
328 	if (AXE_IS_772(sc) && reg == MII_BMSR) {
329 		/*
330 		 * The BMSR of the AX88772 indicates extended capability
331 		 * support, but the extended status register is reserved
332 		 * for the embedded ethernet PHY, so clear the extended
333 		 * capability bit of the BMSR.
334 		 */
335 		val &= ~BMSR_EXTCAP;
336 	}
337 
338 	if (!locked)
339 		AXE_UNLOCK(sc);
340 	return (val);
341 }
342 
343 static int
344 axe_miibus_writereg(device_t dev, int phy, int reg, int val)
345 {
346 	struct axe_softc *sc = device_get_softc(dev);
347 	int locked;
348 
349 	val = htole32(val);
350 	locked = mtx_owned(&sc->sc_mtx);
351 	if (!locked)
352 		AXE_LOCK(sc);
353 
354 	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
355 	axe_cmd(sc, AXE_CMD_MII_WRITE_REG, reg, phy, &val);
356 	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
357 
358 	if (!locked)
359 		AXE_UNLOCK(sc);
360 	return (0);
361 }
362 
363 static void
364 axe_miibus_statchg(device_t dev)
365 {
366 	struct axe_softc *sc = device_get_softc(dev);
367 	struct mii_data *mii = GET_MII(sc);
368 	struct ifnet *ifp;
369 	uint16_t val;
370 	int err, locked;
371 
372 	locked = mtx_owned(&sc->sc_mtx);
373 	if (!locked)
374 		AXE_LOCK(sc);
375 
376 	ifp = uether_getifp(&sc->sc_ue);
377 	if (mii == NULL || ifp == NULL ||
378 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
379 		goto done;
380 
381 	sc->sc_flags &= ~AXE_FLAG_LINK;
382 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
383 	    (IFM_ACTIVE | IFM_AVALID)) {
384 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
385 		case IFM_10_T:
386 		case IFM_100_TX:
387 			sc->sc_flags |= AXE_FLAG_LINK;
388 			break;
389 		case IFM_1000_T:
390 			if ((sc->sc_flags & AXE_FLAG_178) == 0)
391 				break;
392 			sc->sc_flags |= AXE_FLAG_LINK;
393 			break;
394 		default:
395 			break;
396 		}
397 	}
398 
399 	/* Lost link, do nothing. */
400 	if ((sc->sc_flags & AXE_FLAG_LINK) == 0)
401 		goto done;
402 
403 	val = 0;
404 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
405 		val |= AXE_MEDIA_FULL_DUPLEX;
406 		if (AXE_IS_178_FAMILY(sc)) {
407 			if ((IFM_OPTIONS(mii->mii_media_active) &
408 			    IFM_ETH_TXPAUSE) != 0)
409 				val |= AXE_178_MEDIA_TXFLOW_CONTROL_EN;
410 			if ((IFM_OPTIONS(mii->mii_media_active) &
411 			    IFM_ETH_RXPAUSE) != 0)
412 				val |= AXE_178_MEDIA_RXFLOW_CONTROL_EN;
413 		}
414 	}
415 	if (AXE_IS_178_FAMILY(sc)) {
416 		val |= AXE_178_MEDIA_RX_EN | AXE_178_MEDIA_MAGIC;
417 		if ((sc->sc_flags & AXE_FLAG_178) != 0)
418 			val |= AXE_178_MEDIA_ENCK;
419 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
420 		case IFM_1000_T:
421 			val |= AXE_178_MEDIA_GMII | AXE_178_MEDIA_ENCK;
422 			break;
423 		case IFM_100_TX:
424 			val |= AXE_178_MEDIA_100TX;
425 			break;
426 		case IFM_10_T:
427 			/* doesn't need to be handled */
428 			break;
429 		}
430 	}
431 	err = axe_cmd(sc, AXE_CMD_WRITE_MEDIA, 0, val, NULL);
432 	if (err)
433 		device_printf(dev, "media change failed, error %d\n", err);
434 done:
435 	if (!locked)
436 		AXE_UNLOCK(sc);
437 }
438 
439 /*
440  * Set media options.
441  */
442 static int
443 axe_ifmedia_upd(struct ifnet *ifp)
444 {
445 	struct axe_softc *sc = ifp->if_softc;
446 	struct mii_data *mii = GET_MII(sc);
447 	struct mii_softc *miisc;
448 	int error;
449 
450 	AXE_LOCK_ASSERT(sc, MA_OWNED);
451 
452 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
453 		PHY_RESET(miisc);
454 	error = mii_mediachg(mii);
455 	return (error);
456 }
457 
458 /*
459  * Report current media status.
460  */
461 static void
462 axe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
463 {
464 	struct axe_softc *sc = ifp->if_softc;
465 	struct mii_data *mii = GET_MII(sc);
466 
467 	AXE_LOCK(sc);
468 	mii_pollstat(mii);
469 	ifmr->ifm_active = mii->mii_media_active;
470 	ifmr->ifm_status = mii->mii_media_status;
471 	AXE_UNLOCK(sc);
472 }
473 
474 static void
475 axe_setmulti(struct usb_ether *ue)
476 {
477 	struct axe_softc *sc = uether_getsc(ue);
478 	struct ifnet *ifp = uether_getifp(ue);
479 	struct ifmultiaddr *ifma;
480 	uint32_t h = 0;
481 	uint16_t rxmode;
482 	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
483 
484 	AXE_LOCK_ASSERT(sc, MA_OWNED);
485 
486 	axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode);
487 	rxmode = le16toh(rxmode);
488 
489 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
490 		rxmode |= AXE_RXCMD_ALLMULTI;
491 		axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
492 		return;
493 	}
494 	rxmode &= ~AXE_RXCMD_ALLMULTI;
495 
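	/*
	 * Build the 64-bit multicast hash filter: the upper 6 bits of the
	 * big-endian CRC32 of each address select one of the 64 table bits.
	 */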
496 	if_maddr_rlock(ifp);
497 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
498 	{
499 		if (ifma->ifma_addr->sa_family != AF_LINK)
500 			continue;
501 		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
502 		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
503 		hashtbl[h / 8] |= 1 << (h % 8);
504 	}
505 	if_maddr_runlock(ifp);
506 
507 	axe_cmd(sc, AXE_CMD_WRITE_MCAST, 0, 0, (void *)&hashtbl);
508 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
509 }
510 
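/*
 * Return the PHY address encoded in the given PHYID byte read from the
 * chip, or -1 if the entry does not describe a usable PHY type.
 */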
511 static int
512 axe_get_phyno(struct axe_softc *sc, int sel)
513 {
514 	int phyno;
515 
516 	switch (AXE_PHY_TYPE(sc->sc_phyaddrs[sel])) {
517 	case PHY_TYPE_100_HOME:
518 	case PHY_TYPE_GIG:
519 		phyno = AXE_PHY_NO(sc->sc_phyaddrs[sel]);
520 		break;
521 	case PHY_TYPE_SPECIAL:
522 		/* FALLTHROUGH */
523 	case PHY_TYPE_RSVD:
524 		/* FALLTHROUGH */
525 	case PHY_TYPE_NON_SUP:
526 		/* FALLTHROUGH */
527 	default:
528 		phyno = -1;
529 		break;
530 	}
531 
532 	return (phyno);
533 }
534 
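/*
 * Write the GPIO control register and then pause for 'y' ticks so the
 * change has time to take effect.
 */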
535 #define	AXE_GPIO_WRITE(x, y)	do {				\
536 	axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, (x), NULL);		\
537 	uether_pause(ue, (y));					\
538 } while (0)
539 
540 static void
541 axe_ax88178_init(struct axe_softc *sc)
542 {
543 	struct usb_ether *ue;
544 	int gpio0, ledmode, phymode;
545 	uint16_t eeprom, val;
546 
547 	ue = &sc->sc_ue;
548 	axe_cmd(sc, AXE_CMD_SROM_WR_ENABLE, 0, 0, NULL);
549 	/* XXX magic */
550 	axe_cmd(sc, AXE_CMD_SROM_READ, 0, 0x0017, &eeprom);
551 	eeprom = le16toh(eeprom);
552 	axe_cmd(sc, AXE_CMD_SROM_WR_DISABLE, 0, 0, NULL);
553 
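	/*
	 * EEPROM word 0x17 appears to hold the PHY mode in the low 7 bits,
	 * the GPIO0 selection in bit 7 and the LED mode in the high byte.
	 */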
554 	/* if the EEPROM is invalid we have to use GPIO0 */
555 	if (eeprom == 0xffff) {
556 		phymode = AXE_PHY_MODE_MARVELL;
557 		gpio0 = 1;
558 		ledmode = 0;
559 	} else {
560 		phymode = eeprom & 0x7f;
561 		gpio0 = (eeprom & 0x80) ? 0 : 1;
562 		ledmode = eeprom >> 8;
563 	}
564 
565 	if (bootverbose)
566 		device_printf(sc->sc_ue.ue_dev,
567 		    "EEPROM data : 0x%04x, phymode : 0x%02x\n", eeprom,
568 		    phymode);
569 	/* Program GPIOs depending on PHY hardware. */
570 	switch (phymode) {
571 	case AXE_PHY_MODE_MARVELL:
572 		if (gpio0 == 1) {
573 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0_EN,
574 			    hz / 32);
575 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
576 			    hz / 32);
577 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2_EN, hz / 4);
578 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
579 			    hz / 32);
580 		} else {
581 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
582 			    AXE_GPIO1_EN, hz / 3);
583 			if (ledmode == 1) {
584 				AXE_GPIO_WRITE(AXE_GPIO1_EN, hz / 3);
585 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN,
586 				    hz / 3);
587 			} else {
588 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
589 				    AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
590 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
591 				    AXE_GPIO2_EN, hz / 4);
592 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
593 				    AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
594 			}
595 		}
596 		break;
597 	case AXE_PHY_MODE_CICADA:
598 	case AXE_PHY_MODE_CICADA_V2:
599 	case AXE_PHY_MODE_CICADA_V2_ASIX:
600 		if (gpio0 == 1)
601 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0 |
602 			    AXE_GPIO0_EN, hz / 32);
603 		else
604 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
605 			    AXE_GPIO1_EN, hz / 32);
606 		break;
607 	case AXE_PHY_MODE_AGERE:
608 		AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
609 		    AXE_GPIO1_EN, hz / 32);
610 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 |
611 		    AXE_GPIO2_EN, hz / 32);
612 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2_EN, hz / 4);
613 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 |
614 		    AXE_GPIO2_EN, hz / 32);
615 		break;
616 	case AXE_PHY_MODE_REALTEK_8211CL:
617 	case AXE_PHY_MODE_REALTEK_8211BN:
618 	case AXE_PHY_MODE_REALTEK_8251CL:
619 		val = gpio0 == 1 ? AXE_GPIO0 | AXE_GPIO0_EN :
620 		    AXE_GPIO1 | AXE_GPIO1_EN;
621 		AXE_GPIO_WRITE(val, hz / 32);
622 		AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
623 		AXE_GPIO_WRITE(val | AXE_GPIO2_EN, hz / 4);
624 		AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
625 		if (phymode == AXE_PHY_MODE_REALTEK_8211CL) {
626 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
627 			    0x1F, 0x0005);
628 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
629 			    0x0C, 0x0000);
630 			val = axe_miibus_readreg(ue->ue_dev, sc->sc_phyno,
631 			    0x0001);
632 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
633 			    0x01, val | 0x0080);
634 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
635 			    0x1F, 0x0000);
636 		}
637 		break;
638 	default:
639 		/* Unknown PHY model or no need to program GPIOs. */
640 		break;
641 	}
642 
643 	/* soft reset */
644 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
645 	uether_pause(ue, hz / 4);
646 
647 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
648 	    AXE_SW_RESET_PRL | AXE_178_RESET_MAGIC, NULL);
649 	uether_pause(ue, hz / 4);
650 	/* Enable MII/GMII/RGMII interface to work with external PHY. */
651 	axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0, NULL);
652 	uether_pause(ue, hz / 4);
653 
654 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
655 }
656 
657 static void
658 axe_ax88772_init(struct axe_softc *sc)
659 {
660 	axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, 0x00b0, NULL);
661 	uether_pause(&sc->sc_ue, hz / 16);
662 
663 	if (sc->sc_phyno == AXE_772_PHY_NO_EPHY) {
664 		/* ask for the embedded PHY */
665 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0x01, NULL);
666 		uether_pause(&sc->sc_ue, hz / 64);
667 
668 		/* power down and reset state, pin reset state */
669 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
670 		    AXE_SW_RESET_CLEAR, NULL);
671 		uether_pause(&sc->sc_ue, hz / 16);
672 
673 		/* power down/reset state, pin operating state */
674 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
675 		    AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL);
676 		uether_pause(&sc->sc_ue, hz / 4);
677 
678 		/* power up, reset */
679 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_PRL, NULL);
680 
681 		/* power up, operating */
682 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
683 		    AXE_SW_RESET_IPRL | AXE_SW_RESET_PRL, NULL);
684 	} else {
685 		/* ask for external PHY */
686 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0x00, NULL);
687 		uether_pause(&sc->sc_ue, hz / 64);
688 
689 		/* power down internal PHY */
690 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
691 		    AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL);
692 	}
693 
694 	uether_pause(&sc->sc_ue, hz / 4);
695 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
696 }
697 
698 static void
699 axe_ax88772_phywake(struct axe_softc *sc)
700 {
701 	struct usb_ether *ue;
702 
703 	ue = &sc->sc_ue;
704 	if (sc->sc_phyno == AXE_772_PHY_NO_EPHY) {
705 		/* Manually select internal (embedded) PHY - MAC mode. */
706 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB |
707 		    AXE_SW_PHY_SELECT_EMBEDDED | AXE_SW_PHY_SELECT_SS_MII,
708 		    NULL);
709 		uether_pause(&sc->sc_ue, hz / 32);
710 	} else {
711 		/*
712 		 * Manually select external PHY - MAC mode.
713 		 * Reverse MII/RMII is for AX88772A PHY mode.
714 		 */
715 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB |
716 		    AXE_SW_PHY_SELECT_EXT | AXE_SW_PHY_SELECT_SS_MII, NULL);
717 		uether_pause(&sc->sc_ue, hz / 32);
718 	}
719 	/* Take PHY out of power down. */
720 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPPD |
721 	    AXE_SW_RESET_IPRL, NULL);
722 	uether_pause(&sc->sc_ue, hz / 4);
723 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL);
724 	uether_pause(&sc->sc_ue, hz);
725 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
726 	uether_pause(&sc->sc_ue, hz / 32);
727 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL);
728 	uether_pause(&sc->sc_ue, hz / 32);
729 }
730 
731 static void
732 axe_ax88772a_init(struct axe_softc *sc)
733 {
734 	struct usb_ether *ue;
735 
736 	ue = &sc->sc_ue;
737 	/* Reload EEPROM. */
738 	AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32);
739 	axe_ax88772_phywake(sc);
740 	/* Stop MAC. */
741 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
742 }
743 
744 static void
745 axe_ax88772b_init(struct axe_softc *sc)
746 {
747 	struct usb_ether *ue;
748 	uint16_t eeprom;
749 	uint8_t *eaddr;
750 	int i;
751 
752 	ue = &sc->sc_ue;
753 	/* Reload EEPROM. */
754 	AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32);
755 	/*
756 	 * Save the PHY power saving configuration (high byte) and
757 	 * clear the EEPROM checksum value (low byte).
758 	 */
759 	axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_PHY_PWRCFG, &eeprom);
760 	sc->sc_pwrcfg = le16toh(eeprom) & 0xFF00;
761 
762 	/*
763 	 * The default station address auto-loaded from internal ROM is
764 	 * 00:00:00:00:00:00, so an explicit EEPROM access is required
765 	 * to get the real station address.
766 	 */
767 	eaddr = ue->ue_eaddr;
768 	for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
769 		axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_NODE_ID + i,
770 		    &eeprom);
771 		eeprom = le16toh(eeprom);
772 		*eaddr++ = (uint8_t)(eeprom & 0xFF);
773 		*eaddr++ = (uint8_t)((eeprom >> 8) & 0xFF);
774 	}
775 	/* Wakeup PHY. */
776 	axe_ax88772_phywake(sc);
777 	/* Stop MAC. */
778 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
779 }
780 
781 #undef	AXE_GPIO_WRITE
782 
783 static void
784 axe_reset(struct axe_softc *sc)
785 {
786 	struct usb_config_descriptor *cd;
787 	usb_error_t err;
788 
789 	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);
790 
791 	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
792 	    cd->bConfigurationValue);
793 	if (err)
794 		DPRINTF("reset failed (ignored)\n");
795 
796 	/* Wait a little while for the chip to get its brains in order. */
797 	uether_pause(&sc->sc_ue, hz / 100);
798 
799 	/* Reinitialize controller to achieve full reset. */
800 	if (sc->sc_flags & AXE_FLAG_178)
801 		axe_ax88178_init(sc);
802 	else if (sc->sc_flags & AXE_FLAG_772)
803 		axe_ax88772_init(sc);
804 	else if (sc->sc_flags & AXE_FLAG_772A)
805 		axe_ax88772a_init(sc);
806 	else if (sc->sc_flags & AXE_FLAG_772B)
807 		axe_ax88772b_init(sc);
808 }
809 
810 static void
811 axe_attach_post(struct usb_ether *ue)
812 {
813 	struct axe_softc *sc = uether_getsc(ue);
814 
815 	/*
816 	 * Load PHY indexes first. Needed by axe_xxx_init().
817 	 */
818 	axe_cmd(sc, AXE_CMD_READ_PHYID, 0, 0, sc->sc_phyaddrs);
819 	if (bootverbose)
820 		device_printf(sc->sc_ue.ue_dev, "PHYADDR 0x%02x:0x%02x\n",
821 		    sc->sc_phyaddrs[0], sc->sc_phyaddrs[1]);
822 	sc->sc_phyno = axe_get_phyno(sc, AXE_PHY_SEL_PRI);
823 	if (sc->sc_phyno == -1)
824 		sc->sc_phyno = axe_get_phyno(sc, AXE_PHY_SEL_SEC);
825 	if (sc->sc_phyno == -1) {
826 		device_printf(sc->sc_ue.ue_dev,
827 		    "no valid PHY address found, assuming PHY address 0\n");
828 		sc->sc_phyno = 0;
829 	}
830 
831 	/* Initialize controller and get station address. */
832 	if (sc->sc_flags & AXE_FLAG_178) {
833 		axe_ax88178_init(sc);
834 		sc->sc_tx_bufsz = 16 * 1024;
835 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
836 	} else if (sc->sc_flags & AXE_FLAG_772) {
837 		axe_ax88772_init(sc);
838 		sc->sc_tx_bufsz = 8 * 1024;
839 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
840 	} else if (sc->sc_flags & AXE_FLAG_772A) {
841 		axe_ax88772a_init(sc);
842 		sc->sc_tx_bufsz = 8 * 1024;
843 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
844 	} else if (sc->sc_flags & AXE_FLAG_772B) {
845 		axe_ax88772b_init(sc);
846 		sc->sc_tx_bufsz = 8 * 1024;
847 	} else
848 		axe_cmd(sc, AXE_172_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
849 
850 	/*
851 	 * Fetch IPG values.
852 	 */
853 	if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B)) {
854 		/* Set IPG values. */
855 		sc->sc_ipgs[0] = 0x15;
856 		sc->sc_ipgs[1] = 0x16;
857 		sc->sc_ipgs[2] = 0x1A;
858 	} else
859 		axe_cmd(sc, AXE_CMD_READ_IPG012, 0, 0, sc->sc_ipgs);
860 }
861 
862 static int
863 axe_attach_post_sub(struct usb_ether *ue)
864 {
865 	struct axe_softc *sc;
866 	struct ifnet *ifp;
867 	u_int adv_pause;
868 	int error;
869 
870 	sc = uether_getsc(ue);
871 	ifp = ue->ue_ifp;
872 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
873 	ifp->if_start = uether_start;
874 	ifp->if_ioctl = axe_ioctl;
875 	ifp->if_init = uether_init;
876 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
877 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
878 	IFQ_SET_READY(&ifp->if_snd);
879 
880 	if (AXE_IS_178_FAMILY(sc))
881 		ifp->if_capabilities |= IFCAP_VLAN_MTU;
882 	if (sc->sc_flags & AXE_FLAG_772B) {
883 		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_RXCSUM;
884 		ifp->if_hwassist = AXE_CSUM_FEATURES;
885 		/*
886 		 * Checksum offloading on the AX88772B also works with
887 		 * VLAN tagged frames, but there is no way to take
888 		 * advantage of that because vlan(4) treats
889 		 * IFCAP_VLAN_HWTAGGING as a prerequisite for checksum
890 		 * offloading with VLAN.  The VLAN hardware tagging
891 		 * support of the AX88772B is very limited, so it's not
892 		 * possible to announce IFCAP_VLAN_HWTAGGING.
893 		 */
894 	}
895 	ifp->if_capenable = ifp->if_capabilities;
896 	if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B | AXE_FLAG_178))
897 		adv_pause = MIIF_DOPAUSE;
898 	else
899 		adv_pause = 0;
900 	mtx_lock(&Giant);
901 	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
902 	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
903 	    BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, adv_pause);
904 	mtx_unlock(&Giant);
905 
906 	return (error);
907 }
908 
909 /*
910  * Probe for an AX88172 chip.
911  */
912 static int
913 axe_probe(device_t dev)
914 {
915 	struct usb_attach_arg *uaa = device_get_ivars(dev);
916 
917 	if (uaa->usb_mode != USB_MODE_HOST)
918 		return (ENXIO);
919 	if (uaa->info.bConfigIndex != AXE_CONFIG_IDX)
920 		return (ENXIO);
921 	if (uaa->info.bIfaceIndex != AXE_IFACE_IDX)
922 		return (ENXIO);
923 
924 	return (usbd_lookup_id_by_uaa(axe_devs, sizeof(axe_devs), uaa));
925 }
926 
927 /*
928  * Attach the interface. Allocate softc structures, do ifmedia
929  * setup and ethernet/BPF attach.
930  */
931 static int
932 axe_attach(device_t dev)
933 {
934 	struct usb_attach_arg *uaa = device_get_ivars(dev);
935 	struct axe_softc *sc = device_get_softc(dev);
936 	struct usb_ether *ue = &sc->sc_ue;
937 	uint8_t iface_index;
938 	int error;
939 
940 	sc->sc_flags = USB_GET_DRIVER_INFO(uaa);
941 
942 	device_set_usb_desc(dev);
943 
944 	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
945 
946 	iface_index = AXE_IFACE_IDX;
947 	error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
948 	    axe_config, AXE_N_TRANSFER, sc, &sc->sc_mtx);
949 	if (error) {
950 		device_printf(dev, "allocating USB transfers failed\n");
951 		goto detach;
952 	}
953 
954 	ue->ue_sc = sc;
955 	ue->ue_dev = dev;
956 	ue->ue_udev = uaa->device;
957 	ue->ue_mtx = &sc->sc_mtx;
958 	ue->ue_methods = &axe_ue_methods;
959 
960 	error = uether_ifattach(ue);
961 	if (error) {
962 		device_printf(dev, "could not attach interface\n");
963 		goto detach;
964 	}
965 	return (0);			/* success */
966 
967 detach:
968 	axe_detach(dev);
969 	return (ENXIO);			/* failure */
970 }
971 
972 static int
973 axe_detach(device_t dev)
974 {
975 	struct axe_softc *sc = device_get_softc(dev);
976 	struct usb_ether *ue = &sc->sc_ue;
977 
978 	usbd_transfer_unsetup(sc->sc_xfer, AXE_N_TRANSFER);
979 	uether_ifdetach(ue);
980 	mtx_destroy(&sc->sc_mtx);
981 
982 	return (0);
983 }
984 
985 #if (AXE_BULK_BUF_SIZE >= 0x10000)
986 #error "Please update axe_bulk_read_callback()!"
987 #endif
988 
989 static void
990 axe_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
991 {
992 	struct axe_softc *sc = usbd_xfer_softc(xfer);
993 	struct usb_ether *ue = &sc->sc_ue;
994 	struct usb_page_cache *pc;
995 	int actlen;
996 
997 	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
998 
999 	switch (USB_GET_STATE(xfer)) {
1000 	case USB_ST_TRANSFERRED:
1001 		pc = usbd_xfer_get_frame(xfer, 0);
1002 		axe_rx_frame(ue, pc, actlen);
1003 
1004 		/* FALLTHROUGH */
1005 	case USB_ST_SETUP:
1006 tr_setup:
1007 		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
1008 		usbd_transfer_submit(xfer);
1009 		uether_rxflush(ue);
1010 		return;
1011 
1012 	default:			/* Error */
1013 		DPRINTF("bulk read error, %s\n", usbd_errstr(error));
1014 
1015 		if (error != USB_ERR_CANCELLED) {
1016 			/* try to clear stall first */
1017 			usbd_xfer_set_stall(xfer);
1018 			goto tr_setup;
1019 		}
1020 		return;
1021 
1022 	}
1023 }
1024 
1025 static int
1026 axe_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
1027 {
1028 	struct axe_softc *sc;
1029 	struct axe_sframe_hdr hdr;
1030 	struct axe_csum_hdr csum_hdr;
1031 	int error, len, pos;
1032 
1033 	sc = uether_getsc(ue);
1034 	pos = 0;
1035 	len = 0;
1036 	error = 0;
1037 	if ((sc->sc_flags & AXE_FLAG_STD_FRAME) != 0) {
1038 		while (pos < actlen) {
1039 			if ((pos + sizeof(hdr)) > actlen) {
1040 				/* too little data */
1041 				error = EINVAL;
1042 				break;
1043 			}
1044 			usbd_copy_out(pc, pos, &hdr, sizeof(hdr));
1045 
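			/*
			 * Each frame in the bulk-in buffer is preceded by a
			 * small header carrying the frame length and an
			 * inverted copy of it; XOR-ing the two must yield
			 * sc_lenmask or we have lost sync with the stream.
			 */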
1046 			if ((hdr.len ^ hdr.ilen) != sc->sc_lenmask) {
1047 				/* we lost sync */
1048 				error = EINVAL;
1049 				break;
1050 			}
1051 			pos += sizeof(hdr);
1052 			len = le16toh(hdr.len);
1053 			if (pos + len > actlen) {
1054 				/* invalid length */
1055 				error = EINVAL;
1056 				break;
1057 			}
1058 			axe_rxeof(ue, pc, pos, len, NULL);
1059 			pos += len + (len % 2);
1060 		}
1061 	} else if ((sc->sc_flags & AXE_FLAG_CSUM_FRAME) != 0) {
1062 		while (pos < actlen) {
1063 			if ((pos + sizeof(csum_hdr)) > actlen) {
1064 				/* too little data */
1065 				error = EINVAL;
1066 				break;
1067 			}
1068 			usbd_copy_out(pc, pos, &csum_hdr, sizeof(csum_hdr));
1069 
1070 			csum_hdr.len = le16toh(csum_hdr.len);
1071 			csum_hdr.ilen = le16toh(csum_hdr.ilen);
1072 			csum_hdr.cstatus = le16toh(csum_hdr.cstatus);
1073 			if ((AXE_CSUM_RXBYTES(csum_hdr.len) ^
1074 			    AXE_CSUM_RXBYTES(csum_hdr.ilen)) !=
1075 			    sc->sc_lenmask) {
1076 				/* we lost sync */
1077 				error = EINVAL;
1078 				break;
1079 			}
1080 			/*
1081 			 * Get the total transferred frame length including
1082 			 * the checksum header.  The length should be a
1083 			 * multiple of 4.
1084 			 */
1085 			len = sizeof(csum_hdr) + AXE_CSUM_RXBYTES(csum_hdr.len);
1086 			len = (len + 3) & ~3;
1087 			if (pos + len > actlen) {
1088 				/* invalid length */
1089 				error = EINVAL;
1090 				break;
1091 			}
1092 			axe_rxeof(ue, pc, pos + sizeof(csum_hdr),
1093 			    AXE_CSUM_RXBYTES(csum_hdr.len), &csum_hdr);
1094 			pos += len;
1095 		}
1096 	} else
1097 		axe_rxeof(ue, pc, 0, actlen, NULL);
1098 
1099 	if (error != 0)
1100 		ue->ue_ifp->if_ierrors++;
1101 	return (error);
1102 }
1103 
1104 static int
1105 axe_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset,
1106     unsigned int len, struct axe_csum_hdr *csum_hdr)
1107 {
1108 	struct ifnet *ifp = ue->ue_ifp;
1109 	struct mbuf *m;
1110 
1111 	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
1112 		ifp->if_ierrors++;
1113 		return (EINVAL);
1114 	}
1115 
1116 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1117 	if (m == NULL) {
1118 		ifp->if_iqdrops++;
1119 		return (ENOMEM);
1120 	}
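	/*
	 * Reserve ETHER_ALIGN bytes at the front of the cluster so the IP
	 * header following the 14 byte Ethernet header ends up 32-bit
	 * aligned.
	 */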
1121 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1122 	m_adj(m, ETHER_ALIGN);
1123 
1124 	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);
1125 
1126 	ifp->if_ipackets++;
1127 	m->m_pkthdr.rcvif = ifp;
1128 	m->m_pkthdr.len = m->m_len = len;
1129 
1130 	if (csum_hdr != NULL && csum_hdr->cstatus & AXE_CSUM_HDR_L3_TYPE_IPV4) {
1131 		if ((csum_hdr->cstatus & (AXE_CSUM_HDR_L4_CSUM_ERR |
1132 		    AXE_CSUM_HDR_L3_CSUM_ERR)) == 0) {
1133 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
1134 			    CSUM_IP_VALID;
1135 			if ((csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) ==
1136 			    AXE_CSUM_HDR_L4_TYPE_TCP ||
1137 			    (csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) ==
1138 			    AXE_CSUM_HDR_L4_TYPE_UDP) {
1139 				m->m_pkthdr.csum_flags |=
1140 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1141 				m->m_pkthdr.csum_data = 0xffff;
1142 			}
1143 		}
1144 	}
1145 
1146 	_IF_ENQUEUE(&ue->ue_rxq, m);
1147 	return (0);
1148 }
1149 
1150 #if ((AXE_BULK_BUF_SIZE >= 0x10000) || (AXE_BULK_BUF_SIZE < (MCLBYTES+4)))
1151 #error "Please update axe_bulk_write_callback()!"
1152 #endif
1153 
1154 static void
1155 axe_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
1156 {
1157 	struct axe_softc *sc = usbd_xfer_softc(xfer);
1158 	struct axe_sframe_hdr hdr;
1159 	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
1160 	struct usb_page_cache *pc;
1161 	struct mbuf *m;
1162 	int nframes, pos;
1163 
1164 	switch (USB_GET_STATE(xfer)) {
1165 	case USB_ST_TRANSFERRED:
1166 		DPRINTFN(11, "transfer complete\n");
1167 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1168 		/* FALLTHROUGH */
1169 	case USB_ST_SETUP:
1170 tr_setup:
1171 		if ((sc->sc_flags & AXE_FLAG_LINK) == 0 ||
1172 		    (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
1173 			/*
1174 			 * Don't send anything if there is no link or the
1175 			 * controller is busy.
1176 			 */
1177 			return;
1178 		}
1179 
1180 		for (nframes = 0; nframes < 16 &&
1181 		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) {
1182 			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1183 			if (m == NULL)
1184 				break;
1185 			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
1186 			    nframes);
1187 			pos = 0;
1188 			pc = usbd_xfer_get_frame(xfer, nframes);
1189 			if (AXE_IS_178_FAMILY(sc)) {
1190 				hdr.len = htole16(m->m_pkthdr.len);
1191 				hdr.ilen = ~hdr.len;
1192 				/*
1193 				 * If the upper stack has already computed
1194 				 * the checksum, tell the checksum offloading
1195 				 * capable controller not to insert its own
1196 				 * checksum for this frame.
1197 				 */
1198 				if (ifp->if_capabilities & IFCAP_TXCSUM) {
1199 					if ((m->m_pkthdr.csum_flags &
1200 					    AXE_CSUM_FEATURES) != 0)
1201 						hdr.len |= htole16(
1202 						    AXE_TX_CSUM_PSEUDO_HDR);
1203 					else
1204 						hdr.len |= htole16(
1205 						    AXE_TX_CSUM_DIS);
1206 				}
1207 				usbd_copy_in(pc, pos, &hdr, sizeof(hdr));
1208 				pos += sizeof(hdr);
1209 				usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
1210 				pos += m->m_pkthdr.len;
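				/*
				 * If the frame data ends exactly on a
				 * 512 byte boundary, append an empty
				 * header; presumably this lets the
				 * controller detect the frame boundary
				 * at the end of a full USB packet.
				 */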
1211 				if ((pos % 512) == 0) {
1212 					hdr.len = 0;
1213 					hdr.ilen = 0xffff;
1214 					usbd_copy_in(pc, pos, &hdr,
1215 					    sizeof(hdr));
1216 					pos += sizeof(hdr);
1217 				}
1218 			} else {
1219 				usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
1220 				pos += m->m_pkthdr.len;
1221 			}
1222 
1223 			/*
1224 			 * XXX
1225 			 * Update the TX packet counter here.  This is not
1226 			 * the correct way, but there seems to be no way to
1227 			 * know how many packets were sent at the end of the
1228 			 * transfer because the controller combines multiple
1229 			 * writes into a single one if there is room in its
1230 			 * TX buffer.
1231 			 */
1232 			ifp->if_opackets++;
1233 
1234 			/*
1235 			 * if there's a BPF listener, bounce a copy
1236 			 * of this frame to him:
1237 			 */
1238 			BPF_MTAP(ifp, m);
1239 
1240 			m_freem(m);
1241 
1242 			/* Set frame length. */
1243 			usbd_xfer_set_frame_len(xfer, nframes, pos);
1244 		}
1245 		if (nframes != 0) {
1246 			usbd_xfer_set_frames(xfer, nframes);
1247 			usbd_transfer_submit(xfer);
1248 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1249 		}
1250 		return;
1251 		/* NOTREACHED */
1252 	default:			/* Error */
1253 		DPRINTFN(11, "transfer error, %s\n",
1254 		    usbd_errstr(error));
1255 
1256 		ifp->if_oerrors++;
1257 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1258 
1259 		if (error != USB_ERR_CANCELLED) {
1260 			/* try to clear stall first */
1261 			usbd_xfer_set_stall(xfer);
1262 			goto tr_setup;
1263 		}
1264 		return;
1265 
1266 	}
1267 }
1268 
1269 static void
1270 axe_tick(struct usb_ether *ue)
1271 {
1272 	struct axe_softc *sc = uether_getsc(ue);
1273 	struct mii_data *mii = GET_MII(sc);
1274 
1275 	AXE_LOCK_ASSERT(sc, MA_OWNED);
1276 
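	/*
	 * Poll the PHY; when the link has just come up, the status change
	 * handler refreshes the media register and the transfers are
	 * restarted below.
	 */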
1277 	mii_tick(mii);
1278 	if ((sc->sc_flags & AXE_FLAG_LINK) == 0) {
1279 		axe_miibus_statchg(ue->ue_dev);
1280 		if ((sc->sc_flags & AXE_FLAG_LINK) != 0)
1281 			axe_start(ue);
1282 	}
1283 }
1284 
1285 static void
1286 axe_start(struct usb_ether *ue)
1287 {
1288 	struct axe_softc *sc = uether_getsc(ue);
1289 
1290 	/*
1291 	 * start the USB transfers, if not already started:
1292 	 */
1293 	usbd_transfer_start(sc->sc_xfer[AXE_BULK_DT_RD]);
1294 	usbd_transfer_start(sc->sc_xfer[AXE_BULK_DT_WR]);
1295 }
1296 
1297 static void
1298 axe_csum_cfg(struct usb_ether *ue)
1299 {
1300 	struct axe_softc *sc;
1301 	struct ifnet *ifp;
1302 	uint16_t csum1, csum2;
1303 
1304 	sc = uether_getsc(ue);
1305 	AXE_LOCK_ASSERT(sc, MA_OWNED);
1306 
1307 	if ((sc->sc_flags & AXE_FLAG_772B) != 0) {
1308 		ifp = uether_getifp(ue);
1309 		csum1 = 0;
1310 		csum2 = 0;
1311 		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1312 			csum1 |= AXE_TXCSUM_IP | AXE_TXCSUM_TCP |
1313 			    AXE_TXCSUM_UDP;
1314 		axe_cmd(sc, AXE_772B_CMD_WRITE_TXCSUM, csum2, csum1, NULL);
1315 		csum1 = 0;
1316 		csum2 = 0;
1317 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1318 			csum1 |= AXE_RXCSUM_IP | AXE_RXCSUM_IPVE |
1319 			    AXE_RXCSUM_TCP | AXE_RXCSUM_UDP | AXE_RXCSUM_ICMP |
1320 			    AXE_RXCSUM_IGMP;
1321 		axe_cmd(sc, AXE_772B_CMD_WRITE_RXCSUM, csum2, csum1, NULL);
1322 	}
1323 }
1324 
1325 static void
1326 axe_init(struct usb_ether *ue)
1327 {
1328 	struct axe_softc *sc = uether_getsc(ue);
1329 	struct ifnet *ifp = uether_getifp(ue);
1330 	uint16_t rxmode;
1331 
1332 	AXE_LOCK_ASSERT(sc, MA_OWNED);
1333 
1334 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1335 		return;
1336 
1337 	/* Cancel pending I/O */
1338 	axe_stop(ue);
1339 
1340 	axe_reset(sc);
1341 
1342 	/* Set MAC address and transmitter IPG values. */
1343 	if (AXE_IS_178_FAMILY(sc)) {
1344 		axe_cmd(sc, AXE_178_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp));
1345 		axe_cmd(sc, AXE_178_CMD_WRITE_IPG012, sc->sc_ipgs[2],
1346 		    (sc->sc_ipgs[1] << 8) | (sc->sc_ipgs[0]), NULL);
1347 	} else {
1348 		axe_cmd(sc, AXE_172_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp));
1349 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG0, 0, sc->sc_ipgs[0], NULL);
1350 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG1, 0, sc->sc_ipgs[1], NULL);
1351 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG2, 0, sc->sc_ipgs[2], NULL);
1352 	}
1353 
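	/*
	 * Select the expected RX frame header format and the matching
	 * length mask used by axe_rx_frame() to validate frame headers.
	 */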
1354 	if (AXE_IS_178_FAMILY(sc)) {
1355 		sc->sc_flags &= ~(AXE_FLAG_STD_FRAME | AXE_FLAG_CSUM_FRAME);
1356 		if ((sc->sc_flags & AXE_FLAG_772B) != 0)
1357 			sc->sc_lenmask = AXE_CSUM_HDR_LEN_MASK;
1358 		else
1359 			sc->sc_lenmask = AXE_HDR_LEN_MASK;
1360 		if ((sc->sc_flags & AXE_FLAG_772B) != 0 &&
1361 		    (ifp->if_capenable & IFCAP_RXCSUM) != 0)
1362 			sc->sc_flags |= AXE_FLAG_CSUM_FRAME;
1363 		else
1364 			sc->sc_flags |= AXE_FLAG_STD_FRAME;
1365 	}
1366 
1367 	/* Configure TX/RX checksum offloading. */
1368 	axe_csum_cfg(ue);
1369 
1370 	if (sc->sc_flags & AXE_FLAG_772B) {
1371 		/* AX88772B uses a different maximum frame burst configuration. */
1372 		axe_cmd(sc, AXE_772B_CMD_RXCTL_WRITE_CFG,
1373 		    ax88772b_mfb_table[AX88772B_MFB_16K].threshold,
1374 		    ax88772b_mfb_table[AX88772B_MFB_16K].byte_cnt, NULL);
1375 	}
1376 
1377 	/* Enable receiver, set RX mode. */
1378 	rxmode = (AXE_RXCMD_MULTICAST | AXE_RXCMD_ENABLE);
1379 	if (AXE_IS_178_FAMILY(sc)) {
1380 		if (sc->sc_flags & AXE_FLAG_772B) {
1381 			/*
1382 			 * Select RX header format type 1.  Aligning the IP
1383 			 * header on a 4 byte boundary is not needed when
1384 			 * the checksum offloading feature is not used,
1385 			 * because we always copy the received frame in the
1386 			 * RX handler.  When RX checksum offloading is
1387 			 * active, aligning the IP header is required to
1388 			 * reflect the actual frame length including the RX
1389 			 * header size.
1390 			 */
1391 			rxmode |= AXE_772B_RXCMD_HDR_TYPE_1;
1392 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1393 				rxmode |= AXE_772B_RXCMD_IPHDR_ALIGN;
1394 		} else {
1395 			/*
1396 			 * The default RX buffer size is too small to get
1397 			 * maximum performance.
1398 			 */
1399 			rxmode |= AXE_178_RXCMD_MFB_16384;
1400 		}
1401 	} else {
1402 		rxmode |= AXE_172_RXCMD_UNICAST;
1403 	}
1404 
1405 	/* If we want promiscuous mode, set the allframes bit. */
1406 	if (ifp->if_flags & IFF_PROMISC)
1407 		rxmode |= AXE_RXCMD_PROMISC;
1408 
1409 	if (ifp->if_flags & IFF_BROADCAST)
1410 		rxmode |= AXE_RXCMD_BROADCAST;
1411 
1412 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
1413 
1414 	/* Load the multicast filter. */
1415 	axe_setmulti(ue);
1416 
1417 	usbd_xfer_set_stall(sc->sc_xfer[AXE_BULK_DT_WR]);
1418 
1419 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1420 	/* Switch to selected media. */
1421 	axe_ifmedia_upd(ifp);
1422 }
1423 
1424 static void
1425 axe_setpromisc(struct usb_ether *ue)
1426 {
1427 	struct axe_softc *sc = uether_getsc(ue);
1428 	struct ifnet *ifp = uether_getifp(ue);
1429 	uint16_t rxmode;
1430 
1431 	axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode);
1432 
1433 	rxmode = le16toh(rxmode);
1434 
1435 	if (ifp->if_flags & IFF_PROMISC) {
1436 		rxmode |= AXE_RXCMD_PROMISC;
1437 	} else {
1438 		rxmode &= ~AXE_RXCMD_PROMISC;
1439 	}
1440 
1441 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
1442 
1443 	axe_setmulti(ue);
1444 }
1445 
1446 static void
1447 axe_stop(struct usb_ether *ue)
1448 {
1449 	struct axe_softc *sc = uether_getsc(ue);
1450 	struct ifnet *ifp = uether_getifp(ue);
1451 
1452 	AXE_LOCK_ASSERT(sc, MA_OWNED);
1453 
1454 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1455 	sc->sc_flags &= ~AXE_FLAG_LINK;
1456 
1457 	/*
1458 	 * stop all the transfers, if not already stopped:
1459 	 */
1460 	usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_WR]);
1461 	usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_RD]);
1462 }
1463 
1464 static int
1465 axe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1466 {
1467 	struct usb_ether *ue = ifp->if_softc;
1468 	struct axe_softc *sc;
1469 	struct ifreq *ifr;
1470 	int error, mask, reinit;
1471 
1472 	sc = uether_getsc(ue);
1473 	ifr = (struct ifreq *)data;
1474 	error = 0;
1475 	reinit = 0;
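	/*
	 * Toggling checksum offloading requires a reinit because the RX
	 * frame header format selected in axe_init() depends on whether
	 * IFCAP_RXCSUM is enabled.
	 */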
1476 	if (cmd == SIOCSIFCAP) {
1477 		AXE_LOCK(sc);
1478 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1479 		if ((mask & IFCAP_TXCSUM) != 0 &&
1480 		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1481 			ifp->if_capenable ^= IFCAP_TXCSUM;
1482 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1483 				ifp->if_hwassist |= AXE_CSUM_FEATURES;
1484 			else
1485 				ifp->if_hwassist &= ~AXE_CSUM_FEATURES;
1486 			reinit++;
1487 		}
1488 		if ((mask & IFCAP_RXCSUM) != 0 &&
1489 		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
1490 			ifp->if_capenable ^= IFCAP_RXCSUM;
1491 			reinit++;
1492 		}
1493 		if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
1494 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1495 		else
1496 			reinit = 0;
1497 		AXE_UNLOCK(sc);
1498 		if (reinit > 0)
1499 			uether_init(ue);
1500 	} else
1501 		error = uether_ioctl(ifp, cmd, data);
1502 
1503 	return (error);
1504 }
1505