1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1997, 1998, 1999, 2000-2003
5  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 /*
37  * ASIX Electronics AX88172/AX88178/AX88772 USB 2.0 ethernet driver.
38  * Used in the LinkSys USB200M and various other adapters.
39  *
40  * Manuals available from:
41  * http://www.asix.com.tw/datasheet/mac/Ax88172.PDF
42  * Note: you need the manual for the AX88170 chip (USB 1.x ethernet
43  * controller) to find the definitions for the RX control register.
44  * http://www.asix.com.tw/datasheet/mac/Ax88170.PDF
45  *
46  * Written by Bill Paul <wpaul@windriver.com>
47  * Senior Engineer
48  * Wind River Systems
49  */
50 
51 /*
52  * The AX88172 provides USB ethernet support at 10 and 100Mbps.
53  * It uses an external PHY (reference designs use a RealTek chip),
54  * and has a 64-bit multicast hash filter. There is some information
55  * missing from the manual which one needs to know in order to make
56  * the chip function:
57  *
58  * - You must set bit 7 in the RX control register, otherwise the
59  *   chip won't receive any packets.
60  * - You must initialize all 3 IPG registers, or you won't be able
61  *   to send any packets.
62  *
63  * Note that this device appears to only support loading the station
64  * address via autoload from the EEPROM (i.e. there's no way to manually
65  * set it).
66  *
67  * (Adam Weinberger wanted me to name this driver if_gir.c.)
68  */
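
/*
 * Both quirks above are handled in axe_init() below: the rxmode value
 * written to the RX control register includes AXE_RXCMD_ENABLE
 * (presumably the bit referred to above), and all three IPG registers
 * are programmed with the values fetched or defaulted in
 * axe_attach_post().
 */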
69 
70 /*
71  * Ax88178 and Ax88772 support backported from the OpenBSD driver.
72  * 2007/02/12, J.R. Oldroyd, fbsd@opal.com
73  *
74  * Manual here:
75  * http://www.asix.com.tw/FrootAttach/datasheet/AX88178_datasheet_Rev10.pdf
76  * http://www.asix.com.tw/FrootAttach/datasheet/AX88772_datasheet_Rev10.pdf
77  */
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/bus.h>
82 #include <sys/condvar.h>
83 #include <sys/endian.h>
84 #include <sys/kernel.h>
85 #include <sys/lock.h>
86 #include <sys/malloc.h>
87 #include <sys/mbuf.h>
88 #include <sys/module.h>
89 #include <sys/mutex.h>
90 #include <sys/socket.h>
91 #include <sys/sockio.h>
92 #include <sys/sysctl.h>
93 #include <sys/sx.h>
94 
95 #include <net/if.h>
96 #include <net/if_var.h>
97 #include <net/ethernet.h>
98 #include <net/if_types.h>
99 #include <net/if_media.h>
100 #include <net/if_vlan_var.h>
101 
102 #include <dev/mii/mii.h>
103 #include <dev/mii/miivar.h>
104 
105 #include <dev/usb/usb.h>
106 #include <dev/usb/usbdi.h>
107 #include <dev/usb/usbdi_util.h>
108 #include "usbdevs.h"
109 
110 #define	USB_DEBUG_VAR axe_debug
111 #include <dev/usb/usb_debug.h>
112 #include <dev/usb/usb_process.h>
113 
114 #include <dev/usb/net/usb_ethernet.h>
115 #include <dev/usb/net/if_axereg.h>
116 
117 #include "miibus_if.h"
118 
119 /*
120  * AXE_178_MAX_FRAME_BURST
121  * max frame burst size for Ax88178 and Ax88772
122  *	0	2048 bytes
123  *	1	4096 bytes
124  *	2	8192 bytes
125  *	3	16384 bytes
126  * use the largest your system can handle without USB stalling.
127  *
128  * NB: 88772 parts appear to generate lots of input errors with
129  * a 2K rx buffer and 8K is only slightly faster than 4K on an
130  * EHCI port on a T42 so change at your own risk.
131  */
132 #define AXE_178_MAX_FRAME_BURST	1
133 
134 #define	AXE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
135 
136 #ifdef USB_DEBUG
137 static int axe_debug = 0;
138 
139 static SYSCTL_NODE(_hw_usb, OID_AUTO, axe, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
140     "USB axe");
141 SYSCTL_INT(_hw_usb_axe, OID_AUTO, debug, CTLFLAG_RWTUN, &axe_debug, 0,
142     "Debug level");
143 #endif
144 
145 /*
146  * Various supported device vendors/products.
147  */
148 static const STRUCT_USB_HOST_ID axe_devs[] = {
149 #define	AXE_DEV(v,p,i) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i) }
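	/*
	 * For example, AXE_DEV(ASIX, AX88772, AXE_FLAG_772) expands to
	 * { USB_VPI(USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88772, AXE_FLAG_772) },
	 * i.e. a vendor/product match whose driver info carries the chip
	 * family flag that axe_attach() copies into sc_flags.
	 */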
150 	AXE_DEV(ABOCOM, UF200, 0),
151 	AXE_DEV(ACERCM, EP1427X2, 0),
152 	AXE_DEV(APPLE, ETHERNET, AXE_FLAG_772),
153 	AXE_DEV(ASIX, AX88172, 0),
154 	AXE_DEV(ASIX, AX88178, AXE_FLAG_178),
155 	AXE_DEV(ASIX, AX88772, AXE_FLAG_772),
156 	AXE_DEV(ASIX, AX88772A, AXE_FLAG_772A),
157 	AXE_DEV(ASIX, AX88772B, AXE_FLAG_772B),
158 	AXE_DEV(ASIX, AX88772B_1, AXE_FLAG_772B),
159 	AXE_DEV(ATEN, UC210T, 0),
160 	AXE_DEV(BELKIN, F5D5055, AXE_FLAG_178),
161 	AXE_DEV(BILLIONTON, USB2AR, 0),
162 	AXE_DEV(CISCOLINKSYS, USB200MV2, AXE_FLAG_772A),
163 	AXE_DEV(COREGA, FETHER_USB2_TX, 0),
164 	AXE_DEV(DLINK, DUBE100, 0),
165 	AXE_DEV(DLINK, DUBE100B1, AXE_FLAG_772),
166 	AXE_DEV(DLINK, DUBE100C1, AXE_FLAG_772B),
167 	AXE_DEV(GOODWAY, GWUSB2E, 0),
168 	AXE_DEV(IODATA, ETGUS2, AXE_FLAG_178),
169 	AXE_DEV(JVC, MP_PRX1, 0),
170 	AXE_DEV(LENOVO, ETHERNET, AXE_FLAG_772B),
171 	AXE_DEV(LINKSYS2, USB200M, 0),
172 	AXE_DEV(LINKSYS4, USB1000, AXE_FLAG_178),
173 	AXE_DEV(LOGITEC, LAN_GTJU2A, AXE_FLAG_178),
174 	AXE_DEV(MELCO, LUAU2KTX, 0),
175 	AXE_DEV(MELCO, LUA3U2AGT, AXE_FLAG_178),
176 	AXE_DEV(NETGEAR, FA120, 0),
177 	AXE_DEV(OQO, ETHER01PLUS, AXE_FLAG_772),
178 	AXE_DEV(PLANEX3, GU1000T, AXE_FLAG_178),
179 	AXE_DEV(SITECOM, LN029, 0),
180 	AXE_DEV(SITECOMEU, LN028, AXE_FLAG_178),
181 	AXE_DEV(SITECOMEU, LN031, AXE_FLAG_178),
182 	AXE_DEV(SYSTEMTALKS, SGCX2UL, 0),
183 #undef AXE_DEV
184 };
185 
186 static device_probe_t axe_probe;
187 static device_attach_t axe_attach;
188 static device_detach_t axe_detach;
189 
190 static usb_callback_t axe_bulk_read_callback;
191 static usb_callback_t axe_bulk_write_callback;
192 
193 static miibus_readreg_t axe_miibus_readreg;
194 static miibus_writereg_t axe_miibus_writereg;
195 static miibus_statchg_t axe_miibus_statchg;
196 
197 static uether_fn_t axe_attach_post;
198 static uether_fn_t axe_init;
199 static uether_fn_t axe_stop;
200 static uether_fn_t axe_start;
201 static uether_fn_t axe_tick;
202 static uether_fn_t axe_setmulti;
203 static uether_fn_t axe_setpromisc;
204 
205 static int	axe_attach_post_sub(struct usb_ether *);
206 static int	axe_ifmedia_upd(if_t);
207 static void	axe_ifmedia_sts(if_t, struct ifmediareq *);
208 static int	axe_cmd(struct axe_softc *, int, int, int, void *);
209 static void	axe_ax88178_init(struct axe_softc *);
210 static void	axe_ax88772_init(struct axe_softc *);
211 static void	axe_ax88772_phywake(struct axe_softc *);
212 static void	axe_ax88772a_init(struct axe_softc *);
213 static void	axe_ax88772b_init(struct axe_softc *);
214 static int	axe_get_phyno(struct axe_softc *, int);
215 static int	axe_ioctl(if_t, u_long, caddr_t);
216 static int	axe_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
217 static int	axe_rxeof(struct usb_ether *, struct usb_page_cache *,
218 		    unsigned offset, unsigned, struct axe_csum_hdr *);
219 static void	axe_csum_cfg(struct usb_ether *);
220 
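/*
 * Bulk transfer layout: the TX transfer is split into up to 16 USB
 * frames of MCLBYTES each, matching the 16-packet batching loop in
 * axe_bulk_write_callback(); the RX transfer uses a single 16 kB
 * buffer which axe_rx_frame() walks packet by packet.
 */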
221 static const struct usb_config axe_config[AXE_N_TRANSFER] = {
222 	[AXE_BULK_DT_WR] = {
223 		.type = UE_BULK,
224 		.endpoint = UE_ADDR_ANY,
225 		.direction = UE_DIR_OUT,
226 		.frames = 16,
227 		.bufsize = 16 * MCLBYTES,
228 		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
229 		.callback = axe_bulk_write_callback,
230 		.timeout = 10000,	/* 10 seconds */
231 	},
232 
233 	[AXE_BULK_DT_RD] = {
234 		.type = UE_BULK,
235 		.endpoint = UE_ADDR_ANY,
236 		.direction = UE_DIR_IN,
237 		.bufsize = 16384,	/* bytes */
238 		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
239 		.callback = axe_bulk_read_callback,
240 		.timeout = 0,	/* no timeout */
241 	},
242 };
243 
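/*
 * Per-burst-size RX control parameters for the AX88772B.  Each entry
 * holds the threshold/byte count register values (and, apparently, the
 * corresponding burst size in bytes) that axe_init() programs through
 * AXE_772B_CMD_RXCTL_WRITE_CFG; only the 16 kB entry is used.
 */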
244 static const struct ax88772b_mfb ax88772b_mfb_table[] = {
245 	{ 0x8000, 0x8001, 2048 },
246 	{ 0x8100, 0x8147, 4096 },
247 	{ 0x8200, 0x81EB, 6144 },
248 	{ 0x8300, 0x83D7, 8192 },
249 	{ 0x8400, 0x851E, 16384 },
250 	{ 0x8500, 0x8666, 20480 },
251 	{ 0x8600, 0x87AE, 24576 },
252 	{ 0x8700, 0x8A3D, 32768 }
253 };
254 
255 static device_method_t axe_methods[] = {
256 	/* Device interface */
257 	DEVMETHOD(device_probe, axe_probe),
258 	DEVMETHOD(device_attach, axe_attach),
259 	DEVMETHOD(device_detach, axe_detach),
260 
261 	/* MII interface */
262 	DEVMETHOD(miibus_readreg, axe_miibus_readreg),
263 	DEVMETHOD(miibus_writereg, axe_miibus_writereg),
264 	DEVMETHOD(miibus_statchg, axe_miibus_statchg),
265 
266 	DEVMETHOD_END
267 };
268 
269 static driver_t axe_driver = {
270 	.name = "axe",
271 	.methods = axe_methods,
272 	.size = sizeof(struct axe_softc),
273 };
274 
275 DRIVER_MODULE(axe, uhub, axe_driver, NULL, NULL);
276 DRIVER_MODULE(miibus, axe, miibus_driver, 0, 0);
277 MODULE_DEPEND(axe, uether, 1, 1, 1);
278 MODULE_DEPEND(axe, usb, 1, 1, 1);
279 MODULE_DEPEND(axe, ether, 1, 1, 1);
280 MODULE_DEPEND(axe, miibus, 1, 1, 1);
281 MODULE_VERSION(axe, 1);
282 USB_PNP_HOST_INFO(axe_devs);
283 
284 static const struct usb_ether_methods axe_ue_methods = {
285 	.ue_attach_post = axe_attach_post,
286 	.ue_attach_post_sub = axe_attach_post_sub,
287 	.ue_start = axe_start,
288 	.ue_init = axe_init,
289 	.ue_stop = axe_stop,
290 	.ue_tick = axe_tick,
291 	.ue_setmulti = axe_setmulti,
292 	.ue_setpromisc = axe_setpromisc,
293 	.ue_mii_upd = axe_ifmedia_upd,
294 	.ue_mii_sts = axe_ifmedia_sts,
295 };
296 
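/*
 * axe_cmd() issues a single vendor-specific control request.  The cmd
 * argument packs the transfer direction, request code and data length,
 * which AXE_CMD_IS_WRITE(), AXE_CMD_CMD() and AXE_CMD_LEN() unpack
 * below; for example, axe_setmulti() reads the RX control register
 * with axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode).
 */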
297 static int
298 axe_cmd(struct axe_softc *sc, int cmd, int index, int val, void *buf)
299 {
300 	struct usb_device_request req;
301 	usb_error_t err;
302 
303 	AXE_LOCK_ASSERT(sc, MA_OWNED);
304 
305 	req.bmRequestType = (AXE_CMD_IS_WRITE(cmd) ?
306 	    UT_WRITE_VENDOR_DEVICE :
307 	    UT_READ_VENDOR_DEVICE);
308 	req.bRequest = AXE_CMD_CMD(cmd);
309 	USETW(req.wValue, val);
310 	USETW(req.wIndex, index);
311 	USETW(req.wLength, AXE_CMD_LEN(cmd));
312 
313 	err = uether_do_request(&sc->sc_ue, &req, buf, 1000);
314 
315 	return (err);
316 }
317 
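/*
 * MII access helpers.  Each register access is bracketed by
 * AXE_CMD_MII_OPMODE_SW / AXE_CMD_MII_OPMODE_HW, which appear to switch
 * the MII management interface into software-driven mode for the
 * duration of the access and back afterwards.
 */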
318 static int
319 axe_miibus_readreg(device_t dev, int phy, int reg)
320 {
321 	struct axe_softc *sc = device_get_softc(dev);
322 	uint16_t val;
323 	int locked;
324 
325 	locked = mtx_owned(&sc->sc_mtx);
326 	if (!locked)
327 		AXE_LOCK(sc);
328 
329 	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
330 	axe_cmd(sc, AXE_CMD_MII_READ_REG, reg, phy, &val);
331 	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
332 
333 	val = le16toh(val);
334 	if (AXE_IS_772(sc) && reg == MII_BMSR) {
335 		/*
336 		 * BMSR of AX88772 indicates that it supports extended
337 		 * capability but the extended status register is
338 		 * revered for embedded ethernet PHY. So clear the
339 		 * extended capability bit of BMSR.
340 		 */
341 		val &= ~BMSR_EXTCAP;
342 	}
343 
344 	if (!locked)
345 		AXE_UNLOCK(sc);
346 	return (val);
347 }
348 
349 static int
350 axe_miibus_writereg(device_t dev, int phy, int reg, int val)
351 {
352 	struct axe_softc *sc = device_get_softc(dev);
353 	int locked;
354 
355 	val = htole32(val);
356 	locked = mtx_owned(&sc->sc_mtx);
357 	if (!locked)
358 		AXE_LOCK(sc);
359 
360 	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
361 	axe_cmd(sc, AXE_CMD_MII_WRITE_REG, reg, phy, &val);
362 	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
363 
364 	if (!locked)
365 		AXE_UNLOCK(sc);
366 	return (0);
367 }
368 
369 static void
370 axe_miibus_statchg(device_t dev)
371 {
372 	struct axe_softc *sc = device_get_softc(dev);
373 	struct mii_data *mii = GET_MII(sc);
374 	if_t ifp;
375 	uint16_t val;
376 	int err, locked;
377 
378 	locked = mtx_owned(&sc->sc_mtx);
379 	if (!locked)
380 		AXE_LOCK(sc);
381 
382 	ifp = uether_getifp(&sc->sc_ue);
383 	if (mii == NULL || ifp == NULL ||
384 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
385 		goto done;
386 
387 	sc->sc_flags &= ~AXE_FLAG_LINK;
388 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
389 	    (IFM_ACTIVE | IFM_AVALID)) {
390 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
391 		case IFM_10_T:
392 		case IFM_100_TX:
393 			sc->sc_flags |= AXE_FLAG_LINK;
394 			break;
395 		case IFM_1000_T:
396 			if ((sc->sc_flags & AXE_FLAG_178) == 0)
397 				break;
398 			sc->sc_flags |= AXE_FLAG_LINK;
399 			break;
400 		default:
401 			break;
402 		}
403 	}
404 
405 	/* Lost link, do nothing. */
406 	if ((sc->sc_flags & AXE_FLAG_LINK) == 0)
407 		goto done;
408 
409 	val = 0;
410 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
411 		val |= AXE_MEDIA_FULL_DUPLEX;
412 		if (AXE_IS_178_FAMILY(sc)) {
413 			if ((IFM_OPTIONS(mii->mii_media_active) &
414 			    IFM_ETH_TXPAUSE) != 0)
415 				val |= AXE_178_MEDIA_TXFLOW_CONTROL_EN;
416 			if ((IFM_OPTIONS(mii->mii_media_active) &
417 			    IFM_ETH_RXPAUSE) != 0)
418 				val |= AXE_178_MEDIA_RXFLOW_CONTROL_EN;
419 		}
420 	}
421 	if (AXE_IS_178_FAMILY(sc)) {
422 		val |= AXE_178_MEDIA_RX_EN | AXE_178_MEDIA_MAGIC;
423 		if ((sc->sc_flags & AXE_FLAG_178) != 0)
424 			val |= AXE_178_MEDIA_ENCK;
425 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
426 		case IFM_1000_T:
427 			val |= AXE_178_MEDIA_GMII | AXE_178_MEDIA_ENCK;
428 			break;
429 		case IFM_100_TX:
430 			val |= AXE_178_MEDIA_100TX;
431 			break;
432 		case IFM_10_T:
433 			/* doesn't need to be handled */
434 			break;
435 		}
436 	}
437 	err = axe_cmd(sc, AXE_CMD_WRITE_MEDIA, 0, val, NULL);
438 	if (err)
439 		device_printf(dev, "media change failed, error %d\n", err);
440 done:
441 	if (!locked)
442 		AXE_UNLOCK(sc);
443 }
444 
445 /*
446  * Set media options.
447  */
448 static int
449 axe_ifmedia_upd(if_t ifp)
450 {
451 	struct axe_softc *sc = if_getsoftc(ifp);
452 	struct mii_data *mii = GET_MII(sc);
453 	struct mii_softc *miisc;
454 	int error;
455 
456 	AXE_LOCK_ASSERT(sc, MA_OWNED);
457 
458 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
459 		PHY_RESET(miisc);
460 	error = mii_mediachg(mii);
461 	return (error);
462 }
463 
464 /*
465  * Report current media status.
466  */
467 static void
468 axe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
469 {
470 	struct axe_softc *sc = if_getsoftc(ifp);
471 	struct mii_data *mii = GET_MII(sc);
472 
473 	AXE_LOCK(sc);
474 	mii_pollstat(mii);
475 	ifmr->ifm_active = mii->mii_media_active;
476 	ifmr->ifm_status = mii->mii_media_status;
477 	AXE_UNLOCK(sc);
478 }
479 
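/*
 * Multicast filtering uses the 64-bit hash table mentioned at the top
 * of this file: the upper 6 bits of the big-endian CRC32 of each
 * multicast address select one of the 64 bits in the 8-byte table that
 * axe_setmulti() writes with AXE_CMD_WRITE_MCAST.
 */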
480 static u_int
481 axe_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
482 {
483 	uint8_t *hashtbl = arg;
484 	uint32_t h;
485 
486 	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
487 	hashtbl[h / 8] |= 1 << (h % 8);
488 
489 	return (1);
490 }
491 
492 static void
493 axe_setmulti(struct usb_ether *ue)
494 {
495 	struct axe_softc *sc = uether_getsc(ue);
496 	if_t ifp = uether_getifp(ue);
497 	uint16_t rxmode;
498 	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
499 
500 	AXE_LOCK_ASSERT(sc, MA_OWNED);
501 
502 	axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode);
503 	rxmode = le16toh(rxmode);
504 
505 	if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
506 		rxmode |= AXE_RXCMD_ALLMULTI;
507 		axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
508 		return;
509 	}
510 	rxmode &= ~AXE_RXCMD_ALLMULTI;
511 
512 	if_foreach_llmaddr(ifp, axe_hash_maddr, &hashtbl);
513 
514 	axe_cmd(sc, AXE_CMD_WRITE_MCAST, 0, 0, (void *)&hashtbl);
515 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
516 }
517 
518 static int
519 axe_get_phyno(struct axe_softc *sc, int sel)
520 {
521 	int phyno;
522 
523 	switch (AXE_PHY_TYPE(sc->sc_phyaddrs[sel])) {
524 	case PHY_TYPE_100_HOME:
525 	case PHY_TYPE_GIG:
526 		phyno = AXE_PHY_NO(sc->sc_phyaddrs[sel]);
527 		break;
528 	case PHY_TYPE_SPECIAL:
529 		/* FALLTHROUGH */
530 	case PHY_TYPE_RSVD:
531 		/* FALLTHROUGH */
532 	case PHY_TYPE_NON_SUP:
533 		/* FALLTHROUGH */
534 	default:
535 		phyno = -1;
536 		break;
537 	}
538 
539 	return (phyno);
540 }
541 
542 #define	AXE_GPIO_WRITE(x, y)	do {				\
543 	axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, (x), NULL);		\
544 	uether_pause(ue, (y));					\
545 } while (0)
546 
547 static void
548 axe_ax88178_init(struct axe_softc *sc)
549 {
550 	struct usb_ether *ue;
551 	int gpio0, ledmode, phymode;
552 	uint16_t eeprom, val;
553 
554 	ue = &sc->sc_ue;
555 	axe_cmd(sc, AXE_CMD_SROM_WR_ENABLE, 0, 0, NULL);
556 	/* XXX magic */
557 	axe_cmd(sc, AXE_CMD_SROM_READ, 0, 0x0017, &eeprom);
558 	eeprom = le16toh(eeprom);
559 	axe_cmd(sc, AXE_CMD_SROM_WR_DISABLE, 0, 0, NULL);
560 
561 	/* if the EEPROM is invalid we have to use GPIO0 */
562 	if (eeprom == 0xffff) {
563 		phymode = AXE_PHY_MODE_MARVELL;
564 		gpio0 = 1;
565 		ledmode = 0;
566 	} else {
567 		phymode = eeprom & 0x7f;
568 		gpio0 = (eeprom & 0x80) ? 0 : 1;
569 		ledmode = eeprom >> 8;
570 	}
571 
572 	if (bootverbose)
573 		device_printf(sc->sc_ue.ue_dev,
574 		    "EEPROM data : 0x%04x, phymode : 0x%02x\n", eeprom,
575 		    phymode);
576 	/* Program GPIOs depending on PHY hardware. */
577 	switch (phymode) {
578 	case AXE_PHY_MODE_MARVELL:
579 		if (gpio0 == 1) {
580 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0_EN,
581 			    hz / 32);
582 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
583 			    hz / 32);
584 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2_EN, hz / 4);
585 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
586 			    hz / 32);
587 		} else {
588 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
589 			    AXE_GPIO1_EN, hz / 3);
590 			if (ledmode == 1) {
591 				AXE_GPIO_WRITE(AXE_GPIO1_EN, hz / 3);
592 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN,
593 				    hz / 3);
594 			} else {
595 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
596 				    AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
597 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
598 				    AXE_GPIO2_EN, hz / 4);
599 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
600 				    AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
601 			}
602 		}
603 		break;
604 	case AXE_PHY_MODE_CICADA:
605 	case AXE_PHY_MODE_CICADA_V2:
606 	case AXE_PHY_MODE_CICADA_V2_ASIX:
607 		if (gpio0 == 1)
608 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0 |
609 			    AXE_GPIO0_EN, hz / 32);
610 		else
611 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
612 			    AXE_GPIO1_EN, hz / 32);
613 		break;
614 	case AXE_PHY_MODE_AGERE:
615 		AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
616 		    AXE_GPIO1_EN, hz / 32);
617 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 |
618 		    AXE_GPIO2_EN, hz / 32);
619 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2_EN, hz / 4);
620 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 |
621 		    AXE_GPIO2_EN, hz / 32);
622 		break;
623 	case AXE_PHY_MODE_REALTEK_8211CL:
624 	case AXE_PHY_MODE_REALTEK_8211BN:
625 	case AXE_PHY_MODE_REALTEK_8251CL:
626 		val = gpio0 == 1 ? AXE_GPIO0 | AXE_GPIO0_EN :
627 		    AXE_GPIO1 | AXE_GPIO1_EN;
628 		AXE_GPIO_WRITE(val, hz / 32);
629 		AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
630 		AXE_GPIO_WRITE(val | AXE_GPIO2_EN, hz / 4);
631 		AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
632 		if (phymode == AXE_PHY_MODE_REALTEK_8211CL) {
633 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
634 			    0x1F, 0x0005);
635 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
636 			    0x0C, 0x0000);
637 			val = axe_miibus_readreg(ue->ue_dev, sc->sc_phyno,
638 			    0x0001);
639 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
640 			    0x01, val | 0x0080);
641 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
642 			    0x1F, 0x0000);
643 		}
644 		break;
645 	default:
646 		/* Unknown PHY model or no need to program GPIOs. */
647 		break;
648 	}
649 
650 	/* soft reset */
651 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
652 	uether_pause(ue, hz / 4);
653 
654 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
655 	    AXE_SW_RESET_PRL | AXE_178_RESET_MAGIC, NULL);
656 	uether_pause(ue, hz / 4);
657 	/* Enable MII/GMII/RGMII interface to work with external PHY. */
658 	axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0, NULL);
659 	uether_pause(ue, hz / 4);
660 
661 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
662 }
663 
664 static void
665 axe_ax88772_init(struct axe_softc *sc)
666 {
667 	axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, 0x00b0, NULL);
668 	uether_pause(&sc->sc_ue, hz / 16);
669 
670 	if (sc->sc_phyno == AXE_772_PHY_NO_EPHY) {
671 		/* ask for the embedded PHY */
672 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0x01, NULL);
673 		uether_pause(&sc->sc_ue, hz / 64);
674 
675 		/* power down and reset state, pin reset state */
676 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
677 		    AXE_SW_RESET_CLEAR, NULL);
678 		uether_pause(&sc->sc_ue, hz / 16);
679 
680 		/* power down/reset state, pin operating state */
681 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
682 		    AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL);
683 		uether_pause(&sc->sc_ue, hz / 4);
684 
685 		/* power up, reset */
686 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_PRL, NULL);
687 
688 		/* power up, operating */
689 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
690 		    AXE_SW_RESET_IPRL | AXE_SW_RESET_PRL, NULL);
691 	} else {
692 		/* ask for external PHY */
693 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0x00, NULL);
694 		uether_pause(&sc->sc_ue, hz / 64);
695 
696 		/* power down internal PHY */
697 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
698 		    AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL);
699 	}
700 
701 	uether_pause(&sc->sc_ue, hz / 4);
702 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
703 }
704 
705 static void
706 axe_ax88772_phywake(struct axe_softc *sc)
707 {
708 	if (sc->sc_phyno == AXE_772_PHY_NO_EPHY) {
709 		/* Manually select internal(embedded) PHY - MAC mode. */
710 		/* Manually select the internal (embedded) PHY - MAC mode. */
711 		    AXE_SW_PHY_SELECT_EMBEDDED | AXE_SW_PHY_SELECT_SS_MII,
712 		    NULL);
713 		uether_pause(&sc->sc_ue, hz / 32);
714 	} else {
715 		/*
716 		 * Manually select external PHY - MAC mode.
717 		 * Reverse MII/RMII is for AX88772A PHY mode.
718 		 */
719 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB |
720 		    AXE_SW_PHY_SELECT_EXT | AXE_SW_PHY_SELECT_SS_MII, NULL);
721 		uether_pause(&sc->sc_ue, hz / 32);
722 	}
723 	/* Take PHY out of power down. */
724 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPPD |
725 	    AXE_SW_RESET_IPRL, NULL);
726 	uether_pause(&sc->sc_ue, hz / 4);
727 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL);
728 	uether_pause(&sc->sc_ue, hz);
729 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
730 	uether_pause(&sc->sc_ue, hz / 32);
731 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL);
732 	uether_pause(&sc->sc_ue, hz / 32);
733 }
734 
735 static void
736 axe_ax88772a_init(struct axe_softc *sc)
737 {
738 	struct usb_ether *ue;
739 
740 	ue = &sc->sc_ue;
741 	/* Reload EEPROM. */
742 	AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32);
743 	axe_ax88772_phywake(sc);
744 	/* Stop MAC. */
745 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
746 }
747 
748 static void
749 axe_ax88772b_init(struct axe_softc *sc)
750 {
751 	struct usb_ether *ue;
752 	uint16_t eeprom;
753 	uint8_t *eaddr;
754 	int i;
755 
756 	ue = &sc->sc_ue;
757 	/* Reload EEPROM. */
758 	AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32);
759 	/*
760 	 * Save PHY power saving configuration(high byte) and
761 	 * clear EEPROM checksum value(low byte).
762 	 */
763 	axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_PHY_PWRCFG, &eeprom);
764 	sc->sc_pwrcfg = le16toh(eeprom) & 0xFF00;
765 
766 	/*
767 	 * Auto-loaded default station address from internal ROM is
768 	 * 00:00:00:00:00:00 such that an explicit access to EEPROM
769 	 * is required to get real station address.
770 	 */
771 	eaddr = ue->ue_eaddr;
772 	for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
773 		axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_NODE_ID + i,
774 		    &eeprom);
775 		eeprom = le16toh(eeprom);
776 		*eaddr++ = (uint8_t)(eeprom & 0xFF);
777 		*eaddr++ = (uint8_t)((eeprom >> 8) & 0xFF);
778 	}
779 	/* Wakeup PHY. */
780 	axe_ax88772_phywake(sc);
781 	/* Stop MAC. */
782 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
783 }
784 
785 #undef	AXE_GPIO_WRITE
786 
787 static void
788 axe_reset(struct axe_softc *sc)
789 {
790 	struct usb_config_descriptor *cd;
791 	usb_error_t err;
792 
793 	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);
794 
795 	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
796 	    cd->bConfigurationValue);
797 	if (err)
798 		DPRINTF("reset failed (ignored)\n");
799 
800 	/* Wait a little while for the chip to get its brains in order. */
801 	uether_pause(&sc->sc_ue, hz / 100);
802 
803 	/* Reinitialize controller to achieve full reset. */
804 	if (sc->sc_flags & AXE_FLAG_178)
805 		axe_ax88178_init(sc);
806 	else if (sc->sc_flags & AXE_FLAG_772)
807 		axe_ax88772_init(sc);
808 	else if (sc->sc_flags & AXE_FLAG_772A)
809 		axe_ax88772a_init(sc);
810 	else if (sc->sc_flags & AXE_FLAG_772B)
811 		axe_ax88772b_init(sc);
812 }
813 
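/*
 * First-stage attach hook run by uether: discover the PHY address,
 * run the chip-specific init routine, and read the station address
 * and IPG values that axe_init() later programs back into the MAC.
 */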
814 static void
815 axe_attach_post(struct usb_ether *ue)
816 {
817 	struct axe_softc *sc = uether_getsc(ue);
818 
819 	/*
820 	 * Load PHY indexes first. Needed by axe_xxx_init().
821 	 */
822 	axe_cmd(sc, AXE_CMD_READ_PHYID, 0, 0, sc->sc_phyaddrs);
823 	if (bootverbose)
824 		device_printf(sc->sc_ue.ue_dev, "PHYADDR 0x%02x:0x%02x\n",
825 		    sc->sc_phyaddrs[0], sc->sc_phyaddrs[1]);
826 	sc->sc_phyno = axe_get_phyno(sc, AXE_PHY_SEL_PRI);
827 	if (sc->sc_phyno == -1)
828 		sc->sc_phyno = axe_get_phyno(sc, AXE_PHY_SEL_SEC);
829 	if (sc->sc_phyno == -1) {
830 		device_printf(sc->sc_ue.ue_dev,
831 		    "no valid PHY address found, assuming PHY address 0\n");
832 		sc->sc_phyno = 0;
833 	}
834 
835 	/* Initialize controller and get station address. */
836 	if (sc->sc_flags & AXE_FLAG_178) {
837 		axe_ax88178_init(sc);
838 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
839 	} else if (sc->sc_flags & AXE_FLAG_772) {
840 		axe_ax88772_init(sc);
841 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
842 	} else if (sc->sc_flags & AXE_FLAG_772A) {
843 		axe_ax88772a_init(sc);
844 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
845 	} else if (sc->sc_flags & AXE_FLAG_772B) {
846 		axe_ax88772b_init(sc);
847 	} else
848 		axe_cmd(sc, AXE_172_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
849 
850 	/*
851 	 * Fetch IPG values.
852 	 */
853 	if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B)) {
854 		/* Set IPG values. */
855 		sc->sc_ipgs[0] = 0x15;
856 		sc->sc_ipgs[1] = 0x16;
857 		sc->sc_ipgs[2] = 0x1A;
858 	} else
859 		axe_cmd(sc, AXE_CMD_READ_IPG012, 0, 0, sc->sc_ipgs);
860 }
861 
862 static int
863 axe_attach_post_sub(struct usb_ether *ue)
864 {
865 	struct axe_softc *sc;
866 	if_t ifp;
867 	u_int adv_pause;
868 	int error;
869 
870 	sc = uether_getsc(ue);
871 	ifp = ue->ue_ifp;
872 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
873 	if_setstartfn(ifp, uether_start);
874 	if_setioctlfn(ifp, axe_ioctl);
875 	if_setinitfn(ifp, uether_init);
876 	if_setsendqlen(ifp, ifqmaxlen);
877 	if_setsendqready(ifp);
878 
879 	if (AXE_IS_178_FAMILY(sc))
880 		if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
881 	if (sc->sc_flags & AXE_FLAG_772B) {
882 		if_setcapabilitiesbit(ifp, IFCAP_TXCSUM | IFCAP_RXCSUM, 0);
883 		if_sethwassist(ifp, AXE_CSUM_FEATURES);
884 		/*
885 		 * Checksum offloading of AX88772B also works with VLAN
886 		 * tagged frames but there is no way to take advantage
887 		 * of the feature because vlan(4) assumes
888 		 * IFCAP_VLAN_HWTAGGING is a prerequisite for checksum
889 		 * offloading with VLAN. VLAN hardware
890 		 * tagging support of AX88772B is very limited so it's
891 		 * not possible to announce IFCAP_VLAN_HWTAGGING.
892 		 */
893 	}
894 	if_setcapenable(ifp, if_getcapabilities(ifp));
895 	if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B | AXE_FLAG_178))
896 		adv_pause = MIIF_DOPAUSE;
897 	else
898 		adv_pause = 0;
899 	bus_topo_lock();
900 	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
901 	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
902 	    BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, adv_pause);
903 	bus_topo_unlock();
904 
905 	return (error);
906 }
907 
908 /*
909  * Probe for a AX88172 chip.
910  */
911 static int
912 axe_probe(device_t dev)
913 {
914 	struct usb_attach_arg *uaa = device_get_ivars(dev);
915 
916 	if (uaa->usb_mode != USB_MODE_HOST)
917 		return (ENXIO);
918 	if (uaa->info.bConfigIndex != AXE_CONFIG_IDX)
919 		return (ENXIO);
920 	if (uaa->info.bIfaceIndex != AXE_IFACE_IDX)
921 		return (ENXIO);
922 
923 	return (usbd_lookup_id_by_uaa(axe_devs, sizeof(axe_devs), uaa));
924 }
925 
926 /*
927  * Attach the interface. Allocate softc structures, do ifmedia
928  * setup and ethernet/BPF attach.
929  */
930 static int
931 axe_attach(device_t dev)
932 {
933 	struct usb_attach_arg *uaa = device_get_ivars(dev);
934 	struct axe_softc *sc = device_get_softc(dev);
935 	struct usb_ether *ue = &sc->sc_ue;
936 	uint8_t iface_index;
937 	int error;
938 
939 	sc->sc_flags = USB_GET_DRIVER_INFO(uaa);
940 
941 	device_set_usb_desc(dev);
942 
943 	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
944 
945 	iface_index = AXE_IFACE_IDX;
946 	error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
947 	    axe_config, AXE_N_TRANSFER, sc, &sc->sc_mtx);
948 	if (error) {
949 		device_printf(dev, "allocating USB transfers failed\n");
950 		goto detach;
951 	}
952 
953 	ue->ue_sc = sc;
954 	ue->ue_dev = dev;
955 	ue->ue_udev = uaa->device;
956 	ue->ue_mtx = &sc->sc_mtx;
957 	ue->ue_methods = &axe_ue_methods;
958 
959 	error = uether_ifattach(ue);
960 	if (error) {
961 		device_printf(dev, "could not attach interface\n");
962 		goto detach;
963 	}
964 	return (0);			/* success */
965 
966 detach:
967 	axe_detach(dev);
968 	return (ENXIO);			/* failure */
969 }
970 
971 static int
972 axe_detach(device_t dev)
973 {
974 	struct axe_softc *sc = device_get_softc(dev);
975 	struct usb_ether *ue = &sc->sc_ue;
976 
977 	usbd_transfer_unsetup(sc->sc_xfer, AXE_N_TRANSFER);
978 	uether_ifdetach(ue);
979 	mtx_destroy(&sc->sc_mtx);
980 
981 	return (0);
982 }
983 
984 #if (AXE_BULK_BUF_SIZE >= 0x10000)
985 #error "Please update axe_bulk_read_callback()!"
986 #endif
987 
988 static void
989 axe_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
990 {
991 	struct axe_softc *sc = usbd_xfer_softc(xfer);
992 	struct usb_ether *ue = &sc->sc_ue;
993 	struct usb_page_cache *pc;
994 	int actlen;
995 
996 	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
997 
998 	switch (USB_GET_STATE(xfer)) {
999 	case USB_ST_TRANSFERRED:
1000 		pc = usbd_xfer_get_frame(xfer, 0);
1001 		axe_rx_frame(ue, pc, actlen);
1002 
1003 		/* FALLTHROUGH */
1004 	case USB_ST_SETUP:
1005 tr_setup:
1006 		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
1007 		usbd_transfer_submit(xfer);
1008 		uether_rxflush(ue);
1009 		return;
1010 
1011 	default:			/* Error */
1012 		DPRINTF("bulk read error, %s\n", usbd_errstr(error));
1013 
1014 		if (error != USB_ERR_CANCELLED) {
1015 			/* try to clear stall first */
1016 			usbd_xfer_set_stall(xfer);
1017 			goto tr_setup;
1018 		}
1019 		return;
1020 	}
1021 }
1022 
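/*
 * RX de-multiplexing.  With the standard framing used by the 178
 * family, every packet in a bulk-in transfer is preceded by a 4-byte
 * header carrying the packet length and an inverted copy of it; the
 * XOR check against sc_lenmask detects loss of sync, and packets are
 * padded to an even length.  With RX checksum offload enabled on the
 * AX88772B the larger axe_csum_hdr is prepended instead and frames
 * are rounded up to a 4-byte boundary.  Plain AX88172 transfers carry
 * a single unframed packet.
 */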
1023 static int
1024 axe_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
1025 {
1026 	struct axe_softc *sc;
1027 	struct axe_sframe_hdr hdr;
1028 	struct axe_csum_hdr csum_hdr;
1029 	int error, len, pos;
1030 
1031 	sc = uether_getsc(ue);
1032 	pos = 0;
1033 	len = 0;
1034 	error = 0;
1035 	if ((sc->sc_flags & AXE_FLAG_STD_FRAME) != 0) {
1036 		while (pos < actlen) {
1037 			if ((int)(pos + sizeof(hdr)) > actlen) {
1038 				/* too little data */
1039 				error = EINVAL;
1040 				break;
1041 			}
1042 			usbd_copy_out(pc, pos, &hdr, sizeof(hdr));
1043 
1044 			if ((hdr.len ^ hdr.ilen) != sc->sc_lenmask) {
1045 				/* we lost sync */
1046 				error = EINVAL;
1047 				break;
1048 			}
1049 			pos += sizeof(hdr);
1050 			len = le16toh(hdr.len);
1051 			if (pos + len > actlen) {
1052 				/* invalid length */
1053 				error = EINVAL;
1054 				break;
1055 			}
1056 			axe_rxeof(ue, pc, pos, len, NULL);
1057 			pos += len + (len % 2);
1058 		}
1059 	} else if ((sc->sc_flags & AXE_FLAG_CSUM_FRAME) != 0) {
1060 		while (pos < actlen) {
1061 			if ((int)(pos + sizeof(csum_hdr)) > actlen) {
1062 				/* too little data */
1063 				error = EINVAL;
1064 				break;
1065 			}
1066 			usbd_copy_out(pc, pos, &csum_hdr, sizeof(csum_hdr));
1067 
1068 			csum_hdr.len = le16toh(csum_hdr.len);
1069 			csum_hdr.ilen = le16toh(csum_hdr.ilen);
1070 			csum_hdr.cstatus = le16toh(csum_hdr.cstatus);
1071 			if ((AXE_CSUM_RXBYTES(csum_hdr.len) ^
1072 			    AXE_CSUM_RXBYTES(csum_hdr.ilen)) !=
1073 			    sc->sc_lenmask) {
1074 				/* we lost sync */
1075 				error = EINVAL;
1076 				break;
1077 			}
1078 			/*
1079 			 * Get total transferred frame length including
1080 			 * checksum header.  The length should be multiple
1081 			 * of 4.
1082 			 */
1083 			len = sizeof(csum_hdr) + AXE_CSUM_RXBYTES(csum_hdr.len);
1084 			len = (len + 3) & ~3;
1085 			if (pos + len > actlen) {
1086 				/* invalid length */
1087 				error = EINVAL;
1088 				break;
1089 			}
1090 			axe_rxeof(ue, pc, pos + sizeof(csum_hdr),
1091 			    AXE_CSUM_RXBYTES(csum_hdr.len), &csum_hdr);
1092 			pos += len;
1093 		}
1094 	} else
1095 		axe_rxeof(ue, pc, 0, actlen, NULL);
1096 
1097 	if (error != 0)
1098 		if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1);
1099 	return (error);
1100 }
1101 
1102 static int
1103 axe_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned offset,
1104     unsigned len, struct axe_csum_hdr *csum_hdr)
1105 {
1106 	if_t ifp = ue->ue_ifp;
1107 	struct mbuf *m;
1108 
1109 	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
1110 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1111 		return (EINVAL);
1112 	}
1113 
1114 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1115 	if (m == NULL) {
1116 		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1117 		return (ENOMEM);
1118 	}
1119 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1120 	m_adj(m, ETHER_ALIGN);
1121 
1122 	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);
1123 
1124 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1125 	m->m_pkthdr.rcvif = ifp;
1126 	m->m_pkthdr.len = m->m_len = len;
1127 
1128 	if (csum_hdr != NULL && csum_hdr->cstatus & AXE_CSUM_HDR_L3_TYPE_IPV4) {
1129 		if ((csum_hdr->cstatus & (AXE_CSUM_HDR_L4_CSUM_ERR |
1130 		    AXE_CSUM_HDR_L3_CSUM_ERR)) == 0) {
1131 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
1132 			    CSUM_IP_VALID;
1133 			if ((csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) ==
1134 			    AXE_CSUM_HDR_L4_TYPE_TCP ||
1135 			    (csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) ==
1136 			    AXE_CSUM_HDR_L4_TYPE_UDP) {
1137 				m->m_pkthdr.csum_flags |=
1138 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1139 				m->m_pkthdr.csum_data = 0xffff;
1140 			}
1141 		}
1142 	}
1143 
1144 	(void)mbufq_enqueue(&ue->ue_rxq, m);
1145 	return (0);
1146 }
1147 
1148 #if ((AXE_BULK_BUF_SIZE >= 0x10000) || (AXE_BULK_BUF_SIZE < (MCLBYTES+4)))
1149 #error "Please update axe_bulk_write_callback()!"
1150 #endif
1151 
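/*
 * TX path.  For the 178 family each mbuf is prefixed with the same
 * 4-byte length/~length header used on receive; when the accumulated
 * length lands exactly on a 512-byte boundary an extra 0x0000/0xffff
 * header is appended, presumably so the device can delimit the
 * transfer at a full high-speed bulk packet.  AX88172 packets are
 * copied out without any header.
 */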
1152 static void
1153 axe_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
1154 {
1155 	struct axe_softc *sc = usbd_xfer_softc(xfer);
1156 	struct axe_sframe_hdr hdr;
1157 	if_t ifp = uether_getifp(&sc->sc_ue);
1158 	struct usb_page_cache *pc;
1159 	struct mbuf *m;
1160 	int nframes, pos;
1161 
1162 	switch (USB_GET_STATE(xfer)) {
1163 	case USB_ST_TRANSFERRED:
1164 		DPRINTFN(11, "transfer complete\n");
1165 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1166 		/* FALLTHROUGH */
1167 	case USB_ST_SETUP:
1168 tr_setup:
1169 		if ((sc->sc_flags & AXE_FLAG_LINK) == 0 ||
1170 		    (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0) {
1171 			/*
1172 			 * Don't send anything if there is no link or
1173 			 * controller is busy.
1174 			 */
1175 			return;
1176 		}
1177 
1178 		for (nframes = 0; nframes < 16 &&
1179 		    !if_sendq_empty(ifp); nframes++) {
1180 			m = if_dequeue(ifp);
1181 			if (m == NULL)
1182 				break;
1183 			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
1184 			    nframes);
1185 			pos = 0;
1186 			pc = usbd_xfer_get_frame(xfer, nframes);
1187 			if (AXE_IS_178_FAMILY(sc)) {
1188 				hdr.len = htole16(m->m_pkthdr.len);
1189 				hdr.ilen = ~hdr.len;
1190 				/*
1191 				 * If the upper stack has already computed the
1192 				 * checksum, tell the checksum offloading capable
1193 				 * controller not to insert its own computed
1194 				 * checksum.
1195 				 */
1196 				if (if_getcapabilities(ifp) & IFCAP_TXCSUM) {
1197 					if ((m->m_pkthdr.csum_flags &
1198 					    AXE_CSUM_FEATURES) != 0)
1199 						hdr.len |= htole16(
1200 						    AXE_TX_CSUM_PSEUDO_HDR);
1201 					else
1202 						hdr.len |= htole16(
1203 						    AXE_TX_CSUM_DIS);
1204 				}
1205 				usbd_copy_in(pc, pos, &hdr, sizeof(hdr));
1206 				pos += sizeof(hdr);
1207 				usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
1208 				pos += m->m_pkthdr.len;
1209 				if ((pos % 512) == 0) {
1210 					hdr.len = 0;
1211 					hdr.ilen = 0xffff;
1212 					usbd_copy_in(pc, pos, &hdr,
1213 					    sizeof(hdr));
1214 					pos += sizeof(hdr);
1215 				}
1216 			} else {
1217 				usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
1218 				pos += m->m_pkthdr.len;
1219 			}
1220 
1221 			/*
1222 			 * XXX
1223 			 * Update the TX packet counter here. This is not
1224 			 * the correct way, but there seems to be no way
1225 			 * to know how many packets were sent at the end
1226 			 * of the transfer, because the controller combines
1227 			 * multiple writes into a single one when there is
1228 			 * room in its TX buffer.
1229 			 */
1230 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1231 
1232 			/*
1233 			 * if there's a BPF listener, bounce a copy
1234 			 * of this frame to him:
1235 			 */
1236 			BPF_MTAP(ifp, m);
1237 
1238 			m_freem(m);
1239 
1240 			/* Set frame length. */
1241 			usbd_xfer_set_frame_len(xfer, nframes, pos);
1242 		}
1243 		if (nframes != 0) {
1244 			usbd_xfer_set_frames(xfer, nframes);
1245 			usbd_transfer_submit(xfer);
1246 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1247 		}
1248 		return;
1249 		/* NOTREACHED */
1250 	default:			/* Error */
1251 		DPRINTFN(11, "transfer error, %s\n",
1252 		    usbd_errstr(error));
1253 
1254 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1255 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1256 
1257 		if (error != USB_ERR_CANCELLED) {
1258 			/* try to clear stall first */
1259 			usbd_xfer_set_stall(xfer);
1260 			goto tr_setup;
1261 		}
1262 		return;
1263 	}
1264 }
1265 
1266 static void
1267 axe_tick(struct usb_ether *ue)
1268 {
1269 	struct axe_softc *sc = uether_getsc(ue);
1270 	struct mii_data *mii = GET_MII(sc);
1271 
1272 	AXE_LOCK_ASSERT(sc, MA_OWNED);
1273 
1274 	mii_tick(mii);
1275 	if ((sc->sc_flags & AXE_FLAG_LINK) == 0) {
1276 		axe_miibus_statchg(ue->ue_dev);
1277 		if ((sc->sc_flags & AXE_FLAG_LINK) != 0)
1278 			axe_start(ue);
1279 	}
1280 }
1281 
1282 static void
1283 axe_start(struct usb_ether *ue)
1284 {
1285 	struct axe_softc *sc = uether_getsc(ue);
1286 
1287 	/*
1288 	 * start the USB transfers, if not already started:
1289 	 */
1290 	usbd_transfer_start(sc->sc_xfer[AXE_BULK_DT_RD]);
1291 	usbd_transfer_start(sc->sc_xfer[AXE_BULK_DT_WR]);
1292 }
1293 
1294 static void
1295 axe_csum_cfg(struct usb_ether *ue)
1296 {
1297 	struct axe_softc *sc;
1298 	if_t ifp;
1299 	uint16_t csum1, csum2;
1300 
1301 	sc = uether_getsc(ue);
1302 	AXE_LOCK_ASSERT(sc, MA_OWNED);
1303 
1304 	if ((sc->sc_flags & AXE_FLAG_772B) != 0) {
1305 		ifp = uether_getifp(ue);
1306 		csum1 = 0;
1307 		csum2 = 0;
1308 		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
1309 			csum1 |= AXE_TXCSUM_IP | AXE_TXCSUM_TCP |
1310 			    AXE_TXCSUM_UDP;
1311 		axe_cmd(sc, AXE_772B_CMD_WRITE_TXCSUM, csum2, csum1, NULL);
1312 		csum1 = 0;
1313 		csum2 = 0;
1314 		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
1315 			csum1 |= AXE_RXCSUM_IP | AXE_RXCSUM_IPVE |
1316 			    AXE_RXCSUM_TCP | AXE_RXCSUM_UDP | AXE_RXCSUM_ICMP |
1317 			    AXE_RXCSUM_IGMP;
1318 		axe_cmd(sc, AXE_772B_CMD_WRITE_RXCSUM, csum2, csum1, NULL);
1319 	}
1320 }
1321 
1322 static void
1323 axe_init(struct usb_ether *ue)
1324 {
1325 	struct axe_softc *sc = uether_getsc(ue);
1326 	if_t ifp = uether_getifp(ue);
1327 	uint16_t rxmode;
1328 
1329 	AXE_LOCK_ASSERT(sc, MA_OWNED);
1330 
1331 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
1332 		return;
1333 
1334 	/* Cancel pending I/O */
1335 	axe_stop(ue);
1336 
1337 	axe_reset(sc);
1338 
1339 	/* Set MAC address and transmitter IPG values. */
1340 	if (AXE_IS_178_FAMILY(sc)) {
1341 		axe_cmd(sc, AXE_178_CMD_WRITE_NODEID, 0, 0, if_getlladdr(ifp));
1342 		axe_cmd(sc, AXE_178_CMD_WRITE_IPG012, sc->sc_ipgs[2],
1343 		    (sc->sc_ipgs[1] << 8) | (sc->sc_ipgs[0]), NULL);
1344 	} else {
1345 		axe_cmd(sc, AXE_172_CMD_WRITE_NODEID, 0, 0, if_getlladdr(ifp));
1346 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG0, 0, sc->sc_ipgs[0], NULL);
1347 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG1, 0, sc->sc_ipgs[1], NULL);
1348 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG2, 0, sc->sc_ipgs[2], NULL);
1349 	}
1350 
1351 	if (AXE_IS_178_FAMILY(sc)) {
1352 		sc->sc_flags &= ~(AXE_FLAG_STD_FRAME | AXE_FLAG_CSUM_FRAME);
1353 		if ((sc->sc_flags & AXE_FLAG_772B) != 0 &&
1354 		    (if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
1355 			sc->sc_lenmask = AXE_CSUM_HDR_LEN_MASK;
1356 			sc->sc_flags |= AXE_FLAG_CSUM_FRAME;
1357 		} else {
1358 			sc->sc_lenmask = AXE_HDR_LEN_MASK;
1359 			sc->sc_flags |= AXE_FLAG_STD_FRAME;
1360 		}
1361 	}
1362 
1363 	/* Configure TX/RX checksum offloading. */
1364 	axe_csum_cfg(ue);
1365 
1366 	if (sc->sc_flags & AXE_FLAG_772B) {
1367 		/* AX88772B uses a different maximum frame burst configuration. */
1368 		axe_cmd(sc, AXE_772B_CMD_RXCTL_WRITE_CFG,
1369 		    ax88772b_mfb_table[AX88772B_MFB_16K].threshold,
1370 		    ax88772b_mfb_table[AX88772B_MFB_16K].byte_cnt, NULL);
1371 	}
1372 
1373 	/* Enable receiver, set RX mode. */
1374 	rxmode = (AXE_RXCMD_MULTICAST | AXE_RXCMD_ENABLE);
1375 	if (AXE_IS_178_FAMILY(sc)) {
1376 		if (sc->sc_flags & AXE_FLAG_772B) {
1377 			/*
1378 			 * Select RX header format type 1.  Aligning IP
1379 			 * header on 4 byte boundary is not needed when
1380 			 * checksum offloading feature is not used
1381 			 * because we always copy the received frame in
1382 			 * RX handler.  When RX checksum offloading is
1383 			 * active, aligning IP header is required to
1384 			 * reflect actual frame length including RX
1385 			 * header size.
1386 			 */
1387 			rxmode |= AXE_772B_RXCMD_HDR_TYPE_1;
1388 			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
1389 				rxmode |= AXE_772B_RXCMD_IPHDR_ALIGN;
1390 		} else {
1391 			/*
1392 			 * Default Rx buffer size is too small to get
1393 			 * maximum performance.
1394 			 */
1395 			rxmode |= AXE_178_RXCMD_MFB_16384;
1396 		}
1397 	} else {
1398 		rxmode |= AXE_172_RXCMD_UNICAST;
1399 	}
1400 
1401 	/* If we want promiscuous mode, set the allframes bit. */
1402 	if (if_getflags(ifp) & IFF_PROMISC)
1403 		rxmode |= AXE_RXCMD_PROMISC;
1404 
1405 	if (if_getflags(ifp) & IFF_BROADCAST)
1406 		rxmode |= AXE_RXCMD_BROADCAST;
1407 
1408 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
1409 
1410 	/* Load the multicast filter. */
1411 	axe_setmulti(ue);
1412 
1413 	usbd_xfer_set_stall(sc->sc_xfer[AXE_BULK_DT_WR]);
1414 
1415 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1416 	/* Switch to selected media. */
1417 	axe_ifmedia_upd(ifp);
1418 }
1419 
1420 static void
1421 axe_setpromisc(struct usb_ether *ue)
1422 {
1423 	struct axe_softc *sc = uether_getsc(ue);
1424 	if_t ifp = uether_getifp(ue);
1425 	uint16_t rxmode;
1426 
1427 	axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode);
1428 
1429 	rxmode = le16toh(rxmode);
1430 
1431 	if (if_getflags(ifp) & IFF_PROMISC) {
1432 		rxmode |= AXE_RXCMD_PROMISC;
1433 	} else {
1434 		rxmode &= ~AXE_RXCMD_PROMISC;
1435 	}
1436 
1437 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
1438 
1439 	axe_setmulti(ue);
1440 }
1441 
1442 static void
1443 axe_stop(struct usb_ether *ue)
1444 {
1445 	struct axe_softc *sc = uether_getsc(ue);
1446 	if_t ifp = uether_getifp(ue);
1447 
1448 	AXE_LOCK_ASSERT(sc, MA_OWNED);
1449 
1450 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
1451 	sc->sc_flags &= ~AXE_FLAG_LINK;
1452 
1453 	/*
1454 	 * stop all the transfers, if not already stopped:
1455 	 */
1456 	usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_WR]);
1457 	usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_RD]);
1458 }
1459 
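/*
 * Only SIOCSIFCAP is handled here; everything else goes through
 * uether_ioctl().  Toggling TX/RX checksum offload forces a reinit
 * because axe_init() selects the on-wire RX frame format (plain
 * header vs. checksum header) based on the enabled capabilities.
 */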
1460 static int
1461 axe_ioctl(if_t ifp, u_long cmd, caddr_t data)
1462 {
1463 	struct usb_ether *ue = if_getsoftc(ifp);
1464 	struct axe_softc *sc;
1465 	struct ifreq *ifr;
1466 	int error, mask, reinit;
1467 
1468 	sc = uether_getsc(ue);
1469 	ifr = (struct ifreq *)data;
1470 	error = 0;
1471 	reinit = 0;
1472 	if (cmd == SIOCSIFCAP) {
1473 		AXE_LOCK(sc);
1474 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1475 		if ((mask & IFCAP_TXCSUM) != 0 &&
1476 		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
1477 			if_togglecapenable(ifp, IFCAP_TXCSUM);
1478 			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
1479 				if_sethwassistbits(ifp, AXE_CSUM_FEATURES, 0);
1480 			else
1481 				if_sethwassistbits(ifp, 0, AXE_CSUM_FEATURES);
1482 			reinit++;
1483 		}
1484 		if ((mask & IFCAP_RXCSUM) != 0 &&
1485 		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
1486 			if_togglecapenable(ifp, IFCAP_RXCSUM);
1487 			reinit++;
1488 		}
1489 		if (reinit > 0 && if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1490 			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1491 		else
1492 			reinit = 0;
1493 		AXE_UNLOCK(sc);
1494 		if (reinit > 0)
1495 			uether_init(ue);
1496 	} else
1497 		error = uether_ioctl(ifp, cmd, data);
1498 
1499 	return (error);
1500 }
1501