xref: /freebsd/sys/dev/usb/net/if_axge.c (revision 1719886f6d08408b834d270c59ffcfd821c8f63a)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2013-2014 Kevin Lo
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 /*
31  * ASIX Electronics AX88178A/AX88179/AX88179A USB 2.0/3.0 Gigabit Ethernet
32  * driver.
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bus.h>
38 #include <sys/condvar.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/module.h>
43 #include <sys/mutex.h>
44 #include <sys/socket.h>
45 #include <sys/sysctl.h>
46 #include <sys/unistd.h>
47 
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_media.h>
51 
52 #include <dev/mii/mii.h>
53 #include <dev/mii/miivar.h>
54 
55 #include <dev/usb/usb.h>
56 #include <dev/usb/usbdi.h>
57 #include <dev/usb/usbdi_util.h>
58 #include "usbdevs.h"
59 
60 #define	USB_DEBUG_VAR 	axge_debug
61 #include <dev/usb/usb_debug.h>
62 #include <dev/usb/usb_process.h>
63 
64 #include <dev/usb/net/usb_ethernet.h>
65 #include <dev/usb/net/if_axgereg.h>
66 
67 #include "miibus_if.h"
68 
69 /*
70  * Various supported device vendors/products.
71  */
72 
73 static const STRUCT_USB_HOST_ID axge_devs[] = {
74 #define	AXGE_DEV(v,p,i,...)	\
75 	{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i), __VA_ARGS__ }
76 	AXGE_DEV(ASIX, AX88178A, AXGE_FLAG_178A),
77 	AXGE_DEV(ASIX, AX88179, AXGE_FLAG_179, USB_DEV_BCD_LTEQ(0x0100)),
78 	AXGE_DEV(ASIX, AX88179, AXGE_FLAG_179A, USB_DEV_BCD_GTEQ(0x0200)),
79 	AXGE_DEV(BELKIN, B2B128, AXGE_FLAG_179),
80 	AXGE_DEV(DLINK, DUB1312, AXGE_FLAG_179),
81 	AXGE_DEV(LENOVO, GIGALAN, AXGE_FLAG_179),
82 	AXGE_DEV(SITECOMEU, LN032, AXGE_FLAG_179),
83 #undef AXGE_DEV
84 };
85 
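/*
 * Per-link-speed RX bulk-in queue control parameters (control byte,
 * aggregation timer, buffer size and inter-frame gap).
 * axge_miibus_statchg() copies one of these entries into the
 * AXGE_RX_BULKIN_QCTRL registers based on the negotiated link speed
 * and the USB bus speed.
 */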
86 static const struct {
87 	uint8_t	ctrl;
88 	uint8_t timer_l;
89 	uint8_t	timer_h;
90 	uint8_t	size;
91 	uint8_t	ifg;
92 } __packed axge_bulk_size[] = {
93 	{ 7, 0x4f, 0x00, 0x12, 0xff },
94 	{ 7, 0x20, 0x03, 0x16, 0xff },
95 	{ 7, 0xae, 0x07, 0x18, 0xff },
96 	{ 7, 0xcc, 0x4c, 0x18, 0x08 }
97 };
98 
99 /* prototypes */
100 
101 static device_probe_t axge_probe;
102 static device_attach_t axge_attach;
103 static device_detach_t axge_detach;
104 
105 static usb_callback_t axge_bulk_read_callback;
106 static usb_callback_t axge_bulk_write_callback;
107 
108 static miibus_readreg_t axge_miibus_readreg;
109 static miibus_writereg_t axge_miibus_writereg;
110 static miibus_statchg_t axge_miibus_statchg;
111 
112 static uether_fn_t axge_attach_post;
113 static uether_fn_t axge_init;
114 static uether_fn_t axge_stop;
115 static uether_fn_t axge_start;
116 static uether_fn_t axge_tick;
117 static uether_fn_t axge_rxfilter;
118 
119 static int	axge_read_mem(struct axge_softc *, uint8_t, uint16_t,
120 		    uint16_t, void *, int);
121 static void	axge_write_mem(struct axge_softc *, uint8_t, uint16_t,
122 		    uint16_t, void *, int);
123 static uint8_t	axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t);
124 static uint16_t	axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t,
125 		    uint16_t);
126 static void	axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t,
127 		    uint8_t);
128 static void	axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t,
129 		    uint16_t, uint16_t);
130 static void	axge_chip_init(struct axge_softc *);
131 static void	axge_reset(struct axge_softc *);
132 
133 static int	axge_attach_post_sub(struct usb_ether *);
134 static int	axge_ifmedia_upd(if_t);
135 static void	axge_ifmedia_sts(if_t, struct ifmediareq *);
136 static int	axge_ioctl(if_t, u_long, caddr_t);
137 static void	axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
138 static void	axge_rxeof(struct usb_ether *, struct usb_page_cache *,
139 		    unsigned, unsigned, uint32_t);
140 static void	axge_csum_cfg(struct usb_ether *);
141 
142 #define	AXGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
143 
144 #ifdef USB_DEBUG
145 static int axge_debug = 0;
146 
147 static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
148     "USB axge");
149 SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RWTUN, &axge_debug, 0,
150     "Debug level");
151 #endif
152 
153 static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
154 	[AXGE_BULK_DT_WR] = {
155 		.type = UE_BULK,
156 		.endpoint = UE_ADDR_ANY,
157 		.direction = UE_DIR_OUT,
158 		.frames = AXGE_N_FRAMES,
159 		.bufsize = AXGE_N_FRAMES * MCLBYTES,
160 		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
161 		.callback = axge_bulk_write_callback,
162 		.timeout = 10000,	/* 10 seconds */
163 	},
164 	[AXGE_BULK_DT_RD] = {
165 		.type = UE_BULK,
166 		.endpoint = UE_ADDR_ANY,
167 		.direction = UE_DIR_IN,
168 		.bufsize = 65536,
169 		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
170 		.callback = axge_bulk_read_callback,
171 		.timeout = 0,		/* no timeout */
172 	},
173 };
174 
175 static device_method_t axge_methods[] = {
176 	/* Device interface. */
177 	DEVMETHOD(device_probe,		axge_probe),
178 	DEVMETHOD(device_attach,	axge_attach),
179 	DEVMETHOD(device_detach,	axge_detach),
180 
181 	/* MII interface. */
182 	DEVMETHOD(miibus_readreg,	axge_miibus_readreg),
183 	DEVMETHOD(miibus_writereg,	axge_miibus_writereg),
184 	DEVMETHOD(miibus_statchg,	axge_miibus_statchg),
185 
186 	DEVMETHOD_END
187 };
188 
189 static driver_t axge_driver = {
190 	.name = "axge",
191 	.methods = axge_methods,
192 	.size = sizeof(struct axge_softc),
193 };
194 
195 DRIVER_MODULE(axge, uhub, axge_driver, NULL, NULL);
196 DRIVER_MODULE(miibus, axge, miibus_driver, NULL, NULL);
197 MODULE_DEPEND(axge, uether, 1, 1, 1);
198 MODULE_DEPEND(axge, usb, 1, 1, 1);
199 MODULE_DEPEND(axge, ether, 1, 1, 1);
200 MODULE_DEPEND(axge, miibus, 1, 1, 1);
201 MODULE_VERSION(axge, 1);
202 USB_PNP_HOST_INFO(axge_devs);
203 
204 static const struct usb_ether_methods axge_ue_methods = {
205 	.ue_attach_post = axge_attach_post,
206 	.ue_attach_post_sub = axge_attach_post_sub,
207 	.ue_start = axge_start,
208 	.ue_init = axge_init,
209 	.ue_stop = axge_stop,
210 	.ue_tick = axge_tick,
211 	.ue_setmulti = axge_rxfilter,
212 	.ue_setpromisc = axge_rxfilter,
213 	.ue_mii_upd = axge_ifmedia_upd,
214 	.ue_mii_sts = axge_ifmedia_sts,
215 };
216 
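/*
 * Issue a vendor-specific control-IN request to read 'len' bytes from
 * the device into 'buf'.  The register access helpers below are built
 * on this and on axge_write_mem().
 */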
217 static int
218 axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
219     uint16_t val, void *buf, int len)
220 {
221 	struct usb_device_request req;
222 
223 	AXGE_LOCK_ASSERT(sc, MA_OWNED);
224 
225 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
226 	req.bRequest = cmd;
227 	USETW(req.wValue, val);
228 	USETW(req.wIndex, index);
229 	USETW(req.wLength, len);
230 
231 	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
232 }
233 
234 static void
235 axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
236     uint16_t val, void *buf, int len)
237 {
238 	struct usb_device_request req;
239 
240 	AXGE_LOCK_ASSERT(sc, MA_OWNED);
241 
242 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
243 	req.bRequest = cmd;
244 	USETW(req.wValue, val);
245 	USETW(req.wIndex, index);
246 	USETW(req.wLength, len);
247 
248 	if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
249 		/* Error ignored. */
250 	}
251 }
252 
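/*
 * Convenience wrappers for 1- and 2-byte register accesses.  Two-byte
 * values are converted to and from the device's little-endian layout
 * with UGETW()/USETW().
 */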
253 static uint8_t
254 axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
255 {
256 	uint8_t val;
257 
258 	axge_read_mem(sc, cmd, 1, reg, &val, 1);
259 	return (val);
260 }
261 
262 static uint16_t
263 axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
264     uint16_t reg)
265 {
266 	uint8_t val[2];
267 
268 	axge_read_mem(sc, cmd, index, reg, &val, 2);
269 	return (UGETW(val));
270 }
271 
272 static void
273 axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
274 {
275 	axge_write_mem(sc, cmd, 1, reg, &val, 1);
276 }
277 
278 static void
279 axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
280     uint16_t reg, uint16_t val)
281 {
282 	uint8_t temp[2];
283 
284 	USETW(temp, val);
285 	axge_write_mem(sc, cmd, index, reg, &temp, 2);
286 }
287 
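/*
 * MII bus glue.  PHY registers are accessed through the AXGE_ACCESS_PHY
 * vendor command; the softc lock is taken here if the caller does not
 * already hold it.
 */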
288 static int
289 axge_miibus_readreg(device_t dev, int phy, int reg)
290 {
291 	struct axge_softc *sc;
292 	uint16_t val;
293 	int locked;
294 
295 	sc = device_get_softc(dev);
296 	locked = mtx_owned(&sc->sc_mtx);
297 	if (!locked)
298 		AXGE_LOCK(sc);
299 
300 	val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy);
301 
302 	if (!locked)
303 		AXGE_UNLOCK(sc);
304 
305 	return (val);
306 }
307 
308 static int
309 axge_miibus_writereg(device_t dev, int phy, int reg, int val)
310 {
311 	struct axge_softc *sc;
312 	int locked;
313 
314 	sc = device_get_softc(dev);
315 	locked = mtx_owned(&sc->sc_mtx);
316 	if (!locked)
317 		AXGE_LOCK(sc);
318 
319 	axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val);
320 
321 	if (!locked)
322 		AXGE_UNLOCK(sc);
323 
324 	return (0);
325 }
326 
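/*
 * Link state change handler.  Tracks link state and, when the link is
 * up, reprograms the RX bulk-in aggregation parameters and the medium
 * status register (MSR) to match the negotiated speed, duplex and
 * flow-control settings.
 */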
327 static void
328 axge_miibus_statchg(device_t dev)
329 {
330 	struct axge_softc *sc;
331 	struct mii_data *mii;
332 	if_t ifp;
333 	uint8_t link_status, tmp[5];
334 	uint16_t val;
335 	int locked;
336 
337 	sc = device_get_softc(dev);
338 	mii = GET_MII(sc);
339 	locked = mtx_owned(&sc->sc_mtx);
340 	if (!locked)
341 		AXGE_LOCK(sc);
342 
343 	ifp = uether_getifp(&sc->sc_ue);
344 	if (mii == NULL || ifp == NULL ||
345 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
346 		goto done;
347 
348 	sc->sc_flags &= ~AXGE_FLAG_LINK;
349 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
350 	    (IFM_ACTIVE | IFM_AVALID)) {
351 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
352 		case IFM_10_T:
353 		case IFM_100_TX:
354 		case IFM_1000_T:
355 			sc->sc_flags |= AXGE_FLAG_LINK;
356 			break;
357 		default:
358 			break;
359 		}
360 	}
361 
362 	/* Lost link, do nothing. */
363 	if ((sc->sc_flags & AXGE_FLAG_LINK) == 0)
364 		goto done;
365 
366 	link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);
367 
368 	val = 0;
369 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
370 		val |= MSR_FD;
371 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
372 			val |= MSR_TFC;
373 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
374 			val |= MSR_RFC;
375 	}
376 	val |= MSR_RE;
377 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
378 	case IFM_1000_T:
379 		val |= MSR_GM | MSR_EN_125MHZ;
380 		if (link_status & PLSR_USB_SS)
381 			memcpy(tmp, &axge_bulk_size[0], 5);
382 		else if (link_status & PLSR_USB_HS)
383 			memcpy(tmp, &axge_bulk_size[1], 5);
384 		else
385 			memcpy(tmp, &axge_bulk_size[3], 5);
386 		break;
387 	case IFM_100_TX:
388 		val |= MSR_PS;
389 		if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
390 			memcpy(tmp, &axge_bulk_size[2], 5);
391 		else
392 			memcpy(tmp, &axge_bulk_size[3], 5);
393 		break;
394 	case IFM_10_T:
395 		memcpy(tmp, &axge_bulk_size[3], 5);
396 		break;
397 	}
398 	/* Rx bulk configuration. */
399 	axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
400 	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
401 done:
402 	if (!locked)
403 		AXGE_UNLOCK(sc);
404 }
405 
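/*
 * Basic chip bring-up: power up the Ethernet PHY, program the clock
 * selection register and, on the AX88179A, select the legacy 178A/179
 * frame format.
 */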
406 static void
407 axge_chip_init(struct axge_softc *sc)
408 {
409 	/* Power up ethernet PHY. */
410 	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
411 	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
412 	uether_pause(&sc->sc_ue, hz / 4);
413 	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
414 	    AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
415 	uether_pause(&sc->sc_ue, hz / 10);
416 
417 	if ((sc->sc_flags & AXGE_FLAG_179A) != 0) {
418 		/*
 419 		 * The 179A chip has two firmware modes that use different transfer
 420 		 * layouts for Ethernet over USB.  The newer firmware mode has
 421 		 * larger RX packet headers which appear to accommodate Ethernet
 422 		 * frames of up to 9K in length and a VLAN field for hardware
 423 		 * tagging, but it is not backward compatible with the 178A/179
 424 		 * bulk transfer code due to the change in size and field
 425 		 * alignments.  The other firmware mode uses the same packet
 426 		 * headers as the older 178A/179 chips; this driver uses that mode.
 427 		 *
 428 		 * Since this driver does not support VLAN hardware tagging or
 429 		 * jumbo frames anyway, it is safe to force the 179A into its
 430 		 * compatibility mode by default.
431 		 */
432 		axge_write_cmd_1(sc, AXGE_FW_MODE, AXGE_FW_MODE_178A179, 0);
433 	}
434 }
435 
436 static void
437 axge_reset(struct axge_softc *sc)
438 {
439 	struct usb_config_descriptor *cd;
440 	usb_error_t err;
441 
442 	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);
443 
444 	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
445 	    cd->bConfigurationValue);
446 	if (err)
447 		DPRINTF("reset failed (ignored)\n");
448 
449 	/* Wait a little while for the chip to get its brains in order. */
450 	uether_pause(&sc->sc_ue, hz / 100);
451 
452 	/* Reinitialize controller to achieve full reset. */
453 	axge_chip_init(sc);
454 }
455 
456 static void
457 axge_attach_post(struct usb_ether *ue)
458 {
459 	struct axge_softc *sc;
460 
461 	sc = uether_getsc(ue);
462 
463 	/* Initialize controller and get station address. */
464 	axge_chip_init(sc);
465 	axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
466 	    ue->ue_eaddr, ETHER_ADDR_LEN);
467 }
468 
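/*
 * Second-stage attach hook from the usb_ether framework: set up the
 * ifnet methods and capabilities and attach the MII bus.
 */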
469 static int
470 axge_attach_post_sub(struct usb_ether *ue)
471 {
472 	if_t ifp;
473 	int error;
474 
475 	ifp = ue->ue_ifp;
476 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
477 	if_setstartfn(ifp, uether_start);
478 	if_setioctlfn(ifp, axge_ioctl);
479 	if_setinitfn(ifp, uether_init);
480 	if_setsendqlen(ifp, ifqmaxlen);
481 	if_setsendqready(ifp);
482 
483 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM, 0);
484 	if_sethwassist(ifp, AXGE_CSUM_FEATURES);
485 	if_setcapenable(ifp, if_getcapabilities(ifp));
486 
487 	bus_topo_lock();
488 	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
489 	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
490 	    BMSR_DEFCAPMASK, AXGE_PHY_ADDR, MII_OFFSET_ANY, MIIF_DOPAUSE);
491 	bus_topo_unlock();
492 
493 	return (error);
494 }
495 
496 /*
497  * Set media options.
498  */
499 static int
500 axge_ifmedia_upd(if_t ifp)
501 {
502 	struct axge_softc *sc;
503 	struct mii_data *mii;
504 	struct mii_softc *miisc;
505 	int error;
506 
507 	sc = if_getsoftc(ifp);
508 	mii = GET_MII(sc);
509 	AXGE_LOCK_ASSERT(sc, MA_OWNED);
510 
511 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
512 	    PHY_RESET(miisc);
513 	error = mii_mediachg(mii);
514 
515 	return (error);
516 }
517 
518 /*
519  * Report current media status.
520  */
521 static void
522 axge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
523 {
524 	struct axge_softc *sc;
525 	struct mii_data *mii;
526 
527 	sc = if_getsoftc(ifp);
528 	mii = GET_MII(sc);
529 	AXGE_LOCK(sc);
530 	mii_pollstat(mii);
531 	ifmr->ifm_active = mii->mii_media_active;
532 	ifmr->ifm_status = mii->mii_media_status;
533 	AXGE_UNLOCK(sc);
534 }
535 
536 /*
 537  * Probe for an AX88179 chip.
538  */
539 static int
540 axge_probe(device_t dev)
541 {
542 	struct usb_attach_arg *uaa;
543 
544 	uaa = device_get_ivars(dev);
545 	if (uaa->usb_mode != USB_MODE_HOST)
546 		return (ENXIO);
547 	if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
548 		return (ENXIO);
549 	if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
550 		return (ENXIO);
551 
552 	return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
553 }
554 
555 /*
556  * Attach the interface. Allocate softc structures, do ifmedia
557  * setup and ethernet/BPF attach.
558  */
559 static int
560 axge_attach(device_t dev)
561 {
562 	struct usb_attach_arg *uaa;
563 	struct axge_softc *sc;
564 	struct usb_ether *ue;
565 	uint8_t iface_index;
566 	int error;
567 
568 	uaa = device_get_ivars(dev);
569 	sc = device_get_softc(dev);
570 	ue = &sc->sc_ue;
571 
572 	device_set_usb_desc(dev);
573 	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
574 
575 	sc->sc_flags = USB_GET_DRIVER_INFO(uaa);
576 
577 	iface_index = AXGE_IFACE_IDX;
578 	error = usbd_transfer_setup(uaa->device, &iface_index,
579 	    sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
580 	if (error) {
581 		device_printf(dev, "allocating USB transfers failed\n");
582 		mtx_destroy(&sc->sc_mtx);
583 		return (ENXIO);
584 	}
585 
586 	ue->ue_sc = sc;
587 	ue->ue_dev = dev;
588 	ue->ue_udev = uaa->device;
589 	ue->ue_mtx = &sc->sc_mtx;
590 	ue->ue_methods = &axge_ue_methods;
591 
592 	error = uether_ifattach(ue);
593 	if (error) {
594 		device_printf(dev, "could not attach interface\n");
595 		goto detach;
596 	}
597 	return (0);			/* success */
598 
599 detach:
600 	axge_detach(dev);
601 	return (ENXIO);			/* failure */
602 }
603 
604 static int
605 axge_detach(device_t dev)
606 {
607 	struct axge_softc *sc;
608 	struct usb_ether *ue;
609 	uint16_t val;
610 
611 	sc = device_get_softc(dev);
612 	ue = &sc->sc_ue;
613 	if (device_is_attached(dev)) {
614 		/* wait for any post attach or other command to complete */
615 		usb_proc_drain(&ue->ue_tq);
616 
617 		AXGE_LOCK(sc);
618 		/*
619 		 * XXX
620 		 * ether_ifdetach(9) should be called first.
621 		 */
622 		axge_stop(ue);
623 		/* Force bulk-in to return a zero-length USB packet. */
624 		val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR);
625 		val |= EPPRCR_BZ | EPPRCR_IPRL;
626 		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, val);
627 		/* Change clock. */
628 		axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT, 0);
629 		/* Disable MAC. */
630 		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, 0);
631 		AXGE_UNLOCK(sc);
632 	}
633 	usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
634 	uether_ifdetach(ue);
635 	mtx_destroy(&sc->sc_mtx);
636 
637 	return (0);
638 }
639 
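/*
 * Bulk-in completion callback.  Each completed transfer may carry
 * several aggregated RX frames; they are parsed by axge_rx_frame() and
 * the resulting mbufs are handed to the stack via uether_rxflush().
 */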
640 static void
641 axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
642 {
643 	struct axge_softc *sc;
644 	struct usb_ether *ue;
645 	struct usb_page_cache *pc;
646 	int actlen;
647 
648 	sc = usbd_xfer_softc(xfer);
649 	ue = &sc->sc_ue;
650 	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
651 
652 	switch (USB_GET_STATE(xfer)) {
653 	case USB_ST_TRANSFERRED:
654 		pc = usbd_xfer_get_frame(xfer, 0);
655 		axge_rx_frame(ue, pc, actlen);
656 
657 		/* FALLTHROUGH */
658 	case USB_ST_SETUP:
659 tr_setup:
660 		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
661 		usbd_transfer_submit(xfer);
662 		uether_rxflush(ue);
663 		break;
664 
665 	default:
666 		if (error != USB_ERR_CANCELLED) {
667 			usbd_xfer_set_stall(xfer);
668 			goto tr_setup;
669 		}
670 		break;
671 	}
672 }
673 
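/*
 * Bulk-out callback.  Dequeues up to AXGE_N_FRAMES packets from the
 * send queue, prepends an axge_frame_txhdr to each and submits them as
 * separate USB frames of a single transfer.
 */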
674 static void
675 axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
676 {
677 	struct axge_softc *sc;
678 	if_t ifp;
679 	struct usb_page_cache *pc;
680 	struct mbuf *m;
681 	struct axge_frame_txhdr txhdr;
682 	int nframes, pos;
683 
684 	sc = usbd_xfer_softc(xfer);
685 	ifp = uether_getifp(&sc->sc_ue);
686 
687 	switch (USB_GET_STATE(xfer)) {
688 	case USB_ST_TRANSFERRED:
689 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
690 		/* FALLTHROUGH */
691 	case USB_ST_SETUP:
692 tr_setup:
693 		if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 ||
694 		    (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0) {
695 			/*
 696 			 * Don't send anything if there is no link or the
 697 			 * controller is busy.
698 			 */
699 			return;
700 		}
701 
702 		for (nframes = 0; nframes < AXGE_N_FRAMES &&
703 		    !if_sendq_empty(ifp); nframes++) {
704 			m = if_dequeue(ifp);
705 			if (m == NULL)
706 				break;
707 			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
708 			    nframes);
709 			pc = usbd_xfer_get_frame(xfer, nframes);
710 			txhdr.mss = 0;
711 			txhdr.len = htole32(AXGE_TXBYTES(m->m_pkthdr.len));
712 			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0 &&
713 			    (m->m_pkthdr.csum_flags & AXGE_CSUM_FEATURES) == 0)
714 				txhdr.len |= htole32(AXGE_CSUM_DISABLE);
715 
716 			pos = 0;
717 			usbd_copy_in(pc, pos, &txhdr, sizeof(txhdr));
718 			pos += sizeof(txhdr);
719 			usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
720 			pos += m->m_pkthdr.len;
721 
722 			/*
 723 			 * If there's a BPF listener, bounce a copy
 724 			 * of this frame to it:
725 			 */
726 			BPF_MTAP(ifp, m);
727 
728 			m_freem(m);
729 
730 			/* Set frame length. */
731 			usbd_xfer_set_frame_len(xfer, nframes, pos);
732 		}
733 		if (nframes != 0) {
734 			/*
735 			 * XXX
736 			 * Update TX packet counter here. This is not
737 			 * correct way but it seems that there is no way
738 			 * to know how many packets are sent at the end
739 			 * of transfer because controller combines
740 			 * multiple writes into single one if there is
741 			 * room in TX buffer of controller.
742 			 */
743 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, nframes);
744 			usbd_xfer_set_frames(xfer, nframes);
745 			usbd_transfer_submit(xfer);
746 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
747 		}
748 		return;
749 		/* NOTREACHED */
750 	default:
751 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
752 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
753 
754 		if (error != USB_ERR_CANCELLED) {
755 			usbd_xfer_set_stall(xfer);
756 			goto tr_setup;
757 		}
758 		return;
759 	}
760 }
761 
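/* Periodic timer from the usb_ether framework; drives mii_tick(). */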
762 static void
763 axge_tick(struct usb_ether *ue)
764 {
765 	struct axge_softc *sc;
766 	struct mii_data *mii;
767 
768 	sc = uether_getsc(ue);
769 	mii = GET_MII(sc);
770 	AXGE_LOCK_ASSERT(sc, MA_OWNED);
771 
772 	mii_tick(mii);
773 }
774 
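/*
 * if_foreach_llmaddr() callback: hash one multicast address into the
 * 64-bit multicast filter table using the upper 6 bits of the
 * big-endian CRC32.
 */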
775 static u_int
776 axge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
777 {
778 	uint8_t *hashtbl = arg;
779 	uint32_t h;
780 
781 	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
782 	hashtbl[h / 8] |= 1 << (h % 8);
783 
784 	return (1);
785 }
786 
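/*
 * Program the receive control register and the multicast filter.  Used
 * for both the setmulti and setpromisc usb_ether methods.
 */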
787 static void
788 axge_rxfilter(struct usb_ether *ue)
789 {
790 	struct axge_softc *sc;
791 	if_t ifp;
792 	uint16_t rxmode;
793 	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
794 
795 	sc = uether_getsc(ue);
796 	ifp = uether_getifp(ue);
797 	AXGE_LOCK_ASSERT(sc, MA_OWNED);
798 
799 	/*
800 	 * Configure RX settings.
 801 	 * Don't set RCR_IPE (align the IP header on a 32-bit boundary),
 802 	 * which would make the chip insert extra padding bytes.  That
 803 	 * wastes Ethernet-to-USB-host bandwidth and complicates the RX
 804 	 * handling logic.  The current USB framework copies RX frames
 805 	 * into mbufs anyway, so there is no need to worry about alignment.
806 	 */
807 	rxmode = RCR_DROP_CRCERR | RCR_START;
808 	if (if_getflags(ifp) & IFF_BROADCAST)
809 		rxmode |= RCR_ACPT_BCAST;
810 	if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
811 		if (if_getflags(ifp) & IFF_PROMISC)
812 			rxmode |= RCR_PROMISC;
813 		rxmode |= RCR_ACPT_ALL_MCAST;
814 		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
815 		return;
816 	}
817 
818 	rxmode |= RCR_ACPT_MCAST;
819 	if_foreach_llmaddr(ifp, axge_hash_maddr, &hashtbl);
820 
821 	axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8);
822 	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
823 }
824 
825 static void
826 axge_start(struct usb_ether *ue)
827 {
828 	struct axge_softc *sc;
829 
830 	sc = uether_getsc(ue);
831 	/*
832 	 * Start the USB transfers, if not already started.
833 	 */
834 	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
835 	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
836 }
837 
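/*
 * Bring the interface up: reset the chip, program the station address,
 * checksum offload and RX filters, and switch to the selected media.
 * Called with the softc lock held.
 */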
838 static void
839 axge_init(struct usb_ether *ue)
840 {
841 	struct axge_softc *sc;
842 	if_t ifp;
843 
844 	sc = uether_getsc(ue);
845 	ifp = uether_getifp(ue);
846 	AXGE_LOCK_ASSERT(sc, MA_OWNED);
847 
848 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
849 		return;
850 
851 	/*
852 	 * Cancel pending I/O and free all RX/TX buffers.
853 	 */
854 	axge_stop(ue);
855 
856 	axge_reset(sc);
857 
858 	/* Set MAC address. */
859 	axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
860 	    if_getlladdr(ifp), ETHER_ADDR_LEN);
861 
862 	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
863 	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);
864 
865 	/* Configure TX/RX checksum offloading. */
866 	axge_csum_cfg(ue);
867 
 868 	/* Configure RX filters. */
869 	axge_rxfilter(ue);
870 
871 	/*
872 	 * XXX
 873 	 * The controller supports wakeup on link change detection,
 874 	 * magic packet and wakeup frame reception, but there seems
 875 	 * to be no framework for USB Ethernet suspend/wakeup.
 876 	 * Disable all wakeup functions.
877 	 */
878 	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR, 0);
879 	(void)axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR);
880 
881 	/* Configure default medium type. */
882 	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, MSR_GM | MSR_FD |
883 	    MSR_RFC | MSR_TFC | MSR_RE);
884 
885 	usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]);
886 
887 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
888 	/* Switch to selected media. */
889 	axge_ifmedia_upd(ifp);
890 }
891 
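/*
 * Disable the receiver, clear the link and running flags and stop the
 * bulk transfers.  Called with the softc lock held.
 */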
892 static void
893 axge_stop(struct usb_ether *ue)
894 {
895 	struct axge_softc *sc;
896 	if_t ifp;
897 	uint16_t val;
898 
899 	sc = uether_getsc(ue);
900 	ifp = uether_getifp(ue);
901 
902 	AXGE_LOCK_ASSERT(sc, MA_OWNED);
903 
904 	val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR);
905 	val &= ~MSR_RE;
906 	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
907 
908 	if (ifp != NULL)
909 		if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
910 	sc->sc_flags &= ~AXGE_FLAG_LINK;
911 
912 	/*
913 	 * Stop all the transfers, if not already stopped:
914 	 */
915 	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
916 	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
917 }
918 
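/*
 * Handle SIOCSIFCAP locally so that checksum offload can be toggled;
 * everything else is passed on to uether_ioctl().
 */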
919 static int
920 axge_ioctl(if_t ifp, u_long cmd, caddr_t data)
921 {
922 	struct usb_ether *ue;
923 	struct axge_softc *sc;
924 	struct ifreq *ifr;
925 	int error, mask, reinit;
926 
927 	ue = if_getsoftc(ifp);
928 	sc = uether_getsc(ue);
929 	ifr = (struct ifreq *)data;
930 	error = 0;
931 	reinit = 0;
932 	if (cmd == SIOCSIFCAP) {
933 		AXGE_LOCK(sc);
934 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
935 		if ((mask & IFCAP_TXCSUM) != 0 &&
936 		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
937 			if_togglecapenable(ifp, IFCAP_TXCSUM);
938 			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
939 				if_sethwassistbits(ifp, AXGE_CSUM_FEATURES, 0);
940 			else
941 				if_sethwassistbits(ifp, 0, AXGE_CSUM_FEATURES);
942 			reinit++;
943 		}
944 		if ((mask & IFCAP_RXCSUM) != 0 &&
945 		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
946 			if_togglecapenable(ifp, IFCAP_RXCSUM);
947 			reinit++;
948 		}
949 		if (reinit > 0 && if_getdrvflags(ifp) & IFF_DRV_RUNNING)
950 			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
951 		else
952 			reinit = 0;
953 		AXGE_UNLOCK(sc);
954 		if (reinit > 0)
955 			uether_init(ue);
956 	} else
957 		error = uether_ioctl(ifp, cmd, data);
958 
959 	return (error);
960 }
961 
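/*
 * Parse one aggregated bulk-in buffer.  A trailing descriptor (rxhdr)
 * gives the number of packets and the offset of the per-packet header
 * block; each valid packet is then handed to axge_rxeof().
 */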
962 static void
963 axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
964 {
965 	struct axge_frame_rxhdr pkt_hdr;
966 	uint32_t rxhdr;
967 	uint32_t pos;
968 	uint32_t pkt_cnt, pkt_end;
969 	uint32_t hdr_off;
970 	uint32_t pktlen;
971 
972 	/* verify we have enough data */
973 	if (actlen < (int)sizeof(rxhdr))
974 		return;
975 
976 	pos = 0;
977 
978 	usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
979 	rxhdr = le32toh(rxhdr);
980 
981 	pkt_cnt = rxhdr & 0xFFFF;
982 	hdr_off = pkt_end = (rxhdr >> 16) & 0xFFFF;
983 
984 	/*
985 	 * On older firmware:
986 	 * <----------------------- actlen ------------------------>
987 	 * [frame #0]...[frame #N][pkt_hdr #0]...[pkt_hdr #N][rxhdr]
988 	 *
989 	 * On newer firmware:
990 	 * <----------------------- actlen -----------------
991 	 * [frame #0]...[frame #N][pkt_hdr #0][dummy_hdr]...
992 	 *                         -------------------------------->
993 	 *                         ...[pkt_hdr #N][dummy_hdr][rxhdr]
994 	 *
 995 	 * Each RX frame is aligned on an 8-byte boundary.  If the
 996 	 * RCR_IPE bit is set in the AXGE_RCR register, each RX frame
 997 	 * carries 2 padding bytes plus 6 dummy bytes (since the padded
 998 	 * frame must also be aligned on an 8-byte boundary) so that
 999 	 * the IP header lands on a 32-bit boundary.  This driver does
 1000 	 * not set RCR_IPE in AXGE_RCR, so there are no padding bytes,
 1001 	 * which simplifies the RX logic a lot.
1002 	 *
1003 	 * Further, newer firmware interweaves dummy headers that have
1004 	 * pktlen == 0 and should be skipped without being seen as
1005 	 * dropped frames.
1006 	 */
1007 	while (pkt_cnt--) {
1008 		/* verify the header offset */
1009 		if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
1010 			DPRINTF("End of packet headers\n");
1011 			break;
1012 		}
1013 		usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));
1014 		pkt_hdr.status = le32toh(pkt_hdr.status);
1015 		pktlen = AXGE_RXBYTES(pkt_hdr.status);
1016 		hdr_off += sizeof(pkt_hdr);
1017 
1018 		/* Skip dummy packet header. */
1019 		if (pktlen == 0)
1020 			continue;
1021 
1022 		if (pos + pktlen > pkt_end) {
1023 			DPRINTF("Data position reached end\n");
1024 			break;
1025 		}
1026 
1027 		if (AXGE_RX_ERR(pkt_hdr.status) != 0) {
1028 			DPRINTF("Dropped a packet\n");
1029 			if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1);
1030 		} else
1031 			axge_rxeof(ue, pc, pos, pktlen, pkt_hdr.status);
1032 		pos += (pktlen + 7) & ~7;
1033 	}
1034 }
1035 
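/*
 * Copy a single received frame into a freshly allocated mbuf, set the
 * checksum flags from the hardware RX status word and queue the mbuf
 * for delivery to the network stack.
 */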
1036 static void
1037 axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned offset,
1038     unsigned len, uint32_t status)
1039 {
1040 	if_t ifp;
1041 	struct mbuf *m;
1042 
1043 	ifp = ue->ue_ifp;
1044 	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
1045 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1046 		return;
1047 	}
1048 
1049 	if (len > MHLEN - ETHER_ALIGN)
1050 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1051 	else
1052 		m = m_gethdr(M_NOWAIT, MT_DATA);
1053 	if (m == NULL) {
1054 		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1055 		return;
1056 	}
1057 	m->m_pkthdr.rcvif = ifp;
1058 	m->m_len = m->m_pkthdr.len = len;
1059 	m->m_data += ETHER_ALIGN;
1060 
1061 	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);
1062 
1063 	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
1064 		if ((status & AXGE_RX_L3_CSUM_ERR) == 0 &&
1065 		    (status & AXGE_RX_L3_TYPE_MASK) == AXGE_RX_L3_TYPE_IPV4)
1066 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
1067 			    CSUM_IP_VALID;
1068 		if ((status & AXGE_RX_L4_CSUM_ERR) == 0 &&
1069 		    ((status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_UDP ||
1070 		    (status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_TCP)) {
1071 			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
1072 			    CSUM_PSEUDO_HDR;
1073 			m->m_pkthdr.csum_data = 0xffff;
1074 		}
1075 	}
1076 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1077 
1078 	(void)mbufq_enqueue(&ue->ue_rxq, m);
1079 }
1080 
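/*
 * Program the TX (CTCR) and RX (CRCR) checksum offload control
 * registers from the interface's enabled capabilities.
 */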
1081 static void
1082 axge_csum_cfg(struct usb_ether *ue)
1083 {
1084 	struct axge_softc *sc;
1085 	if_t ifp;
1086 	uint8_t csum;
1087 
1088 	sc = uether_getsc(ue);
1089 	AXGE_LOCK_ASSERT(sc, MA_OWNED);
1090 	ifp = uether_getifp(ue);
1091 
1092 	csum = 0;
1093 	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
1094 		csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
1095 	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);
1096 
1097 	csum = 0;
1098 	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
1099 		csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
1100 	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);
1101 }
1102