1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2013-2014 Kevin Lo
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 /*
30 * ASIX Electronics AX88178A/AX88179/AX88179A USB 2.0/3.0 gigabit ethernet
31 * driver.
32 */
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/bus.h>
37 #include <sys/condvar.h>
38 #include <sys/endian.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
43 #include <sys/socket.h>
44 #include <sys/sysctl.h>
45 #include <sys/unistd.h>
46
47 #include <net/if.h>
48 #include <net/if_var.h>
49 #include <net/if_media.h>
50
51 #include <dev/mii/mii.h>
52 #include <dev/mii/miivar.h>
53
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usbdi.h>
56 #include <dev/usb/usbdi_util.h>
57 #include "usbdevs.h"
58
59 #define USB_DEBUG_VAR axge_debug
60 #include <dev/usb/usb_debug.h>
61 #include <dev/usb/usb_process.h>
62
63 #include <dev/usb/net/usb_ethernet.h>
64 #include <dev/usb/net/if_axgereg.h>
65
66 #include "miibus_if.h"
67
68 /*
69 * Various supported device vendors/products.
70 */
71
/*
 * Device ID table.  The driver-info field carries an AXGE_FLAG_* value
 * that is copied into sc_flags at attach time to select chip-specific
 * behavior.  The two AX88179 entries are distinguished by bcdDevice:
 * older revisions (<= 0x0100) are the original 179, newer revisions
 * (>= 0x0200) are the 179A variant.
 */
static const STRUCT_USB_HOST_ID axge_devs[] = {
#define AXGE_DEV(v,p,i,...) \
	{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i), __VA_ARGS__ }
	AXGE_DEV(ASIX, AX88178A, AXGE_FLAG_178A),
	AXGE_DEV(ASIX, AX88179, AXGE_FLAG_179, USB_DEV_BCD_LTEQ(0x0100)),
	AXGE_DEV(ASIX, AX88179, AXGE_FLAG_179A, USB_DEV_BCD_GTEQ(0x0200)),
	AXGE_DEV(BELKIN, B2B128, AXGE_FLAG_179),
	AXGE_DEV(DLINK, DUB1312, AXGE_FLAG_179),
	AXGE_DEV(LENOVO, GIGALAN, AXGE_FLAG_179),
	AXGE_DEV(SITECOMEU, LN032, AXGE_FLAG_179),
#undef AXGE_DEV
};
84
/*
 * RX bulk-in aggregation parameters, written verbatim (5 bytes) to the
 * AXGE_RX_BULKIN_QCTRL registers by axge_miibus_statchg().  Row selection
 * depends on link speed and USB bus speed: [0] SuperSpeed/gigabit,
 * [1] HighSpeed/gigabit, [2] SS or HS at 100Mbps, [3] everything else.
 */
static const struct {
	uint8_t ctrl;
	uint8_t timer_l;
	uint8_t timer_h;
	uint8_t size;
	uint8_t ifg;
} __packed axge_bulk_size[] = {
	{ 7, 0x4f, 0x00, 0x12, 0xff },
	{ 7, 0x20, 0x03, 0x16, 0xff },
	{ 7, 0xae, 0x07, 0x18, 0xff },
	{ 7, 0xcc, 0x4c, 0x18, 0x08 }
};
97
/* prototypes */

/* newbus device interface */
static device_probe_t axge_probe;
static device_attach_t axge_attach;
static device_detach_t axge_detach;

/* USB transfer callbacks */
static usb_callback_t axge_bulk_read_callback;
static usb_callback_t axge_bulk_write_callback;

/* MII bus interface */
static miibus_readreg_t axge_miibus_readreg;
static miibus_writereg_t axge_miibus_writereg;
static miibus_statchg_t axge_miibus_statchg;

/* usb_ether framework hooks */
static uether_fn_t axge_attach_post;
static uether_fn_t axge_init;
static uether_fn_t axge_stop;
static uether_fn_t axge_start;
static uether_fn_t axge_tick;
static uether_fn_t axge_rxfilter;

/* register/memory access helpers (vendor control transfers) */
static int	axge_read_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static void	axge_write_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static uint8_t	axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t);
static uint16_t	axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t);
static void	axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t,
		    uint8_t);
static void	axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, uint16_t);
static void	axge_chip_init(struct axge_softc *);
static void	axge_reset(struct axge_softc *);

static int	axge_attach_post_sub(struct usb_ether *);
static int	axge_ifmedia_upd(if_t);
static void	axge_ifmedia_sts(if_t, struct ifmediareq *);
static int	axge_ioctl(if_t, u_long, caddr_t);
static void	axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
static void	axge_rxeof(struct usb_ether *, struct usb_page_cache *,
		    unsigned, unsigned, uint32_t);
static void	axge_csum_cfg(struct usb_ether *);

/* Checksum offload kinds the hardware can handle. */
#define	AXGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
142
#ifdef USB_DEBUG
/* Debug verbosity, tunable via hw.usb.axge.debug. */
static int axge_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "USB axge");
SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RWTUN, &axge_debug, 0,
    "Debug level");
#endif
151
/*
 * USB transfer layout: one multi-frame bulk-out pipe for TX and one
 * bulk-in pipe for RX (the chip aggregates several ethernet frames per
 * bulk-in transfer, hence the large 64KB buffer).
 */
static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
	[AXGE_BULK_DT_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.frames = AXGE_N_FRAMES,
		.bufsize = AXGE_N_FRAMES * MCLBYTES,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = axge_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
	[AXGE_BULK_DT_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 65536,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = axge_bulk_read_callback,
		.timeout = 0,		/* no timeout */
	},
};
173
/* newbus method table: device lifecycle plus MII register access. */
static device_method_t axge_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		axge_probe),
	DEVMETHOD(device_attach,	axge_attach),
	DEVMETHOD(device_detach,	axge_detach),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	axge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	axge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	axge_miibus_statchg),

	DEVMETHOD_END
};
187
static driver_t axge_driver = {
	.name = "axge",
	.methods = axge_methods,
	.size = sizeof(struct axge_softc),
};

/* Attach under uhub; expose an MII bus for the integrated PHY. */
DRIVER_MODULE(axge, uhub, axge_driver, NULL, NULL);
DRIVER_MODULE(miibus, axge, miibus_driver, NULL, NULL);
MODULE_DEPEND(axge, uether, 1, 1, 1);
MODULE_DEPEND(axge, usb, 1, 1, 1);
MODULE_DEPEND(axge, ether, 1, 1, 1);
MODULE_DEPEND(axge, miibus, 1, 1, 1);
MODULE_VERSION(axge, 1);
USB_PNP_HOST_INFO(axge_devs);
202
/*
 * usb_ether framework glue.  Note that multicast and promiscuous
 * changes both funnel into axge_rxfilter().
 */
static const struct usb_ether_methods axge_ue_methods = {
	.ue_attach_post = axge_attach_post,
	.ue_attach_post_sub = axge_attach_post_sub,
	.ue_start = axge_start,
	.ue_init = axge_init,
	.ue_stop = axge_stop,
	.ue_tick = axge_tick,
	.ue_setmulti = axge_rxfilter,
	.ue_setpromisc = axge_rxfilter,
	.ue_mii_upd = axge_ifmedia_upd,
	.ue_mii_sts = axge_ifmedia_sts,
};
215
216 static int
axge_read_mem(struct axge_softc * sc,uint8_t cmd,uint16_t index,uint16_t val,void * buf,int len)217 axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
218 uint16_t val, void *buf, int len)
219 {
220 struct usb_device_request req;
221
222 AXGE_LOCK_ASSERT(sc, MA_OWNED);
223
224 req.bmRequestType = UT_READ_VENDOR_DEVICE;
225 req.bRequest = cmd;
226 USETW(req.wValue, val);
227 USETW(req.wIndex, index);
228 USETW(req.wLength, len);
229
230 return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
231 }
232
233 static void
axge_write_mem(struct axge_softc * sc,uint8_t cmd,uint16_t index,uint16_t val,void * buf,int len)234 axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
235 uint16_t val, void *buf, int len)
236 {
237 struct usb_device_request req;
238
239 AXGE_LOCK_ASSERT(sc, MA_OWNED);
240
241 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
242 req.bRequest = cmd;
243 USETW(req.wValue, val);
244 USETW(req.wIndex, index);
245 USETW(req.wLength, len);
246
247 if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
248 /* Error ignored. */
249 }
250 }
251
252 static uint8_t
axge_read_cmd_1(struct axge_softc * sc,uint8_t cmd,uint16_t reg)253 axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
254 {
255 uint8_t val;
256
257 axge_read_mem(sc, cmd, 1, reg, &val, 1);
258 return (val);
259 }
260
261 static uint16_t
axge_read_cmd_2(struct axge_softc * sc,uint8_t cmd,uint16_t index,uint16_t reg)262 axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
263 uint16_t reg)
264 {
265 uint8_t val[2];
266
267 axge_read_mem(sc, cmd, index, reg, &val, 2);
268 return (UGETW(val));
269 }
270
271 static void
axge_write_cmd_1(struct axge_softc * sc,uint8_t cmd,uint16_t reg,uint8_t val)272 axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
273 {
274 axge_write_mem(sc, cmd, 1, reg, &val, 1);
275 }
276
277 static void
axge_write_cmd_2(struct axge_softc * sc,uint8_t cmd,uint16_t index,uint16_t reg,uint16_t val)278 axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
279 uint16_t reg, uint16_t val)
280 {
281 uint8_t temp[2];
282
283 USETW(temp, val);
284 axge_write_mem(sc, cmd, index, reg, &temp, 2);
285 }
286
287 static int
axge_miibus_readreg(device_t dev,int phy,int reg)288 axge_miibus_readreg(device_t dev, int phy, int reg)
289 {
290 struct axge_softc *sc;
291 uint16_t val;
292 int locked;
293
294 sc = device_get_softc(dev);
295 locked = mtx_owned(&sc->sc_mtx);
296 if (!locked)
297 AXGE_LOCK(sc);
298
299 val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy);
300
301 if (!locked)
302 AXGE_UNLOCK(sc);
303
304 return (val);
305 }
306
307 static int
axge_miibus_writereg(device_t dev,int phy,int reg,int val)308 axge_miibus_writereg(device_t dev, int phy, int reg, int val)
309 {
310 struct axge_softc *sc;
311 int locked;
312
313 sc = device_get_softc(dev);
314 locked = mtx_owned(&sc->sc_mtx);
315 if (!locked)
316 AXGE_LOCK(sc);
317
318 axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val);
319
320 if (!locked)
321 AXGE_UNLOCK(sc);
322
323 return (0);
324 }
325
/*
 * MII link-state change handler.  On link-up, program the medium
 * status register (duplex, flow control, speed) and select the RX
 * bulk-in aggregation parameters matching the negotiated link speed
 * and the USB bus speed.  The QCTRL write must precede the MSR write.
 */
static void
axge_miibus_statchg(device_t dev)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint8_t link_status, tmp[5];
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	mii = GET_MII(sc);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	ifp = uether_getifp(&sc->sc_ue);
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		goto done;

	/* Recompute the link flag from the current media status. */
	sc->sc_flags &= ~AXGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->sc_flags |= AXGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Lost link, do nothing. */
	if ((sc->sc_flags & AXGE_FLAG_LINK) == 0)
		goto done;

	/* PLSR reports the USB bus speed (SS/HS) the chip is running at. */
	link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);

	val = 0;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= MSR_FD;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			val |= MSR_TFC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			val |= MSR_RFC;
	}
	val |= MSR_RE;
	/*
	 * Select bulk-in aggregation parameters; tmp is always written
	 * here because only the three subtypes below can set
	 * AXGE_FLAG_LINK above.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= MSR_GM | MSR_EN_125MHZ;
		if (link_status & PLSR_USB_SS)
			memcpy(tmp, &axge_bulk_size[0], 5);
		else if (link_status & PLSR_USB_HS)
			memcpy(tmp, &axge_bulk_size[1], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_100_TX:
		val |= MSR_PS;
		if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
			memcpy(tmp, &axge_bulk_size[2], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_10_T:
		memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	}
	/* Rx bulk configuration. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
done:
	if (!locked)
		AXGE_UNLOCK(sc);
}
404
/*
 * Bring the chip out of power-down: power up the PHY, select the
 * internal clocks, and (179A only) force the firmware into the legacy
 * packet-header mode this driver speaks.  The pauses between register
 * writes are required for the hardware to settle.
 */
static void
axge_chip_init(struct axge_softc *sc)
{
	/* Power up ethernet PHY. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
	uether_pause(&sc->sc_ue, hz / 4);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
	    AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
	uether_pause(&sc->sc_ue, hz / 10);

	if ((sc->sc_flags & AXGE_FLAG_179A) != 0) {
		/*
		 * The 179A chip has two firmware modes that each use
		 * different transfer layouts for Ethernet over USB.  The
		 * newer fw mode has larger rx packet headers which seem to
		 * accommodate ethernet frames up to 9K length and a VLAN
		 * field for hardware tagging, but is not backward compatible
		 * with 178A/179 bulk transfer code due to the change in size
		 * and field alignments.  The other fw mode uses the same
		 * packet headers as the older 178A/179 chips, which this
		 * driver uses.
		 *
		 * As we do not currently have VLAN hw tagging or jumbo
		 * support in this driver anyway, we're ok forcing 179A into
		 * its compat mode by default.
		 */
		axge_write_cmd_1(sc, AXGE_FW_MODE, AXGE_FW_MODE_178A179, 0);
	}
}
434
435 static void
axge_reset(struct axge_softc * sc)436 axge_reset(struct axge_softc *sc)
437 {
438 struct usb_config_descriptor *cd;
439 usb_error_t err;
440
441 cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);
442
443 err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
444 cd->bConfigurationValue);
445 if (err)
446 DPRINTF("reset failed (ignored)\n");
447
448 /* Wait a little while for the chip to get its brains in order. */
449 uether_pause(&sc->sc_ue, hz / 100);
450
451 /* Reinitialize controller to achieve full reset. */
452 axge_chip_init(sc);
453 }
454
455 static void
axge_attach_post(struct usb_ether * ue)456 axge_attach_post(struct usb_ether *ue)
457 {
458 struct axge_softc *sc;
459
460 sc = uether_getsc(ue);
461
462 /* Initialize controller and get station address. */
463 axge_chip_init(sc);
464 axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
465 ue->ue_eaddr, ETHER_ADDR_LEN);
466 }
467
468 static int
axge_attach_post_sub(struct usb_ether * ue)469 axge_attach_post_sub(struct usb_ether *ue)
470 {
471 if_t ifp;
472 int error;
473
474 ifp = ue->ue_ifp;
475 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
476 if_setstartfn(ifp, uether_start);
477 if_setioctlfn(ifp, axge_ioctl);
478 if_setinitfn(ifp, uether_init);
479 if_setsendqlen(ifp, ifqmaxlen);
480 if_setsendqready(ifp);
481
482 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM, 0);
483 if_sethwassist(ifp, AXGE_CSUM_FEATURES);
484 if_setcapenable(ifp, if_getcapabilities(ifp));
485
486 bus_topo_lock();
487 error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
488 uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
489 BMSR_DEFCAPMASK, AXGE_PHY_ADDR, MII_OFFSET_ANY, MIIF_DOPAUSE);
490 bus_topo_unlock();
491
492 return (error);
493 }
494
495 /*
496 * Set media options.
497 */
498 static int
axge_ifmedia_upd(if_t ifp)499 axge_ifmedia_upd(if_t ifp)
500 {
501 struct axge_softc *sc;
502 struct mii_data *mii;
503 struct mii_softc *miisc;
504 int error;
505
506 sc = if_getsoftc(ifp);
507 mii = GET_MII(sc);
508 AXGE_LOCK_ASSERT(sc, MA_OWNED);
509
510 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
511 PHY_RESET(miisc);
512 error = mii_mediachg(mii);
513
514 return (error);
515 }
516
517 /*
518 * Report current media status.
519 */
520 static void
axge_ifmedia_sts(if_t ifp,struct ifmediareq * ifmr)521 axge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
522 {
523 struct axge_softc *sc;
524 struct mii_data *mii;
525
526 sc = if_getsoftc(ifp);
527 mii = GET_MII(sc);
528 AXGE_LOCK(sc);
529 mii_pollstat(mii);
530 ifmr->ifm_active = mii->mii_media_active;
531 ifmr->ifm_status = mii->mii_media_status;
532 AXGE_UNLOCK(sc);
533 }
534
535 /*
536 * Probe for a AX88179 chip.
537 */
538 static int
axge_probe(device_t dev)539 axge_probe(device_t dev)
540 {
541 struct usb_attach_arg *uaa;
542
543 uaa = device_get_ivars(dev);
544 if (uaa->usb_mode != USB_MODE_HOST)
545 return (ENXIO);
546 if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
547 return (ENXIO);
548 if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
549 return (ENXIO);
550
551 return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
552 }
553
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 *
 * Order matters: the mutex must exist before usbd_transfer_setup()
 * (the transfers reference it), and on failure everything already set
 * up is torn down via axge_detach().
 */
static int
axge_attach(device_t dev)
{
	struct usb_attach_arg *uaa;
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint8_t iface_index;
	int error;

	uaa = device_get_ivars(dev);
	sc = device_get_softc(dev);
	ue = &sc->sc_ue;

	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	/* Chip variant flag (178A/179/179A) from the matched ID entry. */
	sc->sc_flags = USB_GET_DRIVER_INFO(uaa);

	iface_index = AXGE_IFACE_IDX;
	error = usbd_transfer_setup(uaa->device, &iface_index,
	    sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		device_printf(dev, "allocating USB transfers failed\n");
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &axge_ue_methods;

	error = uether_ifattach(ue);
	if (error) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	axge_detach(dev);
	return (ENXIO);			/* failure */
}
602
/*
 * Detach the interface.  Drains pending uether work, quiesces the
 * hardware under the softc lock, then tears down transfers, the ifnet
 * and the mutex — in that order.  Also called from axge_attach() on
 * failure, so it must tolerate a partially attached device.
 */
static int
axge_detach(device_t dev)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint16_t val;

	sc = device_get_softc(dev);
	ue = &sc->sc_ue;
	if (device_is_attached(dev)) {
		/* wait for any post attach or other command to complete */
		usb_proc_drain(&ue->ue_tq);

		AXGE_LOCK(sc);
		/*
		 * XXX
		 * ether_ifdetach(9) should be called first.
		 */
		axge_stop(ue);
		/* Force bulk-in to return a zero-length USB packet. */
		val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR);
		val |= EPPRCR_BZ | EPPRCR_IPRL;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, val);
		/* Change clock. */
		axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT, 0);
		/* Disable MAC. */
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, 0);
		AXGE_UNLOCK(sc);
	}
	usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
	uether_ifdetach(ue);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
638
/*
 * RX bulk transfer callback.  On completion, parse the aggregated
 * frames out of the transfer buffer, then immediately re-arm the
 * transfer and flush the collected mbufs up the stack.  On error
 * (other than cancellation) clear the stall and retry.
 */
static void
axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	struct usb_page_cache *pc;
	int actlen;

	sc = usbd_xfer_softc(xfer);
	ue = &sc->sc_ue;
	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* Extract all ethernet frames from this bulk-in buffer. */
		pc = usbd_xfer_get_frame(xfer, 0);
		axge_rx_frame(ue, pc, actlen);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		/* Hand queued mbufs to the network stack. */
		uether_rxflush(ue);
		break;

	default:
		if (error != USB_ERR_CANCELLED) {
			/* Clear the stall and re-arm the transfer. */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
672
/*
 * TX bulk transfer callback.  Dequeues up to AXGE_N_FRAMES packets,
 * prefixes each with an axge_frame_txhdr and packs them into separate
 * USB frames of the multi-frame transfer.  Nothing is sent while the
 * link is down or a transfer is already in flight (IFF_DRV_OACTIVE).
 */
static void
axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	if_t ifp;
	struct usb_page_cache *pc;
	struct mbuf *m;
	struct axge_frame_txhdr txhdr;
	int nframes, pos;

	sc = usbd_xfer_softc(xfer);
	ifp = uether_getifp(&sc->sc_ue);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* Previous transfer finished; allow new submissions. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 ||
		    (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0) {
			/*
			 * Don't send anything if there is no link or
			 * controller is busy.
			 */
			return;
		}

		for (nframes = 0; nframes < AXGE_N_FRAMES &&
		    !if_sendq_empty(ifp); nframes++) {
			m = if_dequeue(ifp);
			if (m == NULL)
				break;
			/* Each packet occupies its own MCLBYTES slot. */
			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
			    nframes);
			pc = usbd_xfer_get_frame(xfer, nframes);
			txhdr.mss = 0;
			txhdr.len = htole32(AXGE_TXBYTES(m->m_pkthdr.len));
			/*
			 * Offload is on globally but this packet did not
			 * request it: tell the chip to skip checksumming.
			 */
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0 &&
			    (m->m_pkthdr.csum_flags & AXGE_CSUM_FEATURES) == 0)
				txhdr.len |= htole32(AXGE_CSUM_DISABLE);

			pos = 0;
			usbd_copy_in(pc, pos, &txhdr, sizeof(txhdr));
			pos += sizeof(txhdr);
			usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
			pos += m->m_pkthdr.len;

			/*
			 * if there's a BPF listener, bounce a copy
			 * of this frame to him:
			 */
			BPF_MTAP(ifp, m);

			m_freem(m);

			/* Set frame length. */
			usbd_xfer_set_frame_len(xfer, nframes, pos);
		}
		if (nframes != 0) {
			/*
			 * XXX
			 * Update TX packet counter here. This is not
			 * correct way but it seems that there is no way
			 * to know how many packets are sent at the end
			 * of transfer because controller combines
			 * multiple writes into single one if there is
			 * room in TX buffer of controller.
			 */
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, nframes);
			usbd_xfer_set_frames(xfer, nframes);
			usbd_transfer_submit(xfer);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
		}
		return;
		/* NOTREACHED */
	default:
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

		if (error != USB_ERR_CANCELLED) {
			/* Clear the stall and retry the transfer. */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}
760
761 static void
axge_tick(struct usb_ether * ue)762 axge_tick(struct usb_ether *ue)
763 {
764 struct axge_softc *sc;
765 struct mii_data *mii;
766
767 sc = uether_getsc(ue);
768 mii = GET_MII(sc);
769 AXGE_LOCK_ASSERT(sc, MA_OWNED);
770
771 mii_tick(mii);
772 }
773
774 static u_int
axge_hash_maddr(void * arg,struct sockaddr_dl * sdl,u_int cnt)775 axge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
776 {
777 uint8_t *hashtbl = arg;
778 uint32_t h;
779
780 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
781 hashtbl[h / 8] |= 1 << (h % 8);
782
783 return (1);
784 }
785
786 static void
axge_rxfilter(struct usb_ether * ue)787 axge_rxfilter(struct usb_ether *ue)
788 {
789 struct axge_softc *sc;
790 if_t ifp;
791 uint16_t rxmode;
792 uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
793
794 sc = uether_getsc(ue);
795 ifp = uether_getifp(ue);
796 AXGE_LOCK_ASSERT(sc, MA_OWNED);
797
798 /*
799 * Configure RX settings.
800 * Don't set RCR_IPE(IP header alignment on 32bit boundary) to disable
801 * inserting extra padding bytes. This wastes ethernet to USB host
802 * bandwidth as well as complicating RX handling logic. Current USB
803 * framework requires copying RX frames to mbufs so there is no need
804 * to worry about alignment.
805 */
806 rxmode = RCR_DROP_CRCERR | RCR_START;
807 if (if_getflags(ifp) & IFF_BROADCAST)
808 rxmode |= RCR_ACPT_BCAST;
809 if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
810 if (if_getflags(ifp) & IFF_PROMISC)
811 rxmode |= RCR_PROMISC;
812 rxmode |= RCR_ACPT_ALL_MCAST;
813 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
814 return;
815 }
816
817 rxmode |= RCR_ACPT_MCAST;
818 if_foreach_llmaddr(ifp, axge_hash_maddr, &hashtbl);
819
820 axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8);
821 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
822 }
823
824 static void
axge_start(struct usb_ether * ue)825 axge_start(struct usb_ether *ue)
826 {
827 struct axge_softc *sc;
828
829 sc = uether_getsc(ue);
830 /*
831 * Start the USB transfers, if not already started.
832 */
833 usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
834 usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
835 }
836
/*
 * Initialize the adapter: stop/reset it, program the MAC address,
 * checksum offload, RX filters and default medium, then mark the
 * interface running and kick off media selection.  The register write
 * order follows the hardware bring-up sequence and must be preserved.
 */
static void
axge_init(struct usb_ether *ue)
{
	struct axge_softc *sc;
	if_t ifp;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	axge_stop(ue);

	axge_reset(sc);

	/* Set MAC address. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    if_getlladdr(ifp), ETHER_ADDR_LEN);

	/* Pause-frame water levels (magic values written as-is). */
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);

	/* Configure TX/RX checksum offloading. */
	axge_csum_cfg(ue);

	/* Configure RX filters. */
	axge_rxfilter(ue);

	/*
	 * XXX
	 * Controller supports wakeup on link change detection,
	 * magic packet and wakeup frame reception. But it seems
	 * there is no framework for USB ethernet suspend/wakeup.
	 * Disable all wakeup functions.
	 */
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR, 0);
	/* Read back to flush the write. */
	(void)axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR);

	/* Configure default medium type. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, MSR_GM | MSR_FD |
	    MSR_RFC | MSR_TFC | MSR_RE);

	usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	/* Switch to selected media. */
	axge_ifmedia_upd(ifp);
}
890
/*
 * Stop the adapter: disable the MAC receiver, clear the running/active
 * interface flags and the link flag, then stop both bulk transfers.
 */
static void
axge_stop(struct usb_ether *ue)
{
	struct axge_softc *sc;
	if_t ifp;
	uint16_t val;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	/* Clear the receive-enable bit, preserving other MSR settings. */
	val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR);
	val &= ~MSR_RE;
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);

	/* ifp may be NULL when called during a failed/partial attach. */
	if (ifp != NULL)
		if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->sc_flags &= ~AXGE_FLAG_LINK;

	/*
	 * Stop all the transfers, if not already stopped:
	 */
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
}
917
918 static int
axge_ioctl(if_t ifp,u_long cmd,caddr_t data)919 axge_ioctl(if_t ifp, u_long cmd, caddr_t data)
920 {
921 struct usb_ether *ue;
922 struct axge_softc *sc;
923 struct ifreq *ifr;
924 int error, mask, reinit;
925
926 ue = if_getsoftc(ifp);
927 sc = uether_getsc(ue);
928 ifr = (struct ifreq *)data;
929 error = 0;
930 reinit = 0;
931 if (cmd == SIOCSIFCAP) {
932 AXGE_LOCK(sc);
933 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
934 if ((mask & IFCAP_TXCSUM) != 0 &&
935 (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
936 if_togglecapenable(ifp, IFCAP_TXCSUM);
937 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
938 if_sethwassistbits(ifp, AXGE_CSUM_FEATURES, 0);
939 else
940 if_sethwassistbits(ifp, 0, AXGE_CSUM_FEATURES);
941 reinit++;
942 }
943 if ((mask & IFCAP_RXCSUM) != 0 &&
944 (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
945 if_togglecapenable(ifp, IFCAP_RXCSUM);
946 reinit++;
947 }
948 if (reinit > 0 && if_getdrvflags(ifp) & IFF_DRV_RUNNING)
949 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
950 else
951 reinit = 0;
952 AXGE_UNLOCK(sc);
953 if (reinit > 0)
954 uether_init(ue);
955 } else
956 error = uether_ioctl(ifp, cmd, data);
957
958 return (error);
959 }
960
/*
 * Parse one completed bulk-in transfer.  The chip aggregates multiple
 * ethernet frames per transfer and appends per-packet headers plus a
 * trailing 32-bit summary word (packet count in the low 16 bits,
 * packet-header area offset in the high 16 bits).  All offsets come
 * from the device and are bounds-checked before use.
 */
static void
axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
{
	struct axge_frame_rxhdr pkt_hdr;
	uint32_t rxhdr;
	uint32_t pos;
	uint32_t pkt_cnt, pkt_end;
	uint32_t hdr_off;
	uint32_t pktlen;

	/* verify we have enough data */
	if (actlen < (int)sizeof(rxhdr))
		return;

	pos = 0;

	/* The summary word lives in the last 4 bytes of the transfer. */
	usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
	rxhdr = le32toh(rxhdr);

	pkt_cnt = rxhdr & 0xFFFF;
	hdr_off = pkt_end = (rxhdr >> 16) & 0xFFFF;

	/*
	 * On older firmware:
	 * <----------------------- actlen ------------------------>
	 * [frame #0]...[frame #N][pkt_hdr #0]...[pkt_hdr #N][rxhdr]
	 *
	 * On newer firmware:
	 * <----------------------- actlen -----------------
	 * [frame #0]...[frame #N][pkt_hdr #0][dummy_hdr]...
	 * -------------------------------->
	 * ...[pkt_hdr #N][dummy_hdr][rxhdr]
	 *
	 * Each RX frame would be aligned on 8 bytes boundary. If
	 * RCR_IPE bit is set in AXGE_RCR register, there would be 2
	 * padding bytes and 6 dummy bytes(as the padding also should
	 * be aligned on 8 bytes boundary) for each RX frame to align
	 * IP header on 32bits boundary. Driver don't set RCR_IPE bit
	 * of AXGE_RCR register, so there should be no padding bytes
	 * which simplifies RX logic a lot.
	 *
	 * Further, newer firmware interweaves dummy headers that have
	 * pktlen == 0 and should be skipped without being seen as
	 * dropped frames.
	 */
	while (pkt_cnt--) {
		/* verify the header offset */
		if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
			DPRINTF("End of packet headers\n");
			break;
		}
		usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));
		pkt_hdr.status = le32toh(pkt_hdr.status);
		pktlen = AXGE_RXBYTES(pkt_hdr.status);
		hdr_off += sizeof(pkt_hdr);

		/* Skip dummy packet header. */
		if (pktlen == 0)
			continue;

		/* Frame data must not run into the header area. */
		if (pos + pktlen > pkt_end) {
			DPRINTF("Data position reached end\n");
			break;
		}

		if (AXGE_RX_ERR(pkt_hdr.status) != 0) {
			DPRINTF("Dropped a packet\n");
			if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1);
		} else
			axge_rxeof(ue, pc, pos, pktlen, pkt_hdr.status);
		/* Frames are padded to 8-byte boundaries. */
		pos += (pktlen + 7) & ~7;
	}
}
1034
/*
 * Hand one received frame to the stack: allocate an mbuf (cluster for
 * large frames), copy the frame out of the USB page cache, apply RX
 * checksum-offload results from the per-packet status word, and queue
 * the mbuf for uether_rxflush().
 */
static void
axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned offset,
    unsigned len, uint32_t status)
{
	if_t ifp;
	struct mbuf *m;

	ifp = ue->ue_ifp;
	/* Reject runts and frames too large for a cluster (+alignment). */
	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	if (len > MHLEN - ETHER_ALIGN)
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return;
	}
	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len;
	/* Shift payload so the IP header ends up 32-bit aligned. */
	m->m_data += ETHER_ALIGN;

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
		/* IPv4 header checksum verified by hardware. */
		if ((status & AXGE_RX_L3_CSUM_ERR) == 0 &&
		    (status & AXGE_RX_L3_TYPE_MASK) == AXGE_RX_L3_TYPE_IPV4)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
			    CSUM_IP_VALID;
		/* TCP/UDP checksum verified by hardware. */
		if ((status & AXGE_RX_L4_CSUM_ERR) == 0 &&
		    ((status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_UDP ||
		    (status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_TCP)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(void)mbufq_enqueue(&ue->ue_rxq, m);
}
1079
1080 static void
axge_csum_cfg(struct usb_ether * ue)1081 axge_csum_cfg(struct usb_ether *ue)
1082 {
1083 struct axge_softc *sc;
1084 if_t ifp;
1085 uint8_t csum;
1086
1087 sc = uether_getsc(ue);
1088 AXGE_LOCK_ASSERT(sc, MA_OWNED);
1089 ifp = uether_getifp(ue);
1090
1091 csum = 0;
1092 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
1093 csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
1094 axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);
1095
1096 csum = 0;
1097 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
1098 csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
1099 axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);
1100 }
1101