1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Written by: yen_cw@myson.com.tw
5 * Copyright (c) 2002 Myson Technology Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
30 */
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/sockio.h>
35 #include <sys/mbuf.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
38 #include <sys/socket.h>
39 #include <sys/queue.h>
40 #include <sys/types.h>
41 #include <sys/module.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44
45 #define NBPFILTER 1
46
47 #include <net/if.h>
48 #include <net/if_var.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_media.h>
52 #include <net/if_types.h>
53 #include <net/if_dl.h>
54 #include <net/bpf.h>
55
56 #include <vm/vm.h> /* for vtophys */
57 #include <vm/pmap.h> /* for vtophys */
58 #include <machine/bus.h>
59 #include <machine/resource.h>
60 #include <sys/bus.h>
61 #include <sys/rman.h>
62
63 #include <dev/pci/pcireg.h>
64 #include <dev/pci/pcivar.h>
65
66 /*
67 * #define MY_USEIOSPACE
68 */
69
70 static int MY_USEIOSPACE = 1;
71
72 #ifdef MY_USEIOSPACE
73 #define MY_RES SYS_RES_IOPORT
74 #define MY_RID MY_PCI_LOIO
75 #else
76 #define MY_RES SYS_RES_MEMORY
77 #define MY_RID MY_PCI_LOMEM
78 #endif
79
80 #include <dev/my/if_myreg.h>
81
82 /*
83 * Various supported device vendors/types and their names.
84 */
85 struct my_type *my_info_tmp;
86 static struct my_type my_devs[] = {
87 {MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
88 {MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
89 {MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"},
90 {0, 0, NULL}
91 };
92
93 /*
94 * Various supported PHY vendors/types and their names. Note that this driver
95 * will work with pretty much any MII-compliant PHY, so failure to positively
96 * identify the chip is not a fatal error.
97 */
98 static struct my_type my_phys[] = {
99 {MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
100 {SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
101 {AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
102 {MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
103 {LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
104 {0, 0, "<MII-compliant physical interface>"}
105 };
106
107 static int my_probe(device_t);
108 static int my_attach(device_t);
109 static int my_detach(device_t);
110 static int my_newbuf(struct my_softc *, struct my_chain_onefrag *);
111 static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
112 static void my_rxeof(struct my_softc *);
113 static void my_txeof(struct my_softc *);
114 static void my_txeoc(struct my_softc *);
115 static void my_intr(void *);
116 static void my_start(if_t);
117 static void my_start_locked(if_t);
118 static int my_ioctl(if_t, u_long, caddr_t);
119 static void my_init(void *);
120 static void my_init_locked(struct my_softc *);
121 static void my_stop(struct my_softc *);
122 static void my_autoneg_timeout(void *);
123 static void my_watchdog(void *);
124 static int my_shutdown(device_t);
125 static int my_ifmedia_upd(if_t);
126 static void my_ifmedia_sts(if_t, struct ifmediareq *);
127 static u_int16_t my_phy_readreg(struct my_softc *, int);
128 static void my_phy_writereg(struct my_softc *, int, int);
129 static void my_autoneg_xmit(struct my_softc *);
130 static void my_autoneg_mii(struct my_softc *, int, int);
131 static void my_setmode_mii(struct my_softc *, int);
132 static void my_getmode_mii(struct my_softc *);
133 static void my_setcfg(struct my_softc *, int);
134 static void my_setmulti(struct my_softc *);
135 static void my_reset(struct my_softc *);
136 static int my_list_rx_init(struct my_softc *);
137 static int my_list_tx_init(struct my_softc *);
138 static long my_send_cmd_to_phy(struct my_softc *, int, int);
139
140 #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
141 #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
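
/*
 * MY_SETBIT/MY_CLRBIT are read-modify-write helpers for the mapped CSRs,
 * e.g. MY_SETBIT(sc, MY_TCRRCR, MY_RE) turns the receiver on without
 * disturbing the other bits in the TX/RX control register.
 */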
142
143 static device_method_t my_methods[] = {
144 /* Device interface */
145 DEVMETHOD(device_probe, my_probe),
146 DEVMETHOD(device_attach, my_attach),
147 DEVMETHOD(device_detach, my_detach),
148 DEVMETHOD(device_shutdown, my_shutdown),
149
150 DEVMETHOD_END
151 };
152
153 static driver_t my_driver = {
154 "my",
155 my_methods,
156 sizeof(struct my_softc)
157 };
158
159 DRIVER_MODULE(my, pci, my_driver, 0, 0);
160 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, my, my_devs,
161 nitems(my_devs) - 1);
162 MODULE_DEPEND(my, pci, 1, 1, 1);
163 MODULE_DEPEND(my, ether, 1, 1, 1);
164
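/*
 * PHY access on most chips is bit-banged through the MDC/MDO/MDI bits of
 * the MY_MANAGEMENT register.  The cycle clocked out below is the usual
 * IEEE 802.3 clause 22 management frame: a preamble of 32 ones, the start
 * and read/write opcode bits (folded into the 'opcode' argument), the
 * 5-bit PHY address, the 5-bit register address and the turnaround bits;
 * the 16 data bits are then clocked by my_phy_readreg()/my_phy_writereg().
 */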
165 static long
my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
167 {
168 long miir;
169 int i;
170 int mask, data;
171
172 MY_LOCK_ASSERT(sc);
173
174 /* enable MII output */
175 miir = CSR_READ_4(sc, MY_MANAGEMENT);
176 miir &= 0xfffffff0;
177
178 miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;
179
180 /* send 32 1's preamble */
181 for (i = 0; i < 32; i++) {
182 /* low MDC; MDO is already high (miir) */
183 miir &= ~MY_MASK_MIIR_MII_MDC;
184 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
185
186 /* high MDC */
187 miir |= MY_MASK_MIIR_MII_MDC;
188 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
189 }
190
191 /* calculate ST+OP+PHYAD+REGAD+TA */
192 data = opcode | (sc->my_phy_addr << 7) | (regad << 2);
193
194 /* sent out */
195 mask = 0x8000;
196 while (mask) {
197 /* low MDC, prepare MDO */
198 miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
199 if (mask & data)
200 miir |= MY_MASK_MIIR_MII_MDO;
201
202 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
203 /* high MDC */
204 miir |= MY_MASK_MIIR_MII_MDC;
205 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
206 DELAY(30);
207
208 /* next */
209 mask >>= 1;
210 if (mask == 0x2 && opcode == MY_OP_READ)
211 miir &= ~MY_MASK_MIIR_MII_WRITE;
212 }
213
214 return miir;
215 }
216
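/*
 * PHY register access comes in two flavours: the MTD803 maps its internal
 * PHY registers directly into the chip's register space at MY_PHYBASE, so
 * a plain 16-bit CSR access suffices; every other supported chip goes
 * through the bit-banged MDIO cycle built by my_send_cmd_to_phy() above.
 */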
217 static u_int16_t
my_phy_readreg(struct my_softc * sc, int reg)
219 {
220 long miir;
221 int mask, data;
222
223 MY_LOCK_ASSERT(sc);
224
225 if (sc->my_info->my_did == MTD803ID)
226 data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
227 else {
228 miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);
229
230 /* read data */
231 mask = 0x8000;
232 data = 0;
233 while (mask) {
234 /* low MDC */
235 miir &= ~MY_MASK_MIIR_MII_MDC;
236 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
237
238 /* read MDI */
239 miir = CSR_READ_4(sc, MY_MANAGEMENT);
240 if (miir & MY_MASK_MIIR_MII_MDI)
241 data |= mask;
242
243 /* high MDC, and wait */
244 miir |= MY_MASK_MIIR_MII_MDC;
245 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
246 DELAY(30);
247
248 /* next */
249 mask >>= 1;
250 }
251
252 /* low MDC */
253 miir &= ~MY_MASK_MIIR_MII_MDC;
254 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
255 }
256
257 return (u_int16_t) data;
258 }
259
260 static void
my_phy_writereg(struct my_softc * sc, int reg, int data)
262 {
263 long miir;
264 int mask;
265
266 MY_LOCK_ASSERT(sc);
267
268 if (sc->my_info->my_did == MTD803ID)
269 CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
270 else {
271 miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);
272
273 /* write data */
274 mask = 0x8000;
275 while (mask) {
276 /* low MDC, prepare MDO */
277 miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
278 if (mask & data)
279 miir |= MY_MASK_MIIR_MII_MDO;
280 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
281 DELAY(1);
282
283 /* high MDC */
284 miir |= MY_MASK_MIIR_MII_MDC;
285 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
286 DELAY(1);
287
288 /* next */
289 mask >>= 1;
290 }
291
292 /* low MDC */
293 miir &= ~MY_MASK_MIIR_MII_MDC;
294 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
295 }
296 return;
297 }
298
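/*
 * Multicast filtering uses a 64-bit hash table split across the MAR0/MAR1
 * registers.  For each multicast address the upper 6 bits of the inverted
 * big-endian CRC32 of the address select a bit position: values 0-31 land
 * in MAR0, 32-63 in MAR1.
 */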
299 static u_int
my_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
301 {
302 uint32_t *hashes = arg;
303 int h;
304
305 h = ~ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
306 if (h < 32)
307 hashes[0] |= (1 << h);
308 else
309 hashes[1] |= (1 << (h - 32));
310
311 return (1);
312 }
313 /*
314 * Program the 64-bit multicast hash filter.
315 */
316 static void
my_setmulti(struct my_softc * sc)
318 {
319 if_t ifp;
320 u_int32_t hashes[2] = {0, 0};
321 u_int32_t rxfilt;
322
323 MY_LOCK_ASSERT(sc);
324
325 ifp = sc->my_ifp;
326
327 rxfilt = CSR_READ_4(sc, MY_TCRRCR);
328
329 if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
330 rxfilt |= MY_AM;
331 CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
332 CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
333 CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);
334
335 return;
336 }
337 /* first, zot all the existing hash bits */
338 CSR_WRITE_4(sc, MY_MAR0, 0);
339 CSR_WRITE_4(sc, MY_MAR1, 0);
340
341 /* now program new ones */
342 if (if_foreach_llmaddr(ifp, my_hash_maddr, hashes) > 0)
343 rxfilt |= MY_AM;
344 else
345 rxfilt &= ~MY_AM;
346 CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
347 CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
348 CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
349 }
350
351 /*
352 * Initiate an autonegotiation session.
353 */
354 static void
my_autoneg_xmit(struct my_softc * sc)
356 {
357 u_int16_t phy_sts = 0;
358
359 MY_LOCK_ASSERT(sc);
360
361 my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
362 DELAY(500);
363 while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);
364
365 phy_sts = my_phy_readreg(sc, PHY_BMCR);
366 phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
367 my_phy_writereg(sc, PHY_BMCR, phy_sts);
368
369 return;
370 }
371
372 static void
my_autoneg_timeout(void *arg)
374 {
375 struct my_softc *sc;
376
377 sc = arg;
378 MY_LOCK_ASSERT(sc);
379 my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
380 }
381
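/*
 * my_autoneg_mii() is driven by three flags: MY_FLAG_FORCEDELAY kicks off
 * autonegotiation and busy-waits five seconds for it (probe time only),
 * MY_FLAG_SCHEDDELAY starts a session and arms a five second callout
 * instead, and MY_FLAG_DELAYTIMEO is passed by that callout
 * (my_autoneg_timeout() above) when it is time to harvest the results.
 */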
382 /*
383 * Invoke autonegotiation on a PHY.
384 */
385 static void
my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
387 {
388 u_int16_t phy_sts = 0, media, advert, ability;
389 u_int16_t ability2 = 0;
390 if_t ifp;
391 struct ifmedia *ifm;
392
393 MY_LOCK_ASSERT(sc);
394
395 ifm = &sc->ifmedia;
396 ifp = sc->my_ifp;
397
398 ifm->ifm_media = IFM_ETHER | IFM_AUTO;
399
400 #ifndef FORCE_AUTONEG_TFOUR
401 /*
402 * First, see if autoneg is supported. If not, there's no point in
403 * continuing.
404 */
405 phy_sts = my_phy_readreg(sc, PHY_BMSR);
406 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
407 if (verbose)
408 device_printf(sc->my_dev,
409 "autonegotiation not supported\n");
410 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
411 return;
412 }
413 #endif
414 switch (flag) {
415 case MY_FLAG_FORCEDELAY:
416 /*
417 * XXX Never use this option anywhere but in the probe
418 * routine: making the kernel stop dead in its tracks for
419 * three whole seconds after we've gone multi-user is really
420 * bad manners.
421 */
422 my_autoneg_xmit(sc);
423 DELAY(5000000);
424 break;
425 case MY_FLAG_SCHEDDELAY:
426 /*
427 * Wait for the transmitter to go idle before starting an
428 * autoneg session, otherwise my_start() may clobber our
429 * timeout, and we don't want to allow transmission during an
430 * autoneg session since that can screw it up.
431 */
432 if (sc->my_cdata.my_tx_head != NULL) {
			sc->my_want_auto = 1;
			return;
436 }
437 my_autoneg_xmit(sc);
438 callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout,
439 sc);
440 sc->my_autoneg = 1;
441 sc->my_want_auto = 0;
442 return;
443 case MY_FLAG_DELAYTIMEO:
444 callout_stop(&sc->my_autoneg_timer);
445 sc->my_autoneg = 0;
446 break;
447 default:
448 device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag);
449 return;
450 }
451
452 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
453 if (verbose)
454 device_printf(sc->my_dev, "autoneg complete, ");
455 phy_sts = my_phy_readreg(sc, PHY_BMSR);
456 } else {
457 if (verbose)
458 device_printf(sc->my_dev, "autoneg not complete, ");
459 }
460
461 media = my_phy_readreg(sc, PHY_BMCR);
462
463 /* Link is good. Report modes and set duplex mode. */
464 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
465 if (verbose)
466 device_printf(sc->my_dev, "link status good. ");
467 advert = my_phy_readreg(sc, PHY_ANAR);
468 ability = my_phy_readreg(sc, PHY_LPAR);
469 if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
470 (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
471 ability2 = my_phy_readreg(sc, PHY_1000SR);
472 if (ability2 & PHY_1000SR_1000BTXFULL) {
473 advert = 0;
474 ability = 0;
475 /*
476 * this version did not support 1000M,
477 * ifm->ifm_media =
478 * IFM_ETHER|IFM_1000_T|IFM_FDX;
479 */
480 ifm->ifm_media =
481 IFM_ETHER | IFM_100_TX | IFM_FDX;
482 media &= ~PHY_BMCR_SPEEDSEL;
483 media |= PHY_BMCR_1000;
484 media |= PHY_BMCR_DUPLEX;
485 printf("(full-duplex, 1000Mbps)\n");
486 } else if (ability2 & PHY_1000SR_1000BTXHALF) {
487 advert = 0;
488 ability = 0;
489 /*
490 * this version did not support 1000M,
491 * ifm->ifm_media = IFM_ETHER|IFM_1000_T;
492 */
493 ifm->ifm_media = IFM_ETHER | IFM_100_TX;
494 media &= ~PHY_BMCR_SPEEDSEL;
495 media &= ~PHY_BMCR_DUPLEX;
496 media |= PHY_BMCR_1000;
497 printf("(half-duplex, 1000Mbps)\n");
498 }
499 }
500 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
501 ifm->ifm_media = IFM_ETHER | IFM_100_T4;
502 media |= PHY_BMCR_SPEEDSEL;
503 media &= ~PHY_BMCR_DUPLEX;
504 printf("(100baseT4)\n");
505 } else if (advert & PHY_ANAR_100BTXFULL &&
506 ability & PHY_ANAR_100BTXFULL) {
507 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
508 media |= PHY_BMCR_SPEEDSEL;
509 media |= PHY_BMCR_DUPLEX;
510 printf("(full-duplex, 100Mbps)\n");
511 } else if (advert & PHY_ANAR_100BTXHALF &&
512 ability & PHY_ANAR_100BTXHALF) {
513 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
514 media |= PHY_BMCR_SPEEDSEL;
515 media &= ~PHY_BMCR_DUPLEX;
516 printf("(half-duplex, 100Mbps)\n");
517 } else if (advert & PHY_ANAR_10BTFULL &&
518 ability & PHY_ANAR_10BTFULL) {
519 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
520 media &= ~PHY_BMCR_SPEEDSEL;
521 media |= PHY_BMCR_DUPLEX;
522 printf("(full-duplex, 10Mbps)\n");
523 } else if (advert) {
524 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
525 media &= ~PHY_BMCR_SPEEDSEL;
526 media &= ~PHY_BMCR_DUPLEX;
527 printf("(half-duplex, 10Mbps)\n");
528 }
529 media &= ~PHY_BMCR_AUTONEGENBL;
530
531 /* Set ASIC's duplex mode to match the PHY. */
532 my_phy_writereg(sc, PHY_BMCR, media);
533 my_setcfg(sc, media);
534 } else {
535 if (verbose)
536 device_printf(sc->my_dev, "no carrier\n");
537 }
538
539 my_init_locked(sc);
540 if (sc->my_tx_pend) {
541 sc->my_autoneg = 0;
542 sc->my_tx_pend = 0;
543 my_start_locked(ifp);
544 }
545 return;
546 }
547
548 /*
 * Query the PHY for the media types it supports.
550 */
551 static void
my_getmode_mii(struct my_softc * sc)
553 {
554 u_int16_t bmsr;
555 if_t ifp;
556
557 MY_LOCK_ASSERT(sc);
558 ifp = sc->my_ifp;
559 bmsr = my_phy_readreg(sc, PHY_BMSR);
560 if (bootverbose)
561 device_printf(sc->my_dev, "PHY status word: %x\n", bmsr);
562
563 /* fallback */
564 sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
565
566 if (bmsr & PHY_BMSR_10BTHALF) {
567 if (bootverbose)
568 device_printf(sc->my_dev,
569 "10Mbps half-duplex mode supported\n");
570 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
571 0, NULL);
572 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
573 }
574 if (bmsr & PHY_BMSR_10BTFULL) {
575 if (bootverbose)
576 device_printf(sc->my_dev,
577 "10Mbps full-duplex mode supported\n");
578
579 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
580 0, NULL);
581 sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
582 }
583 if (bmsr & PHY_BMSR_100BTXHALF) {
584 if (bootverbose)
585 device_printf(sc->my_dev,
586 "100Mbps half-duplex mode supported\n");
587 if_setbaudrate(ifp, 100000000);
588 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
589 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
590 0, NULL);
591 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
592 }
593 if (bmsr & PHY_BMSR_100BTXFULL) {
594 if (bootverbose)
595 device_printf(sc->my_dev,
596 "100Mbps full-duplex mode supported\n");
597 if_setbaudrate(ifp, 100000000);
598 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
599 0, NULL);
600 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
601 }
602 /* Some also support 100BaseT4. */
603 if (bmsr & PHY_BMSR_100BT4) {
604 if (bootverbose)
605 device_printf(sc->my_dev, "100baseT4 mode supported\n");
606 if_setbaudrate(ifp, 100000000);
607 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
608 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
609 #ifdef FORCE_AUTONEG_TFOUR
610 if (bootverbose)
611 device_printf(sc->my_dev,
612 "forcing on autoneg support for BT4\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
614 sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
615 #endif
616 }
617 #if 0 /* this version did not support 1000M, */
618 if (sc->my_pinfo->my_vid == MarvellPHYID0) {
619 if (bootverbose)
620 device_printf(sc->my_dev,
621 "1000Mbps half-duplex mode supported\n");
622
623 if_setbaudrate(ifp, 1000000000);
624 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
625 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
626 0, NULL);
627 if (bootverbose)
628 device_printf(sc->my_dev,
629 "1000Mbps full-duplex mode supported\n");
630 if_setbaudrate(ifp, 1000000000);
631 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
632 0, NULL);
633 sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
634 }
635 #endif
636 if (bmsr & PHY_BMSR_CANAUTONEG) {
637 if (bootverbose)
638 device_printf(sc->my_dev, "autoneg supported\n");
639 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
640 sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
641 }
642 return;
643 }
644
645 /*
646 * Set speed and duplex mode.
647 */
648 static void
my_setmode_mii(struct my_softc * sc, int media)
650 {
651 u_int16_t bmcr;
652
653 MY_LOCK_ASSERT(sc);
654 /*
655 * If an autoneg session is in progress, stop it.
656 */
657 if (sc->my_autoneg) {
658 device_printf(sc->my_dev, "canceling autoneg session\n");
659 callout_stop(&sc->my_autoneg_timer);
660 sc->my_autoneg = sc->my_want_auto = 0;
661 bmcr = my_phy_readreg(sc, PHY_BMCR);
662 bmcr &= ~PHY_BMCR_AUTONEGENBL;
663 my_phy_writereg(sc, PHY_BMCR, bmcr);
664 }
665 device_printf(sc->my_dev, "selecting MII, ");
666 bmcr = my_phy_readreg(sc, PHY_BMCR);
667 bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
668 PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);
669
670 #if 0 /* this version did not support 1000M, */
671 if (IFM_SUBTYPE(media) == IFM_1000_T) {
672 printf("1000Mbps/T4, half-duplex\n");
673 bmcr &= ~PHY_BMCR_SPEEDSEL;
674 bmcr &= ~PHY_BMCR_DUPLEX;
675 bmcr |= PHY_BMCR_1000;
676 }
677 #endif
678 if (IFM_SUBTYPE(media) == IFM_100_T4) {
679 printf("100Mbps/T4, half-duplex\n");
680 bmcr |= PHY_BMCR_SPEEDSEL;
681 bmcr &= ~PHY_BMCR_DUPLEX;
682 }
683 if (IFM_SUBTYPE(media) == IFM_100_TX) {
684 printf("100Mbps, ");
685 bmcr |= PHY_BMCR_SPEEDSEL;
686 }
687 if (IFM_SUBTYPE(media) == IFM_10_T) {
688 printf("10Mbps, ");
689 bmcr &= ~PHY_BMCR_SPEEDSEL;
690 }
691 if ((media & IFM_GMASK) == IFM_FDX) {
692 printf("full duplex\n");
693 bmcr |= PHY_BMCR_DUPLEX;
694 } else {
695 printf("half duplex\n");
696 bmcr &= ~PHY_BMCR_DUPLEX;
697 }
698 my_phy_writereg(sc, PHY_BMCR, bmcr);
699 my_setcfg(sc, bmcr);
700 return;
701 }
702
703 /*
704 * The Myson manual states that in order to fiddle with the 'full-duplex' and
705 * '100Mbps' bits in the netconfig register, we first have to put the
706 * transmit and/or receive logic in the idle state.
707 */
708 static void
my_setcfg(struct my_softc * sc, int bmcr)
710 {
711 int i, restart = 0;
712
713 MY_LOCK_ASSERT(sc);
714 if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
715 restart = 1;
716 MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
717 for (i = 0; i < MY_TIMEOUT; i++) {
718 DELAY(10);
719 if (!(CSR_READ_4(sc, MY_TCRRCR) &
720 (MY_TXRUN | MY_RXRUN)))
721 break;
722 }
723 if (i == MY_TIMEOUT)
724 device_printf(sc->my_dev,
725 "failed to force tx and rx to idle \n");
726 }
727 MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
728 MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
729 if (bmcr & PHY_BMCR_1000)
730 MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
731 else if (!(bmcr & PHY_BMCR_SPEEDSEL))
732 MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
733 if (bmcr & PHY_BMCR_DUPLEX)
734 MY_SETBIT(sc, MY_TCRRCR, MY_FD);
735 else
736 MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
737 if (restart)
738 MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
739 return;
740 }
741
742 static void
my_reset(struct my_softc * sc)
744 {
745 int i;
746
747 MY_LOCK_ASSERT(sc);
748 MY_SETBIT(sc, MY_BCR, MY_SWR);
749 for (i = 0; i < MY_TIMEOUT; i++) {
750 DELAY(10);
751 if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
752 break;
753 }
754 if (i == MY_TIMEOUT)
755 device_printf(sc->my_dev, "reset never completed!\n");
756
757 /* Wait a little while for the chip to get its brains in order. */
758 DELAY(1000);
759 return;
760 }
761
762 /*
763 * Probe for a Myson chip. Check the PCI vendor and device IDs against our
764 * list and return a device name if we find a match.
765 */
766 static int
my_probe(device_t dev)
768 {
769 struct my_type *t;
770
771 t = my_devs;
772 while (t->my_name != NULL) {
773 if ((pci_get_vendor(dev) == t->my_vid) &&
774 (pci_get_device(dev) == t->my_did)) {
775 device_set_desc(dev, t->my_name);
776 my_info_tmp = t;
777 return (BUS_PROBE_DEFAULT);
778 }
779 t++;
780 }
781 return (ENXIO);
782 }
783
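/*
 * Attach walks the usual steps: map the CSRs (I/O or memory space), reset
 * the chip, read the station address out of MY_PAR0, carve an aligned
 * descriptor area out of a malloc()ed buffer, locate a PHY (except on the
 * MTD803, which uses the built-in one), run a forced autonegotiation pass
 * to seed the media word, and finally hook up ether_ifattach() and the
 * interrupt handler.
 */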
784 /*
785 * Attach the interface. Allocate softc structures, do ifmedia setup and
786 * ethernet/BPF attach.
787 */
788 static int
my_attach(device_t dev)
790 {
791 int i;
792 u_char eaddr[ETHER_ADDR_LEN];
793 u_int32_t iobase;
794 struct my_softc *sc;
795 if_t ifp;
796 int media = IFM_ETHER | IFM_100_TX | IFM_FDX;
797 unsigned int round;
798 caddr_t roundptr;
799 struct my_type *p;
800 u_int16_t phy_vid, phy_did, phy_sts = 0;
801 int rid, error = 0;
802
803 sc = device_get_softc(dev);
804 sc->my_dev = dev;
805 mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
806 MTX_DEF);
807 callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0);
808 callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0);
809
810 /*
811 * Map control/status registers.
812 */
813 pci_enable_busmaster(dev);
814
815 if (my_info_tmp->my_did == MTD800ID) {
816 iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
817 if (iobase & 0x300)
818 MY_USEIOSPACE = 0;
819 }
820
821 rid = MY_RID;
822 sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);
823
824 if (sc->my_res == NULL) {
825 device_printf(dev, "couldn't map ports/memory\n");
826 error = ENXIO;
827 goto destroy_mutex;
828 }
829 sc->my_btag = rman_get_bustag(sc->my_res);
830 sc->my_bhandle = rman_get_bushandle(sc->my_res);
831
832 rid = 0;
833 sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
834 RF_SHAREABLE | RF_ACTIVE);
835
836 if (sc->my_irq == NULL) {
837 device_printf(dev, "couldn't map interrupt\n");
838 error = ENXIO;
839 goto release_io;
840 }
841
842 sc->my_info = my_info_tmp;
843
844 /* Reset the adapter. */
845 MY_LOCK(sc);
846 my_reset(sc);
847 MY_UNLOCK(sc);
848
849 /*
850 * Get station address
851 */
852 for (i = 0; i < ETHER_ADDR_LEN; ++i)
853 eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);
854
855 sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
856 M_DEVBUF, M_NOWAIT);
857 if (sc->my_ldata_ptr == NULL) {
858 device_printf(dev, "no memory for list buffers!\n");
859 error = ENXIO;
860 goto release_irq;
861 }
862 sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
863 round = (uintptr_t)sc->my_ldata_ptr & 0xF;
864 roundptr = sc->my_ldata_ptr;
865 for (i = 0; i < 8; i++) {
866 if (round % 8) {
867 round++;
868 roundptr++;
869 } else
870 break;
871 }
872 sc->my_ldata = (struct my_list_data *) roundptr;
873 bzero(sc->my_ldata, sizeof(struct my_list_data));
874
875 ifp = sc->my_ifp = if_alloc(IFT_ETHER);
876 if_setsoftc(ifp, sc);
877 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
878 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
879 if_setioctlfn(ifp, my_ioctl);
880 if_setstartfn(ifp, my_start);
881 if_setinitfn(ifp, my_init);
882 if_setbaudrate(ifp, 10000000);
883 if_setsendqlen(ifp, ifqmaxlen);
884 if_setsendqready(ifp);
885
886 if (sc->my_info->my_did == MTD803ID)
887 sc->my_pinfo = my_phys;
888 else {
889 if (bootverbose)
890 device_printf(dev, "probing for a PHY\n");
891 MY_LOCK(sc);
892 for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
893 if (bootverbose)
894 device_printf(dev, "checking address: %d\n", i);
895 sc->my_phy_addr = i;
896 phy_sts = my_phy_readreg(sc, PHY_BMSR);
897 if ((phy_sts != 0) && (phy_sts != 0xffff))
898 break;
899 else
900 phy_sts = 0;
901 }
902 if (phy_sts) {
903 phy_vid = my_phy_readreg(sc, PHY_VENID);
904 phy_did = my_phy_readreg(sc, PHY_DEVID);
905 if (bootverbose) {
906 device_printf(dev, "found PHY at address %d, ",
907 sc->my_phy_addr);
908 printf("vendor id: %x device id: %x\n",
909 phy_vid, phy_did);
910 }
911 p = my_phys;
912 while (p->my_vid) {
913 if (phy_vid == p->my_vid) {
914 sc->my_pinfo = p;
915 break;
916 }
917 p++;
918 }
919 if (sc->my_pinfo == NULL)
920 sc->my_pinfo = &my_phys[PHY_UNKNOWN];
921 if (bootverbose)
922 device_printf(dev, "PHY type: %s\n",
923 sc->my_pinfo->my_name);
924 } else {
925 MY_UNLOCK(sc);
			device_printf(dev, "MII without any PHY!\n");
927 error = ENXIO;
928 goto free_if;
929 }
930 MY_UNLOCK(sc);
931 }
932
933 /* Do ifmedia setup. */
934 ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
935 MY_LOCK(sc);
936 my_getmode_mii(sc);
937 my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
938 media = sc->ifmedia.ifm_media;
939 my_stop(sc);
940 MY_UNLOCK(sc);
941 ifmedia_set(&sc->ifmedia, media);
942
943 ether_ifattach(ifp, eaddr);
944
945 error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
946 NULL, my_intr, sc, &sc->my_intrhand);
947
948 if (error) {
949 device_printf(dev, "couldn't set up irq\n");
950 goto detach_if;
951 }
952
953 return (0);
954
955 detach_if:
956 ether_ifdetach(ifp);
957 free_if:
958 if_free(ifp);
959 free(sc->my_ldata_ptr, M_DEVBUF);
960 release_irq:
961 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
962 release_io:
963 bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
964 destroy_mutex:
965 mtx_destroy(&sc->my_mtx);
966 return (error);
967 }
968
969 static int
my_detach(device_t dev)
971 {
972 struct my_softc *sc;
973 if_t ifp;
974
975 sc = device_get_softc(dev);
976 ifp = sc->my_ifp;
977 ether_ifdetach(ifp);
978 MY_LOCK(sc);
979 my_stop(sc);
980 MY_UNLOCK(sc);
981 bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);
982 callout_drain(&sc->my_watchdog);
983 callout_drain(&sc->my_autoneg_timer);
984
985 if_free(ifp);
986 free(sc->my_ldata_ptr, M_DEVBUF);
987
988 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
989 bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
990 mtx_destroy(&sc->my_mtx);
991 return (0);
992 }
993
994 /*
995 * Initialize the transmit descriptors.
996 */
997 static int
my_list_tx_init(struct my_softc * sc)
999 {
1000 struct my_chain_data *cd;
1001 struct my_list_data *ld;
1002 int i;
1003
1004 MY_LOCK_ASSERT(sc);
1005 cd = &sc->my_cdata;
1006 ld = sc->my_ldata;
1007 for (i = 0; i < MY_TX_LIST_CNT; i++) {
1008 cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
1009 if (i == (MY_TX_LIST_CNT - 1))
1010 cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
1011 else
1012 cd->my_tx_chain[i].my_nextdesc =
1013 &cd->my_tx_chain[i + 1];
1014 }
1015 cd->my_tx_free = &cd->my_tx_chain[0];
1016 cd->my_tx_tail = cd->my_tx_head = NULL;
1017 return (0);
1018 }
1019
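/*
 * The descriptor rings live in the malloc()ed, 8-byte-aligned my_ldata
 * area set up at attach time; the chip is handed physical addresses
 * obtained with vtophys() rather than busdma-mapped ones.
 */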
1020 /*
1021 * Initialize the RX descriptors and allocate mbufs for them. Note that we
1022 * arrange the descriptors in a closed ring, so that the last descriptor
1023 * points back to the first.
1024 */
1025 static int
my_list_rx_init(struct my_softc * sc)
1027 {
1028 struct my_chain_data *cd;
1029 struct my_list_data *ld;
1030 int i;
1031
1032 MY_LOCK_ASSERT(sc);
1033 cd = &sc->my_cdata;
1034 ld = sc->my_ldata;
1035 for (i = 0; i < MY_RX_LIST_CNT; i++) {
1036 cd->my_rx_chain[i].my_ptr =
1037 (struct my_desc *) & ld->my_rx_list[i];
1038 if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
			return (ENOBUFS);
1041 }
1042 if (i == (MY_RX_LIST_CNT - 1)) {
1043 cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
1044 ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
1045 } else {
1046 cd->my_rx_chain[i].my_nextdesc =
1047 &cd->my_rx_chain[i + 1];
1048 ld->my_rx_list[i].my_next =
1049 vtophys(&ld->my_rx_list[i + 1]);
1050 }
1051 }
1052 cd->my_rx_head = &cd->my_rx_chain[0];
1053 return (0);
1054 }
1055
1056 /*
1057 * Initialize an RX descriptor and attach an MBUF cluster.
1058 */
1059 static int
my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
1061 {
1062 struct mbuf *m_new = NULL;
1063
1064 MY_LOCK_ASSERT(sc);
1065 MGETHDR(m_new, M_NOWAIT, MT_DATA);
1066 if (m_new == NULL) {
1067 device_printf(sc->my_dev,
1068 "no memory for rx list -- packet dropped!\n");
1069 return (ENOBUFS);
1070 }
1071 if (!(MCLGET(m_new, M_NOWAIT))) {
1072 device_printf(sc->my_dev,
1073 "no memory for rx list -- packet dropped!\n");
1074 m_freem(m_new);
1075 return (ENOBUFS);
1076 }
1077 c->my_mbuf = m_new;
1078 c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
1079 c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
1080 c->my_ptr->my_status = MY_OWNByNIC;
1081 return (0);
1082 }
1083
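/*
 * Descriptor ownership follows the usual convention: software sets
 * MY_OWNByNIC in my_status to hand a descriptor to the chip, and the chip
 * clears it on completion.  On the transmit side MY_UNSENT marks
 * descriptors that have been filled in but not yet released to the
 * hardware (see my_txeof()/my_txeoc()).
 */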
1084 /*
1085 * A frame has been uploaded: pass the resulting mbuf chain up to the higher
1086 * level protocols.
1087 */
1088 static void
my_rxeof(struct my_softc * sc)
1090 {
1091 struct ether_header *eh;
1092 struct mbuf *m;
1093 if_t ifp;
1094 struct my_chain_onefrag *cur_rx;
1095 int total_len = 0;
1096 u_int32_t rxstat;
1097
1098 MY_LOCK_ASSERT(sc);
1099 ifp = sc->my_ifp;
1100 while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
1101 & MY_OWNByNIC)) {
1102 cur_rx = sc->my_cdata.my_rx_head;
1103 sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;
1104
1105 if (rxstat & MY_ES) { /* error summary: give up this rx pkt */
1106 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1107 cur_rx->my_ptr->my_status = MY_OWNByNIC;
1108 continue;
1109 }
1110 /* No errors; receive the packet. */
1111 total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
1112 total_len -= ETHER_CRC_LEN;
1113
1114 if (total_len < MINCLSIZE) {
1115 m = m_devget(mtod(cur_rx->my_mbuf, char *),
1116 total_len, 0, ifp, NULL);
1117 cur_rx->my_ptr->my_status = MY_OWNByNIC;
1118 if (m == NULL) {
1119 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1120 continue;
1121 }
1122 } else {
1123 m = cur_rx->my_mbuf;
1124 /*
1125 * Try to conjure up a new mbuf cluster. If that
1126 * fails, it means we have an out of memory condition
1127 * and should leave the buffer in place and continue.
1128 * This will result in a lost packet, but there's
1129 * little else we can do in this situation.
1130 */
1131 if (my_newbuf(sc, cur_rx) == ENOBUFS) {
1132 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1133 cur_rx->my_ptr->my_status = MY_OWNByNIC;
1134 continue;
1135 }
1136 m->m_pkthdr.rcvif = ifp;
1137 m->m_pkthdr.len = m->m_len = total_len;
1138 }
1139 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1140 eh = mtod(m, struct ether_header *);
1141 #if NBPFILTER > 0
1142 /*
1143 * Handle BPF listeners. Let the BPF user see the packet, but
1144 * don't pass it up to the ether_input() layer unless it's a
1145 * broadcast packet, multicast packet, matches our ethernet
1146 * address or the interface is in promiscuous mode.
1147 */
1148 if (bpf_peers_present_if(ifp)) {
1149 bpf_mtap_if(ifp, m);
1150 if (if_getflags(ifp) & IFF_PROMISC &&
1151 (bcmp(eh->ether_dhost, if_getlladdr(sc->my_ifp),
1152 ETHER_ADDR_LEN) &&
1153 (eh->ether_dhost[0] & 1) == 0)) {
1154 m_freem(m);
1155 continue;
1156 }
1157 }
1158 #endif
1159 MY_UNLOCK(sc);
1160 if_input(ifp, m);
1161 MY_LOCK(sc);
1162 }
1163 return;
1164 }
1165
1166 /*
1167 * A frame was downloaded to the chip. It's safe for us to clean up the list
1168 * buffers.
1169 */
1170 static void
my_txeof(struct my_softc * sc)
1172 {
1173 struct my_chain *cur_tx;
1174 if_t ifp;
1175
1176 MY_LOCK_ASSERT(sc);
1177 ifp = sc->my_ifp;
1178 /* Clear the timeout timer. */
1179 sc->my_timer = 0;
1180 if (sc->my_cdata.my_tx_head == NULL) {
1181 return;
1182 }
1183 /*
1184 * Go through our tx list and free mbufs for those frames that have
1185 * been transmitted.
1186 */
1187 while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
1188 u_int32_t txstat;
1189
1190 cur_tx = sc->my_cdata.my_tx_head;
1191 txstat = MY_TXSTATUS(cur_tx);
1192 if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
1193 break;
1194 if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
1195 if (txstat & MY_TXERR) {
1196 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1197 if (txstat & MY_EC) /* excessive collision */
1198 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1199 if (txstat & MY_LC) /* late collision */
1200 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1201 }
1202 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
1203 (txstat & MY_NCRMASK) >> MY_NCRShift);
1204 }
1205 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1206 m_freem(cur_tx->my_mbuf);
1207 cur_tx->my_mbuf = NULL;
1208 if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
1209 sc->my_cdata.my_tx_head = NULL;
1210 sc->my_cdata.my_tx_tail = NULL;
1211 break;
1212 }
1213 sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
1214 }
1215 if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
		    CSR_READ_4(sc, MY_TSR) & MY_NCRMask);
1217 }
1218 return;
1219 }
1220
1221 /*
1222 * TX 'end of channel' interrupt handler.
1223 */
1224 static void
my_txeoc(struct my_softc * sc)
1226 {
1227 if_t ifp;
1228
1229 MY_LOCK_ASSERT(sc);
1230 ifp = sc->my_ifp;
1231 sc->my_timer = 0;
1232 if (sc->my_cdata.my_tx_head == NULL) {
1233 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1234 sc->my_cdata.my_tx_tail = NULL;
1235 if (sc->my_want_auto)
1236 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1237 } else {
1238 if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
1239 MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
1240 sc->my_timer = 5;
1241 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
1242 }
1243 }
1244 return;
1245 }
1246
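/*
 * Interrupt handling: mask everything via MY_IMR, then loop reading and
 * acknowledging MY_ISR until no interesting (MY_INTRS) bits remain,
 * dispatching to the RX/TX completion handlers as we go.  Interrupts are
 * re-enabled and the transmit queue kicked on the way out.
 */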
1247 static void
my_intr(void *arg)
1249 {
1250 struct my_softc *sc;
1251 if_t ifp;
1252 u_int32_t status;
1253
1254 sc = arg;
1255 MY_LOCK(sc);
1256 ifp = sc->my_ifp;
1257 if (!(if_getflags(ifp) & IFF_UP)) {
1258 MY_UNLOCK(sc);
1259 return;
1260 }
1261 /* Disable interrupts. */
1262 CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1263
1264 for (;;) {
1265 status = CSR_READ_4(sc, MY_ISR);
1266 status &= MY_INTRS;
1267 if (status)
1268 CSR_WRITE_4(sc, MY_ISR, status);
1269 else
1270 break;
1271
1272 if (status & MY_RI) /* receive interrupt */
1273 my_rxeof(sc);
1274
1275 if ((status & MY_RBU) || (status & MY_RxErr)) {
1276 /* rx buffer unavailable or rx error */
1277 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1278 #ifdef foo
1279 my_stop(sc);
1280 my_reset(sc);
1281 my_init_locked(sc);
1282 #endif
1283 }
1284 if (status & MY_TI) /* tx interrupt */
1285 my_txeof(sc);
1286 if (status & MY_ETI) /* tx early interrupt */
1287 my_txeof(sc);
1288 if (status & MY_TBU) /* tx buffer unavailable */
1289 my_txeoc(sc);
1290
1291 #if 0 /* 90/1/18 delete */
1292 if (status & MY_FBE) {
1293 my_reset(sc);
1294 my_init_locked(sc);
1295 }
1296 #endif
1297 }
1298
1299 /* Re-enable interrupts. */
1300 CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1301 if (!if_sendq_empty(ifp))
1302 my_start_locked(ifp);
1303 MY_UNLOCK(sc);
1304 return;
1305 }
1306
1307 /*
 * Encapsulate an mbuf chain in a descriptor: the chain is coalesced into a
 * single contiguous buffer and the descriptor's first fragment pointer is
 * aimed at its data area.
1310 */
1311 static int
my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
1313 {
1314 struct my_desc *f = NULL;
1315 int total_len;
1316 struct mbuf *m, *m_new = NULL;
1317
1318 MY_LOCK_ASSERT(sc);
1319 /* calculate the total tx pkt length */
1320 total_len = 0;
1321 for (m = m_head; m != NULL; m = m->m_next)
1322 total_len += m->m_len;
1323 /*
1324 * Start packing the mbufs in this chain into the fragment pointers.
1325 * Stop when we run out of fragments or hit the end of the mbuf
1326 * chain.
1327 */
1328 m = m_head;
1329 MGETHDR(m_new, M_NOWAIT, MT_DATA);
1330 if (m_new == NULL) {
		device_printf(sc->my_dev, "no memory for tx list\n");
1332 return (1);
1333 }
1334 if (m_head->m_pkthdr.len > MHLEN) {
1335 if (!(MCLGET(m_new, M_NOWAIT))) {
1336 m_freem(m_new);
			device_printf(sc->my_dev, "no memory for tx list\n");
1338 return (1);
1339 }
1340 }
1341 m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
1342 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1343 m_freem(m_head);
1344 m_head = m_new;
1345 f = &c->my_ptr->my_frag[0];
1346 f->my_status = 0;
1347 f->my_data = vtophys(mtod(m_new, caddr_t));
1348 total_len = m_new->m_len;
1349 f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
1350 f->my_ctl |= total_len << MY_PKTShift; /* pkt size */
1351 f->my_ctl |= total_len; /* buffer size */
1352 /* 89/12/29 add, for mtd891 *//* [ 89? ] */
1353 if (sc->my_info->my_did == MTD891ID)
1354 f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
1355 c->my_mbuf = m_head;
1356 c->my_lastdesc = 0;
1357 MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
1358 return (0);
1359 }
1360
1361 /*
 * Main transmit routine. my_encap() copies each outgoing chain into a single
 * buffer, so every descriptor carries one fragment; we keep the mbuf pointer
 * in the software chain entry because the fragment pointers placed in the
 * descriptor are physical addresses.
1366 */
1367 static void
my_start(if_t ifp)
1369 {
1370 struct my_softc *sc;
1371
1372 sc = if_getsoftc(ifp);
1373 MY_LOCK(sc);
1374 my_start_locked(ifp);
1375 MY_UNLOCK(sc);
1376 }
1377
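/*
 * my_start_locked() queues as many frames as there are free descriptors.
 * Every descriptor except the first is handed to the chip (MY_OWNByNIC)
 * as it is filled; the first one is released last, after the whole chain
 * is wired up, and a write to MY_TXPDR then demands a transmit poll.
 */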
1378 static void
my_start_locked(if_t ifp)
1380 {
1381 struct my_softc *sc;
1382 struct mbuf *m_head = NULL;
1383 struct my_chain *cur_tx = NULL, *start_tx;
1384
1385 sc = if_getsoftc(ifp);
1386 MY_LOCK_ASSERT(sc);
1387 if (sc->my_autoneg) {
1388 sc->my_tx_pend = 1;
1389 return;
1390 }
1391 /*
1392 * Check for an available queue slot. If there are none, punt.
1393 */
1394 if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
1395 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1396 return;
1397 }
1398 start_tx = sc->my_cdata.my_tx_free;
1399 while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
1400 m_head = if_dequeue(ifp);
1401 if (m_head == NULL)
1402 break;
1403
1404 /* Pick a descriptor off the free list. */
1405 cur_tx = sc->my_cdata.my_tx_free;
1406 sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;
1407
1408 /* Pack the data into the descriptor. */
1409 my_encap(sc, cur_tx, m_head);
1410
1411 if (cur_tx != start_tx)
1412 MY_TXOWN(cur_tx) = MY_OWNByNIC;
1413 #if NBPFILTER > 0
1414 /*
1415 * If there's a BPF listener, bounce a copy of this frame to
1416 * him.
1417 */
1418 BPF_MTAP(ifp, cur_tx->my_mbuf);
1419 #endif
1420 }
1421 /*
1422 * If there are no packets queued, bail.
1423 */
1424 if (cur_tx == NULL) {
1425 return;
1426 }
1427 /*
1428 * Place the request for the upload interrupt in the last descriptor
1429 * in the chain. This way, if we're chaining several packets at once,
1430 * we'll only get an interrupt once for the whole chain rather than
1431 * once for each packet.
1432 */
1433 MY_TXCTL(cur_tx) |= MY_TXIC;
1434 cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
1435 sc->my_cdata.my_tx_tail = cur_tx;
1436 if (sc->my_cdata.my_tx_head == NULL)
1437 sc->my_cdata.my_tx_head = start_tx;
1438 MY_TXOWN(start_tx) = MY_OWNByNIC;
1439 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); /* tx polling demand */
1440
1441 /*
1442 * Set a timeout in case the chip goes out to lunch.
1443 */
1444 sc->my_timer = 5;
1445 return;
1446 }
1447
1448 static void
my_init(void *xsc)
1450 {
1451 struct my_softc *sc = xsc;
1452
1453 MY_LOCK(sc);
1454 my_init_locked(sc);
1455 MY_UNLOCK(sc);
1456 }
1457
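/*
 * Chip bring-up order in my_init_locked(): stop and reset the chip, set
 * the burst length in MY_BCR (plus enhanced mode for the MTD891), apply
 * the speed/duplex bits with my_setcfg(), build the RX and TX rings, set
 * the RX filter, load the ring base addresses, unmask interrupts, and only
 * then enable the receiver and transmitter before restoring the saved
 * BMCR and arming the watchdog callout.
 */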
1458 static void
my_init_locked(struct my_softc *sc)
1460 {
1461 if_t ifp = sc->my_ifp;
1462 u_int16_t phy_bmcr = 0;
1463
1464 MY_LOCK_ASSERT(sc);
1465 if (sc->my_autoneg) {
1466 return;
1467 }
1468 if (sc->my_pinfo != NULL)
1469 phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
1470 /*
1471 * Cancel pending I/O and free all RX/TX buffers.
1472 */
1473 my_stop(sc);
1474 my_reset(sc);
1475
1476 /*
1477 * Set cache alignment and burst length.
1478 */
1479 #if 0 /* 89/9/1 modify, */
1480 CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
1481 CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
1482 #endif
1483 CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
1484 CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
1485 /*
1486 * 89/12/29 add, for mtd891,
1487 */
1488 if (sc->my_info->my_did == MTD891ID) {
1489 MY_SETBIT(sc, MY_BCR, MY_PROG);
1490 MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
1491 }
1492 my_setcfg(sc, phy_bmcr);
1493 /* Init circular RX list. */
1494 if (my_list_rx_init(sc) == ENOBUFS) {
1495 device_printf(sc->my_dev, "init failed: no memory for rx buffers\n");
1496 my_stop(sc);
1497 return;
1498 }
1499 /* Init TX descriptors. */
1500 my_list_tx_init(sc);
1501
1502 /* If we want promiscuous mode, set the allframes bit. */
1503 if (if_getflags(ifp) & IFF_PROMISC)
1504 MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
1505 else
1506 MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);
1507
1508 /*
1509 * Set capture broadcast bit to capture broadcast frames.
1510 */
1511 if (if_getflags(ifp) & IFF_BROADCAST)
1512 MY_SETBIT(sc, MY_TCRRCR, MY_AB);
1513 else
1514 MY_CLRBIT(sc, MY_TCRRCR, MY_AB);
1515
1516 /*
1517 * Program the multicast filter, if necessary.
1518 */
1519 my_setmulti(sc);
1520
1521 /*
1522 * Load the address of the RX list.
1523 */
1524 MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
1525 CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));
1526
1527 /*
1528 * Enable interrupts.
1529 */
1530 CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1531 CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);
1532
1533 /* Enable receiver and transmitter. */
1534 MY_SETBIT(sc, MY_TCRRCR, MY_RE);
1535 MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
1536 CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
1537 MY_SETBIT(sc, MY_TCRRCR, MY_TE);
1538
1539 /* Restore state of BMCR */
1540 if (sc->my_pinfo != NULL)
1541 my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1542 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1543 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1544
1545 callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1546 return;
1547 }
1548
1549 /*
1550 * Set media options.
1551 */
1552
1553 static int
my_ifmedia_upd(if_t ifp)
1555 {
1556 struct my_softc *sc;
1557 struct ifmedia *ifm;
1558
1559 sc = if_getsoftc(ifp);
1560 MY_LOCK(sc);
1561 ifm = &sc->ifmedia;
1562 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
1563 MY_UNLOCK(sc);
1564 return (EINVAL);
1565 }
1566 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1567 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1568 else
1569 my_setmode_mii(sc, ifm->ifm_media);
1570 MY_UNLOCK(sc);
1571 return (0);
1572 }
1573
1574 /*
1575 * Report current media status.
1576 */
1577
1578 static void
my_ifmedia_sts(if_t ifp, struct ifmediareq * ifmr)
1580 {
1581 struct my_softc *sc;
1582 u_int16_t advert = 0, ability = 0;
1583
1584 sc = if_getsoftc(ifp);
1585 MY_LOCK(sc);
1586 ifmr->ifm_active = IFM_ETHER;
1587 if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1588 #if 0 /* this version did not support 1000M, */
1589 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
1590 ifmr->ifm_active = IFM_ETHER | IFM_1000TX;
1591 #endif
1592 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1593 ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
1594 else
1595 ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1596 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1597 ifmr->ifm_active |= IFM_FDX;
1598 else
1599 ifmr->ifm_active |= IFM_HDX;
1600
1601 MY_UNLOCK(sc);
1602 return;
1603 }
1604 ability = my_phy_readreg(sc, PHY_LPAR);
1605 advert = my_phy_readreg(sc, PHY_ANAR);
1606
1607 #if 0 /* this version did not support 1000M, */
	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
1609 ability2 = my_phy_readreg(sc, PHY_1000SR);
1610 if (ability2 & PHY_1000SR_1000BTXFULL) {
1611 advert = 0;
1612 ability = 0;
1613 ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
	} else if (ability2 & PHY_1000SR_1000BTXHALF) {
1615 advert = 0;
1616 ability = 0;
1617 ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX;
1618 }
1619 }
1620 #endif
1621 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
1622 ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
1623 else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
1624 ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1625 else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
1626 ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
1627 else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
1628 ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
1629 else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
1630 ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
1631 MY_UNLOCK(sc);
1632 return;
1633 }
1634
1635 static int
my_ioctl(if_t ifp, u_long command, caddr_t data)
1637 {
1638 struct my_softc *sc = if_getsoftc(ifp);
1639 struct ifreq *ifr = (struct ifreq *) data;
1640 int error;
1641
1642 switch (command) {
1643 case SIOCSIFFLAGS:
1644 MY_LOCK(sc);
1645 if (if_getflags(ifp) & IFF_UP)
1646 my_init_locked(sc);
1647 else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1648 my_stop(sc);
1649 MY_UNLOCK(sc);
1650 error = 0;
1651 break;
1652 case SIOCADDMULTI:
1653 case SIOCDELMULTI:
1654 MY_LOCK(sc);
1655 my_setmulti(sc);
1656 MY_UNLOCK(sc);
1657 error = 0;
1658 break;
1659 case SIOCGIFMEDIA:
1660 case SIOCSIFMEDIA:
1661 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1662 break;
1663 default:
1664 error = ether_ioctl(ifp, command, data);
1665 break;
1666 }
1667 return (error);
1668 }
1669
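/*
 * The watchdog is a one second callout: my_start_locked() loads my_timer
 * with 5 when it hands frames to the chip and my_txeof() zeroes it on
 * completion, so if the countdown here ever reaches zero the transmitter
 * is assumed wedged and the chip is reset and reinitialized.
 */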
1670 static void
my_watchdog(void *arg)
1672 {
1673 struct my_softc *sc;
1674 if_t ifp;
1675
1676 sc = arg;
1677 MY_LOCK_ASSERT(sc);
1678 callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1679 if (sc->my_timer == 0 || --sc->my_timer > 0)
1680 return;
1681
1682 ifp = sc->my_ifp;
1683 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1684 if_printf(ifp, "watchdog timeout\n");
1685 if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1686 if_printf(ifp, "no carrier - transceiver cable problem?\n");
1687 my_stop(sc);
1688 my_reset(sc);
1689 my_init_locked(sc);
1690 if (!if_sendq_empty(ifp))
1691 my_start_locked(ifp);
1692 }
1693
1694 /*
1695 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
1696 */
1697 static void
my_stop(struct my_softc * sc)
1699 {
1700 int i;
1701 if_t ifp;
1702
1703 MY_LOCK_ASSERT(sc);
1704 ifp = sc->my_ifp;
1705
1706 callout_stop(&sc->my_autoneg_timer);
1707 callout_stop(&sc->my_watchdog);
1708
1709 MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
1710 CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1711 CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
1712 CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);
1713
1714 /*
1715 * Free data in the RX lists.
1716 */
1717 for (i = 0; i < MY_RX_LIST_CNT; i++) {
1718 if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
1719 m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
1720 sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
1721 }
1722 }
1723 bzero((char *)&sc->my_ldata->my_rx_list,
1724 sizeof(sc->my_ldata->my_rx_list));
1725 /*
1726 * Free the TX list buffers.
1727 */
1728 for (i = 0; i < MY_TX_LIST_CNT; i++) {
1729 if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
1730 m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
1731 sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
1732 }
1733 }
1734 bzero((char *)&sc->my_ldata->my_tx_list,
1735 sizeof(sc->my_ldata->my_tx_list));
1736 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
1737 return;
1738 }
1739
1740 /*
1741 * Stop all chip I/O so that the kernel's probe routines don't get confused
1742 * by errant DMAs when rebooting.
1743 */
1744 static int
my_shutdown(device_t dev)
1746 {
1747 struct my_softc *sc;
1748
1749 sc = device_get_softc(dev);
1750 MY_LOCK(sc);
1751 my_stop(sc);
1752 MY_UNLOCK(sc);
	return (0);
1754 }
1755