/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003 Stuart Walsh<stu@ipng.org.uk>
 * and Duncan Barclay<dmlb@dmlb.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/bfe/if_bfereg.h>

MODULE_DEPEND(bfe, pci, 1, 1, 1);
MODULE_DEPEND(bfe, ether, 1, 1, 1);
MODULE_DEPEND(bfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BFE_DEVDESC_MAX		64	/* Maximum device description length */

static struct bfe_type bfe_devs[] = {
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM4401,
	    "Broadcom BCM4401 Fast Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM4401B0,
	    "Broadcom BCM4401-B0 Fast Ethernet" },
	{ 0, 0, NULL }
};

static int  bfe_probe(device_t);
static int  bfe_attach(device_t);
static int  bfe_detach(device_t);
static int  bfe_suspend(device_t);
static int  bfe_resume(device_t);
static void bfe_release_resources(struct bfe_softc *);
static void bfe_intr(void *);
static int  bfe_encap(struct bfe_softc *, struct mbuf **);
static void bfe_start(if_t);
static void bfe_start_locked(if_t);
static int  bfe_ioctl(if_t, u_long, caddr_t);
static void bfe_init(void *);
static void bfe_init_locked(void *);
static void bfe_stop(struct bfe_softc *);
static void bfe_watchdog(struct bfe_softc *);
static int  bfe_shutdown(device_t);
static void bfe_tick(void *);
static void bfe_txeof(struct bfe_softc *);
static void bfe_rxeof(struct bfe_softc *);
static void bfe_set_rx_mode(struct bfe_softc *);
static int  bfe_list_rx_init(struct bfe_softc *);
static void bfe_list_tx_init(struct bfe_softc *);
static void bfe_discard_buf(struct bfe_softc *, int);
static int  bfe_list_newbuf(struct bfe_softc *, int);
static void bfe_rx_ring_free(struct bfe_softc *);

static void bfe_pci_setup(struct bfe_softc *, u_int32_t);
static int  bfe_ifmedia_upd(if_t);
static void bfe_ifmedia_sts(if_t, struct ifmediareq *);
static int  bfe_miibus_readreg(device_t, int, int);
static int  bfe_miibus_writereg(device_t, int, int, int);
static void bfe_miibus_statchg(device_t);
static int  bfe_wait_bit(struct bfe_softc *, u_int32_t, u_int32_t,
    u_long, const int);
static void bfe_get_config(struct bfe_softc *sc);
static void bfe_read_eeprom(struct bfe_softc *, u_int8_t *);
static void bfe_stats_update(struct bfe_softc *);
static void bfe_clear_stats(struct bfe_softc *);
static int  bfe_readphy(struct bfe_softc *, u_int32_t, u_int32_t *);
static int  bfe_writephy(struct bfe_softc *, u_int32_t, u_int32_t);
static int  bfe_resetphy(struct bfe_softc *);
static int  bfe_setupphy(struct bfe_softc *);
static void bfe_chip_reset(struct bfe_softc *);
static void bfe_chip_halt(struct bfe_softc *);
static void bfe_core_reset(struct bfe_softc *);
static void bfe_core_disable(struct bfe_softc *);
static int  bfe_dma_alloc(struct bfe_softc *);
static void bfe_dma_free(struct bfe_softc *sc);
static void bfe_dma_map(void *, bus_dma_segment_t *, int, int);
static void bfe_cam_write(struct bfe_softc *, u_char *, int);
static int  sysctl_bfe_stats(SYSCTL_HANDLER_ARGS);

static device_method_t bfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bfe_probe),
	DEVMETHOD(device_attach,	bfe_attach),
	DEVMETHOD(device_detach,	bfe_detach),
	DEVMETHOD(device_shutdown,	bfe_shutdown),
	DEVMETHOD(device_suspend,	bfe_suspend),
	DEVMETHOD(device_resume,	bfe_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bfe_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bfe_driver = {
	"bfe",
	bfe_methods,
	sizeof(struct bfe_softc)
};

DRIVER_MODULE(bfe, pci, bfe_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, bfe, bfe_devs,
    nitems(bfe_devs) - 1);
DRIVER_MODULE(miibus, bfe, miibus_driver, 0, 0);

/*
 * Probe for a Broadcom 4401 chip.
 */
static int
bfe_probe(device_t dev)
{
	struct bfe_type *t;

	t = bfe_devs;

	while (t->bfe_name != NULL) {
		if (pci_get_vendor(dev) == t->bfe_vid &&
		    pci_get_device(dev) == t->bfe_did) {
			device_set_desc(dev, t->bfe_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

struct bfe_dmamap_arg {
	bus_addr_t	bfe_busaddr;
};
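
/*
 * bus_dmamap_load(9) reports the bus address of a loaded buffer through a
 * callback rather than a return value, so the ring loads below pass one of
 * these on the stack and let bfe_dma_map() fill it in.  A minimal sketch of
 * the pattern, mirroring what bfe_dma_alloc() does:
 *
 *	struct bfe_dmamap_arg ctx = { 0 };
 *
 *	error = bus_dmamap_load(tag, map, vaddr, size, bfe_dma_map, &ctx,
 *	    BUS_DMA_NOWAIT);
 *	if (error == 0 && ctx.bfe_busaddr != 0)
 *		// ctx.bfe_busaddr now holds the single DMA segment address
 *
 * This works because the ring tags are created with nsegments == 1, so the
 * callback only ever sees one segment.
 */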

static int
bfe_dma_alloc(struct bfe_softc *sc)
{
	struct bfe_dmamap_arg ctx;
	struct bfe_rx_data *rd;
	struct bfe_tx_data *td;
	int error, i;

	/*
	 * Parent tag.  Apparently the chip cannot handle any DMA address
	 * greater than 1GB.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bfe_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BFE_DMA_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_parent_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    BFE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BFE_TX_LIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    BFE_TX_LIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_tx_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    BFE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BFE_RX_LIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    BFE_RX_LIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_rx_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create Rx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * BFE_MAXTXSEGS,	/* maxsize */
	    BFE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_txmbuf_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot create Tx buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_rxmbuf_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot create Rx buffer DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load DMA map. */
	error = bus_dmamem_alloc(sc->bfe_tx_tag, (void *)&sc->bfe_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->bfe_tx_map);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.bfe_busaddr = 0;
	error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map,
	    sc->bfe_tx_list, BFE_TX_LIST_SIZE, bfe_dma_map, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.bfe_busaddr == 0) {
		device_printf(sc->bfe_dev,
		    "cannot load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->bfe_tx_dma = BFE_ADDR_LO(ctx.bfe_busaddr);

	error = bus_dmamem_alloc(sc->bfe_rx_tag, (void *)&sc->bfe_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->bfe_rx_map);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.bfe_busaddr = 0;
	error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map,
	    sc->bfe_rx_list, BFE_RX_LIST_SIZE, bfe_dma_map, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.bfe_busaddr == 0) {
		device_printf(sc->bfe_dev,
		    "cannot load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->bfe_rx_dma = BFE_ADDR_LO(ctx.bfe_busaddr);

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < BFE_TX_LIST_CNT; i++) {
		td = &sc->bfe_tx_ring[i];
		td->bfe_mbuf = NULL;
		td->bfe_map = NULL;
		error = bus_dmamap_create(sc->bfe_txmbuf_tag, 0, &td->bfe_map);
		if (error != 0) {
			device_printf(sc->bfe_dev,
			    "cannot create DMA map for Tx.\n");
			goto fail;
		}
	}

	/* Create spare DMA map for Rx buffers. */
	error = bus_dmamap_create(sc->bfe_rxmbuf_tag, 0, &sc->bfe_rx_sparemap);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot create spare DMA map for Rx.\n");
		goto fail;
	}
	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		rd = &sc->bfe_rx_ring[i];
		rd->bfe_mbuf = NULL;
		rd->bfe_map = NULL;
		rd->bfe_ctrl = 0;
		error = bus_dmamap_create(sc->bfe_rxmbuf_tag, 0, &rd->bfe_map);
		if (error != 0) {
			device_printf(sc->bfe_dev,
			    "cannot create DMA map for Rx.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
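
/*
 * The tag hierarchy built above, for reference: the parent tag exists only
 * to impose the 1GB address limit, and the four child tags inherit it:
 *
 *	bfe_parent_tag (lowaddr = BFE_DMA_MAXADDR)
 *	 +-- bfe_tx_tag      Tx descriptor ring, 1 segment
 *	 +-- bfe_rx_tag      Rx descriptor ring, 1 segment
 *	 +-- bfe_txmbuf_tag  Tx mbufs, up to BFE_MAXTXSEGS segments
 *	 +-- bfe_rxmbuf_tag  Rx mbufs, 1 cluster-sized segment
 *
 * BFE_DMA_MAXADDR is presumably 0x3fffffff given the 1GB comment; see
 * if_bfereg.h for the authoritative value.
 */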

static void
bfe_dma_free(struct bfe_softc *sc)
{
	struct bfe_tx_data *td;
	struct bfe_rx_data *rd;
	int i;

	/* Tx ring. */
	if (sc->bfe_tx_tag != NULL) {
		if (sc->bfe_tx_dma != 0)
			bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map);
		if (sc->bfe_tx_list != NULL)
			bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list,
			    sc->bfe_tx_map);
		sc->bfe_tx_dma = 0;
		sc->bfe_tx_list = NULL;
		bus_dma_tag_destroy(sc->bfe_tx_tag);
		sc->bfe_tx_tag = NULL;
	}

	/* Rx ring. */
	if (sc->bfe_rx_tag != NULL) {
		if (sc->bfe_rx_dma != 0)
			bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map);
		if (sc->bfe_rx_list != NULL)
			bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list,
			    sc->bfe_rx_map);
		sc->bfe_rx_dma = 0;
		sc->bfe_rx_list = NULL;
		bus_dma_tag_destroy(sc->bfe_rx_tag);
		sc->bfe_rx_tag = NULL;
	}

	/* Tx buffers. */
	if (sc->bfe_txmbuf_tag != NULL) {
		for (i = 0; i < BFE_TX_LIST_CNT; i++) {
			td = &sc->bfe_tx_ring[i];
			if (td->bfe_map != NULL) {
				bus_dmamap_destroy(sc->bfe_txmbuf_tag,
				    td->bfe_map);
				td->bfe_map = NULL;
			}
		}
		bus_dma_tag_destroy(sc->bfe_txmbuf_tag);
		sc->bfe_txmbuf_tag = NULL;
	}

	/* Rx buffers. */
	if (sc->bfe_rxmbuf_tag != NULL) {
		for (i = 0; i < BFE_RX_LIST_CNT; i++) {
			rd = &sc->bfe_rx_ring[i];
			if (rd->bfe_map != NULL) {
				bus_dmamap_destroy(sc->bfe_rxmbuf_tag,
				    rd->bfe_map);
				rd->bfe_map = NULL;
			}
		}
		if (sc->bfe_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->bfe_rxmbuf_tag,
			    sc->bfe_rx_sparemap);
			sc->bfe_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->bfe_rxmbuf_tag);
		sc->bfe_rxmbuf_tag = NULL;
	}

	if (sc->bfe_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->bfe_parent_tag);
		sc->bfe_parent_tag = NULL;
	}
}

static int
bfe_attach(device_t dev)
{
	if_t ifp = NULL;
	struct bfe_softc *sc;
	int error = 0, rid;

	sc = device_get_softc(dev);
	mtx_init(&sc->bfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->bfe_stat_co, &sc->bfe_mtx, 0);

	sc->bfe_dev = dev;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bfe_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;

	sc->bfe_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->bfe_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	if (bfe_dma_alloc(sc) != 0) {
		device_printf(dev, "failed to allocate DMA resources\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    sysctl_bfe_stats, "I", "Statistics");

	/* Set up ifnet structure */
	ifp = sc->bfe_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, bfe_ioctl);
	if_setstartfn(ifp, bfe_start);
	if_setinitfn(ifp, bfe_init);
	if_setsendqlen(ifp, BFE_TX_QLEN);
	if_setsendqready(ifp);

	bfe_get_config(sc);

	/* Reset the chip and turn on the PHY */
	BFE_LOCK(sc);
	bfe_chip_reset(sc);
	BFE_UNLOCK(sc);

	error = mii_attach(dev, &sc->bfe_miibus, ifp, bfe_ifmedia_upd,
	    bfe_ifmedia_sts, BMSR_DEFCAPMASK, sc->bfe_phyaddr, MII_OFFSET_ANY,
	    0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->bfe_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);

	/*
	 * Hook interrupt last to avoid having to lock softc
	 */
	error = bus_setup_intr(dev, sc->bfe_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, bfe_intr, sc, &sc->bfe_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}
fail:
	if (error != 0)
		bfe_detach(dev);
	return (error);
}

static int
bfe_detach(device_t dev)
{
	struct bfe_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	ifp = sc->bfe_ifp;

	if (device_is_attached(dev)) {
		BFE_LOCK(sc);
		sc->bfe_flags |= BFE_FLAG_DETACH;
		bfe_stop(sc);
		BFE_UNLOCK(sc);
		callout_drain(&sc->bfe_stat_co);
		if (ifp != NULL)
			ether_ifdetach(ifp);
	}

	BFE_LOCK(sc);
	bfe_chip_reset(sc);
	BFE_UNLOCK(sc);

	bus_generic_detach(dev);
	if (sc->bfe_miibus != NULL)
		device_delete_child(dev, sc->bfe_miibus);

	bfe_release_resources(sc);
	bfe_dma_free(sc);
	mtx_destroy(&sc->bfe_mtx);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
bfe_shutdown(device_t dev)
{
	struct bfe_softc *sc;

	sc = device_get_softc(dev);
	BFE_LOCK(sc);
	bfe_stop(sc);

	BFE_UNLOCK(sc);

	return (0);
}

static int
bfe_suspend(device_t dev)
{
	struct bfe_softc *sc;

	sc = device_get_softc(dev);
	BFE_LOCK(sc);
	bfe_stop(sc);
	BFE_UNLOCK(sc);

	return (0);
}

static int
bfe_resume(device_t dev)
{
	struct bfe_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->bfe_ifp;
	BFE_LOCK(sc);
	bfe_chip_reset(sc);
	if (if_getflags(ifp) & IFF_UP) {
		bfe_init_locked(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
		    !if_sendq_empty(ifp))
			bfe_start_locked(ifp);
	}
	BFE_UNLOCK(sc);

	return (0);
}

static int
bfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bfe_softc *sc;
	u_int32_t ret;

	sc = device_get_softc(dev);
	bfe_readphy(sc, reg, &ret);

	return (ret);
}

static int
bfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bfe_softc *sc;

	sc = device_get_softc(dev);
	bfe_writephy(sc, reg, val);

	return (0);
}

static void
bfe_miibus_statchg(device_t dev)
{
	struct bfe_softc *sc;
	struct mii_data *mii;
	u_int32_t val;
#ifdef notyet
	u_int32_t flow;
#endif

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bfe_miibus);

	sc->bfe_flags &= ~BFE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bfe_flags |= BFE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* XXX Should stop Rx/Tx engine prior to touching MAC. */
	val = CSR_READ_4(sc, BFE_TX_CTRL);
	val &= ~BFE_TX_DUPLEX;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= BFE_TX_DUPLEX;
#ifdef notyet
		flow = CSR_READ_4(sc, BFE_RXCONF);
		flow &= ~BFE_RXCONF_FLOW;
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			flow |= BFE_RXCONF_FLOW;
		CSR_WRITE_4(sc, BFE_RXCONF, flow);
		/*
		 * It seems that the hardware has Tx pause issues
		 * so enable only Rx pause.
		 */
		flow = CSR_READ_4(sc, BFE_MAC_FLOW);
		flow &= ~BFE_FLOW_PAUSE_ENAB;
		CSR_WRITE_4(sc, BFE_MAC_FLOW, flow);
#endif
	}
	CSR_WRITE_4(sc, BFE_TX_CTRL, val);
}

static void
bfe_tx_ring_free(struct bfe_softc *sc)
{
	int i;

	for (i = 0; i < BFE_TX_LIST_CNT; i++) {
		if (sc->bfe_tx_ring[i].bfe_mbuf != NULL) {
			bus_dmamap_sync(sc->bfe_txmbuf_tag,
			    sc->bfe_tx_ring[i].bfe_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bfe_txmbuf_tag,
			    sc->bfe_tx_ring[i].bfe_map);
			m_freem(sc->bfe_tx_ring[i].bfe_mbuf);
			sc->bfe_tx_ring[i].bfe_mbuf = NULL;
		}
	}
	bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
bfe_rx_ring_free(struct bfe_softc *sc)
{
	int i;

	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		if (sc->bfe_rx_ring[i].bfe_mbuf != NULL) {
			bus_dmamap_sync(sc->bfe_rxmbuf_tag,
			    sc->bfe_rx_ring[i].bfe_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bfe_rxmbuf_tag,
			    sc->bfe_rx_ring[i].bfe_map);
			m_freem(sc->bfe_rx_ring[i].bfe_mbuf);
			sc->bfe_rx_ring[i].bfe_mbuf = NULL;
		}
	}
	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
bfe_list_rx_init(struct bfe_softc *sc)
{
	struct bfe_rx_data *rd;
	int i;

	sc->bfe_rx_prod = sc->bfe_rx_cons = 0;
	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		rd = &sc->bfe_rx_ring[i];
		rd->bfe_mbuf = NULL;
		rd->bfe_ctrl = 0;
		if (bfe_list_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, BFE_DMARX_PTR, (i * sizeof(struct bfe_desc)));

	return (0);
}
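
/*
 * The write to BFE_DMARX_PTR above tells the chip how far it may fill the
 * Rx ring: the register appears to hold the byte offset just past the last
 * posted descriptor, not a descriptor index, which is why the value is
 * scaled by sizeof(struct bfe_desc).  Since the loop leaves i equal to
 * BFE_RX_LIST_CNT, the whole ring is handed to the chip in one go.
 */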

static void
bfe_list_tx_init(struct bfe_softc *sc)
{
	int i;

	sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0;
	bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
	for (i = 0; i < BFE_TX_LIST_CNT; i++)
		sc->bfe_tx_ring[i].bfe_mbuf = NULL;

	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
bfe_discard_buf(struct bfe_softc *sc, int c)
{
	struct bfe_rx_data *r;
	struct bfe_desc *d;

	r = &sc->bfe_rx_ring[c];
	d = &sc->bfe_rx_list[c];
	d->bfe_ctrl = htole32(r->bfe_ctrl);
}

static int
bfe_list_newbuf(struct bfe_softc *sc, int c)
{
	struct bfe_rxheader *rx_header;
	struct bfe_desc *d;
	struct bfe_rx_data *r;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	u_int32_t ctrl;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf_sg(sc->bfe_rxmbuf_tag, sc->bfe_rx_sparemap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	r = &sc->bfe_rx_ring[c];
	if (r->bfe_mbuf != NULL) {
		bus_dmamap_sync(sc->bfe_rxmbuf_tag, r->bfe_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bfe_rxmbuf_tag, r->bfe_map);
	}
	map = r->bfe_map;
	r->bfe_map = sc->bfe_rx_sparemap;
	sc->bfe_rx_sparemap = map;
	r->bfe_mbuf = m;

	rx_header = mtod(m, struct bfe_rxheader *);
	rx_header->len = 0;
	rx_header->flags = 0;
	bus_dmamap_sync(sc->bfe_rxmbuf_tag, r->bfe_map, BUS_DMASYNC_PREREAD);

	ctrl = segs[0].ds_len & BFE_DESC_LEN;
	KASSERT(ctrl > ETHER_MAX_LEN + 32, ("%s: buffer size too small (%d)!",
	    __func__, ctrl));
	if (c == BFE_RX_LIST_CNT - 1)
		ctrl |= BFE_DESC_EOT;
	r->bfe_ctrl = ctrl;

	d = &sc->bfe_rx_list[c];
	d->bfe_ctrl = htole32(ctrl);
	/* The chip needs all addresses to be added to BFE_PCI_DMA. */
	d->bfe_addr = htole32(BFE_ADDR_LO(segs[0].ds_addr) + BFE_PCI_DMA);

	return (0);
}
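
/*
 * bfe_list_newbuf() uses the classic spare-map dance: the new mbuf is
 * loaded into bfe_rx_sparemap first, and only after the load succeeds are
 * the slot's map and the spare swapped.  That way a failed allocation or
 * load never unloads the map still backing the old, good buffer - the
 * caller can keep the old mbuf and recycle the descriptor through
 * bfe_discard_buf() instead.
 */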

static void
bfe_get_config(struct bfe_softc *sc)
{
	u_int8_t eeprom[128];

	bfe_read_eeprom(sc, eeprom);

	sc->bfe_enaddr[0] = eeprom[79];
	sc->bfe_enaddr[1] = eeprom[78];
	sc->bfe_enaddr[2] = eeprom[81];
	sc->bfe_enaddr[3] = eeprom[80];
	sc->bfe_enaddr[4] = eeprom[83];
	sc->bfe_enaddr[5] = eeprom[82];

	sc->bfe_phyaddr = eeprom[90] & 0x1f;
	sc->bfe_mdc_port = (eeprom[90] >> 14) & 0x1;

	sc->bfe_core_unit = 0;
	sc->bfe_dma_offset = BFE_PCI_DMA;
}
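
/*
 * The EEPROM appears to store the station address as 16-bit words starting
 * at byte offset 78 with the bytes swapped, which is why they are picked
 * up pairwise-reversed above.  For a MAC of 00:11:22:33:44:55 the relevant
 * bytes would look like:
 *
 *	eeprom[78..83] = { 0x11, 0x00, 0x33, 0x22, 0x55, 0x44 }
 *
 * XXX the MDC port extraction shifts a single byte right by 14, which
 * always yields 0; presumably the field was meant to be read from the
 * 16-bit word at offset 90.
 */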

static void
bfe_pci_setup(struct bfe_softc *sc, u_int32_t cores)
{
	u_int32_t bar_orig, val;

	bar_orig = pci_read_config(sc->bfe_dev, BFE_BAR0_WIN, 4);
	pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, BFE_REG_PCI, 4);

	val = CSR_READ_4(sc, BFE_SBINTVEC);
	val |= cores;
	CSR_WRITE_4(sc, BFE_SBINTVEC, val);

	val = CSR_READ_4(sc, BFE_SSB_PCI_TRANS_2);
	val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST;
	CSR_WRITE_4(sc, BFE_SSB_PCI_TRANS_2, val);

	pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, bar_orig, 4);
}
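
/*
 * BFE_BAR0_WIN is the backplane window register in PCI config space: the
 * 4401 exposes a single BAR, and this register selects which Sonics
 * backplane core is currently visible through it.  bfe_pci_setup()
 * temporarily points the window at the PCI bridge core (BFE_REG_PCI) to
 * route the requested cores' interrupts and enable prefetch/burst, then
 * restores the original window so subsequent CSR_* accesses hit the MAC
 * core again.
 */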

static void
bfe_clear_stats(struct bfe_softc *sc)
{
	uint32_t reg;

	BFE_LOCK_ASSERT(sc);

	CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
	for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4)
		CSR_READ_4(sc, reg);
	for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4)
		CSR_READ_4(sc, reg);
}

static int
bfe_resetphy(struct bfe_softc *sc)
{
	u_int32_t val;

	bfe_writephy(sc, 0, BMCR_RESET);
	DELAY(100);
	bfe_readphy(sc, 0, &val);
	if (val & BMCR_RESET) {
		device_printf(sc->bfe_dev, "PHY reset would not complete.\n");
		return (ENXIO);
	}
	return (0);
}

static void
bfe_chip_halt(struct bfe_softc *sc)
{
	BFE_LOCK_ASSERT(sc);
	/* Disable interrupts - although the mask alone does not stop them. */
	CSR_WRITE_4(sc, BFE_IMASK, 0);
	CSR_READ_4(sc, BFE_IMASK);

	CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
	bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 200, 1);

	CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
	CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
	DELAY(10);
}

static void
bfe_chip_reset(struct bfe_softc *sc)
{
	u_int32_t val;

	BFE_LOCK_ASSERT(sc);

	/* Set the interrupt vector for the enet core */
	bfe_pci_setup(sc, BFE_INTVEC_ENET0);

	/* Is the core up? */
	val = CSR_READ_4(sc, BFE_SBTMSLOW) &
	    (BFE_RESET | BFE_REJECT | BFE_CLOCK);
	if (val == BFE_CLOCK) {
		/* It is, so shut it down */
		CSR_WRITE_4(sc, BFE_RCV_LAZY, 0);
		CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
		bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 100, 1);
		CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
		if (CSR_READ_4(sc, BFE_DMARX_STAT) & BFE_STAT_EMASK)
			bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE,
			    100, 0);
		CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
	}

	bfe_core_reset(sc);
	bfe_clear_stats(sc);

	/*
	 * We want the PHY registers to be accessible even when
	 * the driver is "downed", so initialize the MDC preamble,
	 * frequency, and whether the PHY is internal or external here.
	 */

	/* The 4402 has a 62.5MHz SB clock and an internal PHY. */
	CSR_WRITE_4(sc, BFE_MDIO_CTRL, 0x8d);

	/* Internal or external PHY? */
	val = CSR_READ_4(sc, BFE_DEVCTRL);
	if (!(val & BFE_IPP))
		CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_EPSEL);
	else if (CSR_READ_4(sc, BFE_DEVCTRL) & BFE_EPR) {
		BFE_AND(sc, BFE_DEVCTRL, ~BFE_EPR);
		DELAY(100);
	}

	/* Enable CRC32 generation and set proper LED modes */
	BFE_OR(sc, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED);

	/* Reset or clear powerdown control bit */
	BFE_AND(sc, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN);

	CSR_WRITE_4(sc, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) &
	    BFE_LAZY_FC_MASK));

	/*
	 * We don't want lazy interrupts, so just send them at
	 * the end of a frame, please
	 */
	BFE_OR(sc, BFE_RCV_LAZY, 0);

	/* Set max lengths, accounting for VLAN tags */
	CSR_WRITE_4(sc, BFE_RXMAXLEN, ETHER_MAX_LEN + 32);
	CSR_WRITE_4(sc, BFE_TXMAXLEN, ETHER_MAX_LEN + 32);

	/* Set watermark XXX - magic */
	CSR_WRITE_4(sc, BFE_TX_WMARK, 56);

	/*
	 * Initialise DMA channels
	 * - not forgetting dma addresses need to be added to BFE_PCI_DMA
	 */
	CSR_WRITE_4(sc, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);
	CSR_WRITE_4(sc, BFE_DMATX_ADDR, sc->bfe_tx_dma + BFE_PCI_DMA);

	CSR_WRITE_4(sc, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT) |
	    BFE_RX_CTRL_ENABLE);
	CSR_WRITE_4(sc, BFE_DMARX_ADDR, sc->bfe_rx_dma + BFE_PCI_DMA);

	bfe_resetphy(sc);
	bfe_setupphy(sc);
}

static void
bfe_core_disable(struct bfe_softc *sc)
{
	if ((CSR_READ_4(sc, BFE_SBTMSLOW)) & BFE_RESET)
		return;

	/*
	 * Set reject, wait for it to be set, then wait for the core to stop
	 * being busy, then set reset and reject and enable the clocks.
	 */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
	bfe_wait_bit(sc, BFE_SBTMSLOW, BFE_REJECT, 1000, 0);
	bfe_wait_bit(sc, BFE_SBTMSHIGH, BFE_BUSY, 1000, 1);
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT |
	    BFE_RESET));
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);
	/* Leave reset and reject set */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
	DELAY(10);
}

static void
bfe_core_reset(struct bfe_softc *sc)
{
	u_int32_t val;

	/* Disable the core */
	bfe_core_disable(sc);

	/* and bring it back up */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);

	/* Chip bug, clear SERR, IB and TO if they are set. */
	if (CSR_READ_4(sc, BFE_SBTMSHIGH) & BFE_SERR)
		CSR_WRITE_4(sc, BFE_SBTMSHIGH, 0);
	val = CSR_READ_4(sc, BFE_SBIMSTATE);
	if (val & (BFE_IBE | BFE_TO))
		CSR_WRITE_4(sc, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));

	/* Clear reset and allow it to move through the core */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);

	/* Leave the clock set */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, BFE_CLOCK);
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);
}
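
/*
 * The disable/reset pair above implements the usual Sonics backplane (SSB)
 * core reset handshake, roughly:
 *
 *	1. REJECT + CLOCK; wait for REJECT to latch and BUSY to clear
 *	2. FGC | CLOCK | REJECT | RESET, then park in REJECT | RESET
 *	3. RESET | CLOCK | FGC to re-enable with a forced gated clock
 *	4. clear pending SERR/IBE/TO error state (chip bug workaround)
 *	5. CLOCK | FGC, then CLOCK alone once reset has propagated
 *
 * Each write is followed by a read-back plus DELAY(10) because the
 * backplane needs a few clocks for the state change to take effect.
 */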

static void
bfe_cam_write(struct bfe_softc *sc, u_char *data, int index)
{
	u_int32_t val;

	val = ((u_int32_t) data[2]) << 24;
	val |= ((u_int32_t) data[3]) << 16;
	val |= ((u_int32_t) data[4]) << 8;
	val |= ((u_int32_t) data[5]);
	CSR_WRITE_4(sc, BFE_CAM_DATA_LO, val);
	val = (BFE_CAM_HI_VALID |
	    (((u_int32_t) data[0]) << 8) |
	    (((u_int32_t) data[1])));
	CSR_WRITE_4(sc, BFE_CAM_DATA_HI, val);
	CSR_WRITE_4(sc, BFE_CAM_CTRL, (BFE_CAM_WRITE |
	    ((u_int32_t) index << BFE_CAM_INDEX_SHIFT)));
	bfe_wait_bit(sc, BFE_CAM_CTRL, BFE_CAM_BUSY, 10000, 1);
}
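
/*
 * CAM entries are written as two 32-bit words: the low word carries the
 * last four bytes of the MAC address and the high word the first two plus
 * a valid bit.  For 00:11:22:33:44:55 the registers would be loaded as
 *
 *	BFE_CAM_DATA_LO = 0x22334455
 *	BFE_CAM_DATA_HI = BFE_CAM_HI_VALID | 0x0011
 *
 * before the indexed write is kicked off through BFE_CAM_CTRL.
 */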

static u_int
bfe_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct bfe_softc *sc = arg;

	bfe_cam_write(sc, LLADDR(sdl), cnt + 1);

	return (1);
}

static void
bfe_set_rx_mode(struct bfe_softc *sc)
{
	if_t ifp = sc->bfe_ifp;
	u_int32_t val;

	BFE_LOCK_ASSERT(sc);

	val = CSR_READ_4(sc, BFE_RXCONF);

	if (if_getflags(ifp) & IFF_PROMISC)
		val |= BFE_RXCONF_PROMISC;
	else
		val &= ~BFE_RXCONF_PROMISC;

	if (if_getflags(ifp) & IFF_BROADCAST)
		val &= ~BFE_RXCONF_DBCAST;
	else
		val |= BFE_RXCONF_DBCAST;

	CSR_WRITE_4(sc, BFE_CAM_CTRL, 0);
	bfe_cam_write(sc, if_getlladdr(sc->bfe_ifp), 0);

	if (if_getflags(ifp) & IFF_ALLMULTI)
		val |= BFE_RXCONF_ALLMULTI;
	else {
		val &= ~BFE_RXCONF_ALLMULTI;
		if_foreach_llmaddr(ifp, bfe_write_maddr, sc);
	}

	CSR_WRITE_4(sc, BFE_RXCONF, val);
	BFE_OR(sc, BFE_CAM_CTRL, BFE_CAM_ENABLE);
}

static void
bfe_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bfe_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));

	ctx = (struct bfe_dmamap_arg *)arg;
	ctx->bfe_busaddr = segs[0].ds_addr;
}

static void
bfe_release_resources(struct bfe_softc *sc)
{

	if (sc->bfe_intrhand != NULL)
		bus_teardown_intr(sc->bfe_dev, sc->bfe_irq, sc->bfe_intrhand);

	if (sc->bfe_irq != NULL)
		bus_release_resource(sc->bfe_dev, SYS_RES_IRQ, 0, sc->bfe_irq);

	if (sc->bfe_res != NULL)
		bus_release_resource(sc->bfe_dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->bfe_res);

	if (sc->bfe_ifp != NULL)
		if_free(sc->bfe_ifp);
}

static void
bfe_read_eeprom(struct bfe_softc *sc, u_int8_t *data)
{
	long i;
	u_int16_t *ptr = (u_int16_t *)data;

	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = CSR_READ_4(sc, 4096 + i);
}

static int
bfe_wait_bit(struct bfe_softc *sc, u_int32_t reg, u_int32_t bit,
    u_long timeout, const int clear)
{
	u_long i;

	for (i = 0; i < timeout; i++) {
		u_int32_t val = CSR_READ_4(sc, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		DELAY(10);
	}
	if (i == timeout) {
		device_printf(sc->bfe_dev,
		    "BUG! Timeout waiting for bit %08x of register "
		    "%x to %s.\n", bit, reg, (clear ? "clear" : "set"));
		return (-1);
	}
	return (0);
}
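
/*
 * bfe_wait_bit() polls in DELAY(10) steps, so the timeout argument is in
 * units of roughly 10 microseconds.  Typical uses elsewhere in the driver:
 *
 *	wait for a bit to clear:
 *		bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 200, 1);
 *	wait for a bit to be set:
 *		bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE, 100, 0);
 *
 * i.e. ~2ms and ~1ms budgets respectively.
 */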

static int
bfe_readphy(struct bfe_softc *sc, u_int32_t reg, u_int32_t *val)
{
	int err;

	/* Clear MII ISR */
	CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
	CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
	    (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) |
	    (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) |
	    (reg << BFE_MDIO_RA_SHIFT) |
	    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));
	err = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0);
	*val = CSR_READ_4(sc, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA;

	return (err);
}

static int
bfe_writephy(struct bfe_softc *sc, u_int32_t reg, u_int32_t val)
{
	int status;

	CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
	CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
	    (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) |
	    (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) |
	    (reg << BFE_MDIO_RA_SHIFT) |
	    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) |
	    (val & BFE_MDIO_DATA_DATA)));
	status = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0);

	return (status);
}
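
/*
 * Both PHY accessors build what looks like a clause-22 MDIO frame in
 * BFE_MDIO_DATA and then poll the MII interrupt bit for completion.  The
 * frame fields, as assembled above:
 *
 *	BFE_MDIO_SB_START			start bits
 *	op (READ/WRITE) << BFE_MDIO_OP_SHIFT	opcode
 *	phyaddr << BFE_MDIO_PMD_SHIFT		PHY address
 *	reg << BFE_MDIO_RA_SHIFT		register address
 *	BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT	turnaround bits
 *	data (writes only), masked with BFE_MDIO_DATA_DATA
 *
 * Note that on a timeout bfe_readphy() still stores whatever is in the
 * data register; callers are expected to check the return value.
 */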

/*
 * XXX - I think this is handled by the PHY driver, but it can't hurt to do it
 * twice
 */
static int
bfe_setupphy(struct bfe_softc *sc)
{
	u_int32_t val;

	/* Enable activity LED */
	bfe_readphy(sc, 26, &val);
	bfe_writephy(sc, 26, val & 0x7fff);
	bfe_readphy(sc, 26, &val);

	/* Enable traffic meter LED mode */
	bfe_readphy(sc, 27, &val);
	bfe_writephy(sc, 27, val | (1 << 6));

	return (0);
}

static void
bfe_stats_update(struct bfe_softc *sc)
{
	struct bfe_hw_stats *stats;
	if_t ifp;
	uint32_t mib[BFE_MIB_CNT];
	uint32_t reg, *val;

	BFE_LOCK_ASSERT(sc);

	val = mib;
	CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
	for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4)
		*val++ = CSR_READ_4(sc, reg);
	for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4)
		*val++ = CSR_READ_4(sc, reg);

	ifp = sc->bfe_ifp;
	stats = &sc->bfe_stats;
	/* Tx stat. */
	stats->tx_good_octets += mib[MIB_TX_GOOD_O];
	stats->tx_good_frames += mib[MIB_TX_GOOD_P];
	stats->tx_octets += mib[MIB_TX_O];
	stats->tx_frames += mib[MIB_TX_P];
	stats->tx_bcast_frames += mib[MIB_TX_BCAST];
	stats->tx_mcast_frames += mib[MIB_TX_MCAST];
	stats->tx_pkts_64 += mib[MIB_TX_64];
	stats->tx_pkts_65_127 += mib[MIB_TX_65_127];
	stats->tx_pkts_128_255 += mib[MIB_TX_128_255];
	stats->tx_pkts_256_511 += mib[MIB_TX_256_511];
	stats->tx_pkts_512_1023 += mib[MIB_TX_512_1023];
	stats->tx_pkts_1024_max += mib[MIB_TX_1024_MAX];
	stats->tx_jabbers += mib[MIB_TX_JABBER];
	stats->tx_oversize_frames += mib[MIB_TX_OSIZE];
	stats->tx_frag_frames += mib[MIB_TX_FRAG];
	stats->tx_underruns += mib[MIB_TX_URUNS];
	stats->tx_colls += mib[MIB_TX_TCOLS];
	stats->tx_single_colls += mib[MIB_TX_SCOLS];
	stats->tx_multi_colls += mib[MIB_TX_MCOLS];
	stats->tx_excess_colls += mib[MIB_TX_ECOLS];
	stats->tx_late_colls += mib[MIB_TX_LCOLS];
	stats->tx_deferrals += mib[MIB_TX_DEFERED];
	stats->tx_carrier_losts += mib[MIB_TX_CLOST];
	stats->tx_pause_frames += mib[MIB_TX_PAUSE];
	/* Rx stat. */
	stats->rx_good_octets += mib[MIB_RX_GOOD_O];
	stats->rx_good_frames += mib[MIB_RX_GOOD_P];
	stats->rx_octets += mib[MIB_RX_O];
	stats->rx_frames += mib[MIB_RX_P];
	stats->rx_bcast_frames += mib[MIB_RX_BCAST];
	stats->rx_mcast_frames += mib[MIB_RX_MCAST];
	stats->rx_pkts_64 += mib[MIB_RX_64];
	stats->rx_pkts_65_127 += mib[MIB_RX_65_127];
	stats->rx_pkts_128_255 += mib[MIB_RX_128_255];
	stats->rx_pkts_256_511 += mib[MIB_RX_256_511];
	stats->rx_pkts_512_1023 += mib[MIB_RX_512_1023];
	stats->rx_pkts_1024_max += mib[MIB_RX_1024_MAX];
	stats->rx_jabbers += mib[MIB_RX_JABBER];
	stats->rx_oversize_frames += mib[MIB_RX_OSIZE];
	stats->rx_frag_frames += mib[MIB_RX_FRAG];
	stats->rx_missed_frames += mib[MIB_RX_MISS];
	stats->rx_crc_align_errs += mib[MIB_RX_CRCA];
	stats->rx_runts += mib[MIB_RX_USIZE];
	stats->rx_crc_errs += mib[MIB_RX_CRC];
	stats->rx_align_errs += mib[MIB_RX_ALIGN];
	stats->rx_symbol_errs += mib[MIB_RX_SYM];
	stats->rx_pause_frames += mib[MIB_RX_PAUSE];
	stats->rx_control_frames += mib[MIB_RX_NPAUSE];

	/* Update counters in ifnet. */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, (u_long)mib[MIB_TX_GOOD_P]);
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (u_long)mib[MIB_TX_TCOLS]);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, (u_long)mib[MIB_TX_URUNS] +
	    (u_long)mib[MIB_TX_ECOLS] +
	    (u_long)mib[MIB_TX_DEFERED] +
	    (u_long)mib[MIB_TX_CLOST]);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, (u_long)mib[MIB_RX_GOOD_P]);

	if_inc_counter(ifp, IFCOUNTER_IERRORS, mib[MIB_RX_JABBER] +
	    mib[MIB_RX_MISS] +
	    mib[MIB_RX_CRCA] +
	    mib[MIB_RX_USIZE] +
	    mib[MIB_RX_CRC] +
	    mib[MIB_RX_ALIGN] +
	    mib[MIB_RX_SYM]);
}
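
/*
 * With BFE_MIB_CLR_ON_READ set, every MIB counter resets to zero as it is
 * read, so each pass through bfe_stats_update() harvests only the deltas
 * since the previous tick and accumulates them into the wider soft
 * counters in sc->bfe_stats.  This is also why bfe_clear_stats() can
 * "clear" the hardware simply by reading every register in the two MIB
 * ranges.
 */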

static void
bfe_txeof(struct bfe_softc *sc)
{
	struct bfe_tx_data *r;
	if_t ifp;
	int i, chipidx;

	BFE_LOCK_ASSERT(sc);

	ifp = sc->bfe_ifp;

	chipidx = CSR_READ_4(sc, BFE_DMATX_STAT) & BFE_STAT_CDMASK;
	chipidx /= sizeof(struct bfe_desc);

	i = sc->bfe_tx_cons;
	if (i == chipidx)
		return;
	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* Go through the mbufs and free those that have been transmitted */
	for (; i != chipidx; BFE_INC(i, BFE_TX_LIST_CNT)) {
		r = &sc->bfe_tx_ring[i];
		sc->bfe_tx_cnt--;
		if (r->bfe_mbuf == NULL)
			continue;
		bus_dmamap_sync(sc->bfe_txmbuf_tag, r->bfe_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->bfe_txmbuf_tag, r->bfe_map);

		m_freem(r->bfe_mbuf);
		r->bfe_mbuf = NULL;
	}

	if (i != sc->bfe_tx_cons) {
		/* We freed up some mbufs */
		sc->bfe_tx_cons = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	if (sc->bfe_tx_cnt == 0)
		sc->bfe_watchdog_timer = 0;
}

/* Pass a received packet up the stack */
static void
bfe_rxeof(struct bfe_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	struct bfe_rxheader *rxheader;
	struct bfe_rx_data *r;
	int cons, prog;
	u_int32_t status, current, len, flags;

	BFE_LOCK_ASSERT(sc);
	cons = sc->bfe_rx_cons;
	status = CSR_READ_4(sc, BFE_DMARX_STAT);
	current = (status & BFE_STAT_CDMASK) / sizeof(struct bfe_desc);

	ifp = sc->bfe_ifp;

	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; current != cons; prog++,
	    BFE_INC(cons, BFE_RX_LIST_CNT)) {
		r = &sc->bfe_rx_ring[cons];
		m = r->bfe_mbuf;
		/*
		 * The Rx status is written into the mbuf by the
		 * hardware, so the bus_dmamap_sync(9) cannot be
		 * delayed.  This hardware limitation results in
		 * inefficient mbuf usage, as bfe(4) cannot reuse
		 * the mapped buffer from an errored frame.
		 */
		if (bfe_list_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			bfe_discard_buf(sc, cons);
			continue;
		}
		rxheader = mtod(m, struct bfe_rxheader *);
		len = le16toh(rxheader->len);
		flags = le16toh(rxheader->flags);

		/* Remove CRC bytes. */
		len -= ETHER_CRC_LEN;

		/* Flag an error and try again */
		if ((len > ETHER_MAX_LEN + 32) ||
		    (flags & BFE_RX_FLAG_ERRORS)) {
			m_freem(m);
			continue;
		}

		/* Make sure to skip header bytes written by hardware. */
		m_adj(m, BFE_RX_OFFSET);
		m->m_len = m->m_pkthdr.len = len;

		m->m_pkthdr.rcvif = ifp;
		BFE_UNLOCK(sc);
		if_input(ifp, m);
		BFE_LOCK(sc);
	}

	if (prog > 0) {
		sc->bfe_rx_cons = cons;
		bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

static void
bfe_intr(void *xsc)
{
	struct bfe_softc *sc = xsc;
	if_t ifp;
	u_int32_t istat;

	ifp = sc->bfe_ifp;

	BFE_LOCK(sc);

	istat = CSR_READ_4(sc, BFE_ISTAT);

	/*
	 * Defer unsolicited interrupts - this is necessary because setting
	 * the chip's interrupt mask register to 0 doesn't actually stop the
	 * interrupts.
	 */
	istat &= BFE_IMASK_DEF;
	CSR_WRITE_4(sc, BFE_ISTAT, istat);
	CSR_READ_4(sc, BFE_ISTAT);

	/* Not expecting this interrupt, disregard it */
	if (istat == 0 || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		BFE_UNLOCK(sc);
		return;
	}

	/* A packet was received */
	if (istat & BFE_ISTAT_RX)
		bfe_rxeof(sc);

	/* A packet was sent */
	if (istat & BFE_ISTAT_TX)
		bfe_txeof(sc);

	if (istat & BFE_ISTAT_ERRORS) {
		if (istat & BFE_ISTAT_DSCE) {
			device_printf(sc->bfe_dev, "Descriptor Error\n");
			bfe_stop(sc);
			BFE_UNLOCK(sc);
			return;
		}

		if (istat & BFE_ISTAT_DPE) {
			device_printf(sc->bfe_dev,
			    "Descriptor Protocol Error\n");
			bfe_stop(sc);
			BFE_UNLOCK(sc);
			return;
		}
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		bfe_init_locked(sc);
	}

	/* We have packets pending, fire them out */
	if (!if_sendq_empty(ifp))
		bfe_start_locked(ifp);

	BFE_UNLOCK(sc);
}

static int
bfe_encap(struct bfe_softc *sc, struct mbuf **m_head)
{
	struct bfe_desc *d;
	struct bfe_tx_data *r, *r1;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[BFE_MAXTXSEGS];
	uint32_t cur, si;
	int error, i, nsegs;

	BFE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	si = cur = sc->bfe_tx_prod;
	r = &sc->bfe_tx_ring[cur];
	error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map, *m_head,
	    txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, BFE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map,
		    *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	if (sc->bfe_tx_cnt + nsegs > BFE_TX_LIST_CNT - 1) {
		bus_dmamap_unload(sc->bfe_txmbuf_tag, r->bfe_map);
		return (ENOBUFS);
	}

	for (i = 0; i < nsegs; i++) {
		d = &sc->bfe_tx_list[cur];
		d->bfe_ctrl = htole32(txsegs[i].ds_len & BFE_DESC_LEN);
		d->bfe_ctrl |= htole32(BFE_DESC_IOC);
		if (cur == BFE_TX_LIST_CNT - 1)
			/*
			 * Tell the chip to wrap to the start of
			 * the descriptor list.
			 */
			d->bfe_ctrl |= htole32(BFE_DESC_EOT);
		/* The chip needs all addresses to be added to BFE_PCI_DMA. */
		d->bfe_addr = htole32(BFE_ADDR_LO(txsegs[i].ds_addr) +
		    BFE_PCI_DMA);
		BFE_INC(cur, BFE_TX_LIST_CNT);
	}

	/* Update producer index. */
	sc->bfe_tx_prod = cur;

	/* Set EOF on the last descriptor. */
	cur = (cur + BFE_TX_LIST_CNT - 1) % BFE_TX_LIST_CNT;
	d = &sc->bfe_tx_list[cur];
	d->bfe_ctrl |= htole32(BFE_DESC_EOF);

	/* Lastly set SOF on the first descriptor to avoid races. */
	d = &sc->bfe_tx_list[si];
	d->bfe_ctrl |= htole32(BFE_DESC_SOF);

	r1 = &sc->bfe_tx_ring[cur];
	map = r->bfe_map;
	r->bfe_map = r1->bfe_map;
	r1->bfe_map = map;
	r1->bfe_mbuf = *m_head;
	sc->bfe_tx_cnt += nsegs;

	bus_dmamap_sync(sc->bfe_txmbuf_tag, map, BUS_DMASYNC_PREWRITE);

	return (0);
}
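
/*
 * A frame queued by bfe_encap() ends up as a run of descriptors flagged
 * like this (for a 3-segment mbuf chain starting at index si):
 *
 *	desc[si]	SOF | IOC | len0
 *	desc[si+1]	      IOC | len1
 *	desc[si+2]	EOF | IOC | len2
 *
 * (whichever slot happens to be the final ring entry also gets EOT).
 * SOF is set last, after EOF, so the chip cannot start on a frame whose
 * tail descriptors are still being written.  The mbuf pointer and the
 * loaded DMA map are parked on the *last* slot of the frame, which is why
 * bfe_txeof() skips ring entries with a NULL bfe_mbuf.
 */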

/*
 * Set up to transmit a packet.
 */
static void
bfe_start(if_t ifp)
{
	BFE_LOCK((struct bfe_softc *)if_getsoftc(ifp));
	bfe_start_locked(ifp);
	BFE_UNLOCK((struct bfe_softc *)if_getsoftc(ifp));
}

/*
 * Set up to transmit a packet.  The softc is already locked.
 */
static void
bfe_start_locked(if_t ifp)
{
	struct bfe_softc *sc;
	struct mbuf *m_head;
	int queued;

	sc = if_getsoftc(ifp);

	BFE_LOCK_ASSERT(sc);

	/*
	 * Not much point trying to send if the link is down
	 * or we have nothing to send.
	 */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->bfe_flags & BFE_FLAG_LINK) == 0)
		return;

	for (queued = 0; !if_sendq_empty(ifp) &&
	    sc->bfe_tx_cnt < BFE_TX_LIST_CNT - 1;) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the tx ring.  If we don't have
		 * enough room, let the chip drain the ring.
		 */
		if (bfe_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued) {
		bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Transmit - twice due to apparent hardware bug */
		CSR_WRITE_4(sc, BFE_DMATX_PTR,
		    sc->bfe_tx_prod * sizeof(struct bfe_desc));
		/*
		 * XXX It seems the following write is not necessary
		 * to kick the Tx command.  What might be required is
		 * a way of flushing the PCI posted write; reading the
		 * register back would ensure that.  In addition, the
		 * hardware will execute the PCI posted write in the
		 * long run, and the watchdog timer for the kick
		 * command is set to 5 seconds.  Therefore this second
		 * write access is probably not necessary, or could be
		 * replaced with a read operation.
		 */
		CSR_WRITE_4(sc, BFE_DMATX_PTR,
		    sc->bfe_tx_prod * sizeof(struct bfe_desc));

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->bfe_watchdog_timer = 5;
	}
}

static void
bfe_init(void *xsc)
{
	BFE_LOCK((struct bfe_softc *)xsc);
	bfe_init_locked(xsc);
	BFE_UNLOCK((struct bfe_softc *)xsc);
}

static void
bfe_init_locked(void *xsc)
{
	struct bfe_softc *sc = (struct bfe_softc *)xsc;
	if_t ifp = sc->bfe_ifp;
	struct mii_data *mii;

	BFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->bfe_miibus);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	bfe_stop(sc);
	bfe_chip_reset(sc);

	if (bfe_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->bfe_dev,
		    "%s: Not enough memory for list buffers\n", __func__);
		bfe_stop(sc);
		return;
	}
	bfe_list_tx_init(sc);

	bfe_set_rx_mode(sc);

	/* Enable the chip and core */
	BFE_OR(sc, BFE_ENET_CTRL, BFE_ENET_ENABLE);
	/* Enable interrupts */
	CSR_WRITE_4(sc, BFE_IMASK, BFE_IMASK_DEF);

	/* Clear link state and change media. */
	sc->bfe_flags &= ~BFE_FLAG_LINK;
	mii_mediachg(mii);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	callout_reset(&sc->bfe_stat_co, hz, bfe_tick, sc);
}

/*
 * Set media options.
 */
static int
bfe_ifmedia_upd(if_t ifp)
{
	struct bfe_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = if_getsoftc(ifp);
	BFE_LOCK(sc);

	mii = device_get_softc(sc->bfe_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	BFE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
bfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct bfe_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	BFE_LOCK(sc);
	mii = device_get_softc(sc->bfe_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	BFE_UNLOCK(sc);
}

static int
bfe_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct bfe_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		BFE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				bfe_set_rx_mode(sc);
			else if ((sc->bfe_flags & BFE_FLAG_DETACH) == 0)
				bfe_init_locked(sc);
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			bfe_stop(sc);
		BFE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		BFE_LOCK(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			bfe_set_rx_mode(sc);
		BFE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->bfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
bfe_watchdog(struct bfe_softc *sc)
{
	if_t ifp;

	BFE_LOCK_ASSERT(sc);

	if (sc->bfe_watchdog_timer == 0 || --sc->bfe_watchdog_timer)
		return;

	ifp = sc->bfe_ifp;

	device_printf(sc->bfe_dev, "watchdog timeout -- resetting\n");

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	bfe_init_locked(sc);

	if (!if_sendq_empty(ifp))
		bfe_start_locked(ifp);
}

static void
bfe_tick(void *xsc)
{
	struct bfe_softc *sc = xsc;
	struct mii_data *mii;

	BFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->bfe_miibus);
	mii_tick(mii);
	bfe_stats_update(sc);
	bfe_watchdog(sc);
	callout_reset(&sc->bfe_stat_co, hz, bfe_tick, sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bfe_stop(struct bfe_softc *sc)
{
	if_t ifp;

	BFE_LOCK_ASSERT(sc);

	ifp = sc->bfe_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->bfe_flags &= ~BFE_FLAG_LINK;
	callout_stop(&sc->bfe_stat_co);
	sc->bfe_watchdog_timer = 0;

	bfe_chip_halt(sc);
	bfe_tx_ring_free(sc);
	bfe_rx_ring_free(sc);
}

static int
sysctl_bfe_stats(SYSCTL_HANDLER_ARGS)
{
	struct bfe_softc *sc;
	struct bfe_hw_stats *stats;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result != 1)
		return (error);

	sc = (struct bfe_softc *)arg1;
	stats = &sc->bfe_stats;

	printf("%s statistics:\n", device_get_nameunit(sc->bfe_dev));
	printf("Transmit good octets : %ju\n",
	    (uintmax_t)stats->tx_good_octets);
	printf("Transmit good frames : %ju\n",
	    (uintmax_t)stats->tx_good_frames);
	printf("Transmit octets : %ju\n",
	    (uintmax_t)stats->tx_octets);
	printf("Transmit frames : %ju\n",
	    (uintmax_t)stats->tx_frames);
	printf("Transmit broadcast frames : %ju\n",
	    (uintmax_t)stats->tx_bcast_frames);
	printf("Transmit multicast frames : %ju\n",
	    (uintmax_t)stats->tx_mcast_frames);
	printf("Transmit frames 64 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_64);
	printf("Transmit frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_65_127);
	printf("Transmit frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_128_255);
	printf("Transmit frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_256_511);
	printf("Transmit frames 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_512_1023);
	printf("Transmit frames 1024 to max bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_1024_max);
	printf("Transmit jabber errors : %u\n", stats->tx_jabbers);
	printf("Transmit oversized frames : %ju\n",
	    (uintmax_t)stats->tx_oversize_frames);
	printf("Transmit fragmented frames : %ju\n",
	    (uintmax_t)stats->tx_frag_frames);
	printf("Transmit underruns : %u\n", stats->tx_underruns);
	printf("Transmit total collisions : %u\n", stats->tx_colls);
	printf("Transmit single collisions : %u\n", stats->tx_single_colls);
	printf("Transmit multiple collisions : %u\n", stats->tx_multi_colls);
	printf("Transmit excess collisions : %u\n", stats->tx_excess_colls);
	printf("Transmit late collisions : %u\n", stats->tx_late_colls);
	printf("Transmit deferrals : %u\n", stats->tx_deferrals);
	printf("Transmit carrier losts : %u\n", stats->tx_carrier_losts);
	printf("Transmit pause frames : %u\n", stats->tx_pause_frames);

	printf("Receive good octets : %ju\n",
	    (uintmax_t)stats->rx_good_octets);
	printf("Receive good frames : %ju\n",
	    (uintmax_t)stats->rx_good_frames);
	printf("Receive octets : %ju\n",
	    (uintmax_t)stats->rx_octets);
	printf("Receive frames : %ju\n",
	    (uintmax_t)stats->rx_frames);
	printf("Receive broadcast frames : %ju\n",
	    (uintmax_t)stats->rx_bcast_frames);
	printf("Receive multicast frames : %ju\n",
	    (uintmax_t)stats->rx_mcast_frames);
	printf("Receive frames 64 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_64);
	printf("Receive frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_65_127);
	printf("Receive frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_128_255);
	printf("Receive frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_256_511);
	printf("Receive frames 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_512_1023);
	printf("Receive frames 1024 to max bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_1024_max);
	printf("Receive jabber errors : %u\n", stats->rx_jabbers);
	printf("Receive oversized frames : %ju\n",
	    (uintmax_t)stats->rx_oversize_frames);
	printf("Receive fragmented frames : %ju\n",
	    (uintmax_t)stats->rx_frag_frames);
	printf("Receive missed frames : %u\n", stats->rx_missed_frames);
	printf("Receive CRC align errors : %u\n", stats->rx_crc_align_errs);
	printf("Receive undersized frames : %u\n", stats->rx_runts);
	printf("Receive CRC errors : %u\n", stats->rx_crc_errs);
	printf("Receive align errors : %u\n", stats->rx_align_errs);
	printf("Receive symbol errors : %u\n", stats->rx_symbol_errs);
	printf("Receive pause frames : %u\n", stats->rx_pause_frames);
	printf("Receive control frames : %u\n", stats->rx_control_frames);

	return (error);
}