xref: /freebsd/sys/dev/mgb/if_mgb.c (revision f7c32ed617858bcd22f8d1b03199099d50125721)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation, Inc.
5  *
6  * This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 /*
34  * Microchip LAN7430/LAN7431 PCIe to Gigabit Ethernet Controller driver.
35  *
36  * Product information:
37  * LAN7430 https://www.microchip.com/en-us/product/LAN7430
38  *   - Integrated IEEE 802.3 compliant PHY
39  * LAN7431 https://www.microchip.com/en-us/product/LAN7431
40  *   - RGMII Interface
41  *
42  * This driver uses the iflib interface and the default 'ukphy' PHY driver.
43  *
44  * UNIMPLEMENTED FEATURES
45  * ----------------------
46  * A number of features supported by LAN743X device are not yet implemented in
47  * this driver:
48  *
49  * - Multiple (up to 4) RX queues support
50  *   - Requires removing the single-queue asserts and allocating one
51  *     `rx_ring_data` struct per queue (e.g. based on the CPU count).
52  * - RX/TX Checksum Offloading support
53  * - VLAN support
54  * - Receive Packet Filtering (Multicast Perfect/Hash Address) support
55  * - Wake on LAN (WoL) support
56  * - TX LSO support
57  * - Receive Side Scaling (RSS) support
58  * - Debugging Capabilities:
59  *   - Could include MAC statistics and
60  *     error status registers in sysctl.
61  */
62 
63 #include <sys/param.h>
64 #include <sys/bus.h>
65 #include <sys/endian.h>
66 #include <sys/kdb.h>
67 #include <sys/kernel.h>
68 #include <sys/module.h>
69 #include <sys/rman.h>
70 #include <sys/socket.h>
71 #include <sys/sockio.h>
72 #include <machine/bus.h>
73 #include <machine/resource.h>
74 
75 #include <net/ethernet.h>
76 #include <net/if.h>
77 #include <net/if_var.h>
78 #include <net/if_types.h>
79 #include <net/if_media.h>
80 #include <net/iflib.h>
81 
82 #include <dev/mgb/if_mgb.h>
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85 #include <dev/pci/pcireg.h>
86 #include <dev/pci/pcivar.h>
87 
88 #include "ifdi_if.h"
89 #include "miibus_if.h"
90 
91 static pci_vendor_info_t mgb_vendor_info_array[] = {
92 	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7430_DEVICE_ID,
93 	    "Microchip LAN7430 PCIe Gigabit Ethernet Controller"),
94 	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7431_DEVICE_ID,
95 	    "Microchip LAN7431 PCIe Gigabit Ethernet Controller"),
96 	PVID_END
97 };
98 
99 /* Device methods */
100 static device_register_t		mgb_register;
101 
102 /* IFLIB methods */
103 static ifdi_attach_pre_t		mgb_attach_pre;
104 static ifdi_attach_post_t		mgb_attach_post;
105 static ifdi_detach_t			mgb_detach;
106 
107 static ifdi_tx_queues_alloc_t		mgb_tx_queues_alloc;
108 static ifdi_rx_queues_alloc_t		mgb_rx_queues_alloc;
109 static ifdi_queues_free_t		mgb_queues_free;
110 
111 static ifdi_init_t			mgb_init;
112 static ifdi_stop_t			mgb_stop;
113 
114 static ifdi_msix_intr_assign_t		mgb_msix_intr_assign;
115 static ifdi_tx_queue_intr_enable_t	mgb_tx_queue_intr_enable;
116 static ifdi_rx_queue_intr_enable_t	mgb_rx_queue_intr_enable;
117 static ifdi_intr_enable_t		mgb_intr_enable_all;
118 static ifdi_intr_disable_t		mgb_intr_disable_all;
119 
120 /* IFLIB_TXRX methods */
121 static int				mgb_isc_txd_encap(void *,
122 					    if_pkt_info_t);
123 static void				mgb_isc_txd_flush(void *,
124 					    uint16_t, qidx_t);
125 static int				mgb_isc_txd_credits_update(void *,
126 					    uint16_t, bool);
127 static int				mgb_isc_rxd_available(void *,
128 					    uint16_t, qidx_t, qidx_t);
129 static int				mgb_isc_rxd_pkt_get(void *,
130 					    if_rxd_info_t);
131 static void				mgb_isc_rxd_refill(void *,
132 					    if_rxd_update_t);
133 static void				mgb_isc_rxd_flush(void *,
134 					    uint16_t, uint8_t, qidx_t);
135 
136 /* Interrupts */
137 static driver_filter_t			mgb_legacy_intr;
138 static driver_filter_t			mgb_admin_intr;
139 static driver_filter_t			mgb_rxq_intr;
140 static bool				mgb_intr_test(struct mgb_softc *);
141 
142 /* MII methods */
143 static miibus_readreg_t			mgb_miibus_readreg;
144 static miibus_writereg_t		mgb_miibus_writereg;
145 static miibus_linkchg_t			mgb_miibus_linkchg;
146 static miibus_statchg_t			mgb_miibus_statchg;
147 
148 static int				mgb_media_change(if_t);
149 static void				mgb_media_status(if_t,
150 					    struct ifmediareq *);
151 
152 /* Helper/Test functions */
153 static int				mgb_test_bar(struct mgb_softc *);
154 static int				mgb_alloc_regs(struct mgb_softc *);
155 static int				mgb_release_regs(struct mgb_softc *);
156 
157 static void				mgb_get_ethaddr(struct mgb_softc *,
158 					    struct ether_addr *);
159 
160 static int				mgb_wait_for_bits(struct mgb_softc *,
161 					    int, int, int);
162 
163 /* H/W init, reset and teardown helpers */
164 static int				mgb_hw_init(struct mgb_softc *);
165 static int				mgb_hw_teardown(struct mgb_softc *);
166 static int				mgb_hw_reset(struct mgb_softc *);
167 static int				mgb_mac_init(struct mgb_softc *);
168 static int				mgb_dmac_reset(struct mgb_softc *);
169 static int				mgb_phy_reset(struct mgb_softc *);
170 
171 static int				mgb_dma_init(struct mgb_softc *);
172 static int				mgb_dma_tx_ring_init(struct mgb_softc *,
173 					    int);
174 static int				mgb_dma_rx_ring_init(struct mgb_softc *,
175 					    int);
176 
177 static int				mgb_dmac_control(struct mgb_softc *,
178 					    int, int, enum mgb_dmac_cmd);
179 static int				mgb_fct_control(struct mgb_softc *,
180 					    int, int, enum mgb_fct_cmd);
181 
182 /*********************************************************************
183  *  FreeBSD Device Interface Entry Points
184  *********************************************************************/
185 
186 static device_method_t mgb_methods[] = {
187 	/* Device interface */
188 	DEVMETHOD(device_register,	mgb_register),
189 	DEVMETHOD(device_probe,		iflib_device_probe),
190 	DEVMETHOD(device_attach,	iflib_device_attach),
191 	DEVMETHOD(device_detach,	iflib_device_detach),
192 	DEVMETHOD(device_shutdown,	iflib_device_shutdown),
193 	DEVMETHOD(device_suspend,	iflib_device_suspend),
194 	DEVMETHOD(device_resume,	iflib_device_resume),
195 
196 	/* MII Interface */
197 	DEVMETHOD(miibus_readreg,	mgb_miibus_readreg),
198 	DEVMETHOD(miibus_writereg,	mgb_miibus_writereg),
199 	DEVMETHOD(miibus_linkchg,	mgb_miibus_linkchg),
200 	DEVMETHOD(miibus_statchg,	mgb_miibus_statchg),
201 
202 	DEVMETHOD_END
203 };
204 
205 static driver_t mgb_driver = {
206 	"mgb", mgb_methods, sizeof(struct mgb_softc)
207 };
208 
209 static devclass_t mgb_devclass;
210 DRIVER_MODULE(mgb, pci, mgb_driver, mgb_devclass, NULL, NULL);
211 IFLIB_PNP_INFO(pci, mgb, mgb_vendor_info_array);
212 MODULE_VERSION(mgb, 1);
213 
214 #if 0 /* MIIBUS_DEBUG */
215 /* If MIIBUS debugging is done in attach, order matters; use this instead. */
216 DRIVER_MODULE_ORDERED(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL,
217     SI_ORDER_ANY);
218 #endif /* MIIBUS_DEBUG */
219 DRIVER_MODULE(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL);
220 
221 MODULE_DEPEND(mgb, pci, 1, 1, 1);
222 MODULE_DEPEND(mgb, ether, 1, 1, 1);
223 MODULE_DEPEND(mgb, miibus, 1, 1, 1);
224 MODULE_DEPEND(mgb, iflib, 1, 1, 1);
225 
226 static device_method_t mgb_iflib_methods[] = {
227 	DEVMETHOD(ifdi_attach_pre, mgb_attach_pre),
228 	DEVMETHOD(ifdi_attach_post, mgb_attach_post),
229 	DEVMETHOD(ifdi_detach, mgb_detach),
230 
231 	DEVMETHOD(ifdi_init, mgb_init),
232 	DEVMETHOD(ifdi_stop, mgb_stop),
233 
234 	DEVMETHOD(ifdi_tx_queues_alloc, mgb_tx_queues_alloc),
235 	DEVMETHOD(ifdi_rx_queues_alloc, mgb_rx_queues_alloc),
236 	DEVMETHOD(ifdi_queues_free, mgb_queues_free),
237 
238 	DEVMETHOD(ifdi_msix_intr_assign, mgb_msix_intr_assign),
239 	DEVMETHOD(ifdi_tx_queue_intr_enable, mgb_tx_queue_intr_enable),
240 	DEVMETHOD(ifdi_rx_queue_intr_enable, mgb_rx_queue_intr_enable),
241 	DEVMETHOD(ifdi_intr_enable, mgb_intr_enable_all),
242 	DEVMETHOD(ifdi_intr_disable, mgb_intr_disable_all),
243 
244 #if 0 /* Not yet implemented IFLIB methods */
245 	/*
246 	 * Set multicast addresses, mtu and promiscuous mode
247 	 */
248 	DEVMETHOD(ifdi_multi_set, mgb_multi_set),
249 	DEVMETHOD(ifdi_mtu_set, mgb_mtu_set),
250 	DEVMETHOD(ifdi_promisc_set, mgb_promisc_set),
251 
252 	/*
253 	 * Needed for VLAN support
254 	 */
255 	DEVMETHOD(ifdi_vlan_register, mgb_vlan_register),
256 	DEVMETHOD(ifdi_vlan_unregister, mgb_vlan_unregister),
257 
258 	/*
259 	 * Needed for WOL support
260 	 * at the very least.
261 	 */
262 	DEVMETHOD(ifdi_shutdown, mgb_shutdown),
263 	DEVMETHOD(ifdi_suspend, mgb_suspend),
264 	DEVMETHOD(ifdi_resume, mgb_resume),
265 #endif /* Not yet implemented IFLIB methods */
266 	DEVMETHOD_END
267 };
268 
269 static driver_t mgb_iflib_driver = {
270 	"mgb", mgb_iflib_methods, sizeof(struct mgb_softc)
271 };
272 
273 static struct if_txrx mgb_txrx  = {
274 	.ift_txd_encap = mgb_isc_txd_encap,
275 	.ift_txd_flush = mgb_isc_txd_flush,
276 	.ift_txd_credits_update = mgb_isc_txd_credits_update,
277 	.ift_rxd_available = mgb_isc_rxd_available,
278 	.ift_rxd_pkt_get = mgb_isc_rxd_pkt_get,
279 	.ift_rxd_refill = mgb_isc_rxd_refill,
280 	.ift_rxd_flush = mgb_isc_rxd_flush,
281 
282 	.ift_legacy_intr = mgb_legacy_intr
283 };
284 
285 static struct if_shared_ctx mgb_sctx_init = {
286 	.isc_magic = IFLIB_MAGIC,
287 
288 	.isc_q_align = PAGE_SIZE,
289 	.isc_admin_intrcnt = 1,
290 	.isc_flags = IFLIB_DRIVER_MEDIA /* | IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ*/,
291 
292 	.isc_vendor_info = mgb_vendor_info_array,
293 	.isc_driver_version = "1",
294 	.isc_driver = &mgb_iflib_driver,
295 	/* 2 queues per set for TX and RX (ring queue, head writeback queue) */
296 	.isc_ntxqs = 2,
297 
298 	.isc_tx_maxsize = MGB_DMA_MAXSEGS  * MCLBYTES,
299 	/* .isc_tx_nsegments = MGB_DMA_MAXSEGS, */
300 	.isc_tx_maxsegsize = MCLBYTES,
301 
302 	.isc_ntxd_min = {1, 1}, /* Will want to make this bigger */
303 	.isc_ntxd_max = {MGB_DMA_RING_SIZE, 1},
304 	.isc_ntxd_default = {MGB_DMA_RING_SIZE, 1},
305 
306 	.isc_nrxqs = 2,
307 
308 	.isc_rx_maxsize = MCLBYTES,
309 	.isc_rx_nsegments = 1,
310 	.isc_rx_maxsegsize = MCLBYTES,
311 
312 	.isc_nrxd_min = {1, 1}, /* Will want to make this bigger */
313 	.isc_nrxd_max = {MGB_DMA_RING_SIZE, 1},
314 	.isc_nrxd_default = {MGB_DMA_RING_SIZE, 1},
315 
316 	.isc_nfl = 1, /* One free list since there is only one queue. */
317 #if 0 /* UNUSED_CTX */
318 
319 	.isc_tso_maxsize = MGB_TSO_MAXSIZE + sizeof(struct ether_vlan_header),
320 	.isc_tso_maxsegsize = MGB_TX_MAXSEGSIZE,
321 #endif /* UNUSED_CTX */
322 };
323 
324 /*********************************************************************/
325 
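/*
 * Called via the device_register method: iflib uses the returned
 * if_shared_ctx template (PNP table, queue counts and sizes) during
 * probe and attach.  The same template is shared by all mgb instances.
 */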
326 static void *
327 mgb_register(device_t dev)
328 {
329 
330 	return (&mgb_sctx_init);
331 }
332 
333 static int
334 mgb_attach_pre(if_ctx_t ctx)
335 {
336 	struct mgb_softc *sc;
337 	if_softc_ctx_t scctx;
338 	int error, phyaddr, rid;
339 	struct ether_addr hwaddr;
340 	struct mii_data *miid;
341 
342 	sc = iflib_get_softc(ctx);
343 	sc->ctx = ctx;
344 	sc->dev = iflib_get_dev(ctx);
345 	scctx = iflib_get_softc_ctx(ctx);
346 
347 	/* IFLIB required setup */
348 	scctx->isc_txrx = &mgb_txrx;
349 	scctx->isc_tx_nsegments = MGB_DMA_MAXSEGS;
350 	/* Ring desc queues */
351 	scctx->isc_txqsizes[0] = sizeof(struct mgb_ring_desc) *
352 	    scctx->isc_ntxd[0];
353 	scctx->isc_rxqsizes[0] = sizeof(struct mgb_ring_desc) *
354 	    scctx->isc_nrxd[0];
355 
356 	/* Head WB queues */
357 	scctx->isc_txqsizes[1] = sizeof(uint32_t) * scctx->isc_ntxd[1];
358 	scctx->isc_rxqsizes[1] = sizeof(uint32_t) * scctx->isc_nrxd[1];
359 
360 	/* XXX: Must have 1 txqset, but can have up to 4 rxqsets */
361 	scctx->isc_nrxqsets = 1;
362 	scctx->isc_ntxqsets = 1;
363 
364 	/* scctx->isc_tx_csum_flags = (CSUM_TCP | CSUM_UDP) |
365 	    (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TSO */
366 	scctx->isc_tx_csum_flags = 0;
367 	scctx->isc_capabilities = scctx->isc_capenable = 0;
368 #if 0
369 	/*
370 	 * CSUM, TSO and VLAN support are TBD
371 	 */
372 	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
373 	    IFCAP_TSO4 | IFCAP_TSO6 |
374 	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
375 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
376 	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO |
377 	    IFCAP_JUMBO_MTU;
378 	scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
379 #endif
380 
381 	/* get the BAR */
382 	error = mgb_alloc_regs(sc);
383 	if (error != 0) {
384 		device_printf(sc->dev,
385 		    "Unable to allocate bus resource: registers.\n");
386 		goto fail;
387 	}
388 
389 	error = mgb_test_bar(sc);
390 	if (error != 0)
391 		goto fail;
392 
393 	error = mgb_hw_init(sc);
394 	if (error != 0) {
395 		device_printf(sc->dev,
396 		    "MGB device init failed. (err: %d)\n", error);
397 		goto fail;
398 	}
399 
400 	switch (pci_get_device(sc->dev)) {
401 	case MGB_LAN7430_DEVICE_ID:
402 		phyaddr = 1;
403 		break;
404 	case MGB_LAN7431_DEVICE_ID:
405 	default:
406 		phyaddr = MII_PHY_ANY;
407 		break;
408 	}
409 
410 	/* XXX: Would be nice(r) if locked methods were here */
411 	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(ctx),
412 	    mgb_media_change, mgb_media_status,
413 	    BMSR_DEFCAPMASK, phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE);
414 	if (error != 0) {
415 		device_printf(sc->dev, "Failed to attach MII interface\n");
416 		goto fail;
417 	}
418 
419 	miid = device_get_softc(sc->miibus);
420 	scctx->isc_media = &miid->mii_media;
421 
422 	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
423 	/* Set up the PBA BAR. */
424 	rid = pci_msix_pba_bar(sc->dev);
425 	if (rid != scctx->isc_msix_bar) {
426 		sc->pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
427 		    &rid, RF_ACTIVE);
428 		if (sc->pba == NULL) {
429 			error = ENXIO;
430 			device_printf(sc->dev, "Failed to setup PBA BAR\n");
431 			goto fail;
432 		}
433 	}
434 
435 	mgb_get_ethaddr(sc, &hwaddr);
436 	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
437 	    ETHER_IS_MULTICAST(hwaddr.octet) ||
438 	    ETHER_IS_ZERO(hwaddr.octet))
439 		ether_gen_addr(iflib_get_ifp(ctx), &hwaddr);
440 
441 	/*
442 	 * XXX: If the MAC address was generated, the Linux driver
443 	 * writes it back to the device.
444 	 */
445 	iflib_set_mac(ctx, hwaddr.octet);
446 
447 	/* Map all vectors to vector 0 (admin interrupts) by default. */
448 	CSR_WRITE_REG(sc, MGB_INTR_VEC_RX_MAP, 0);
449 	CSR_WRITE_REG(sc, MGB_INTR_VEC_TX_MAP, 0);
450 	CSR_WRITE_REG(sc, MGB_INTR_VEC_OTHER_MAP, 0);
451 
452 	return (0);
453 
454 fail:
455 	mgb_detach(ctx);
456 	return (error);
457 }
458 
459 static int
460 mgb_attach_post(if_ctx_t ctx)
461 {
462 	struct mgb_softc *sc;
463 
464 	sc = iflib_get_softc(ctx);
465 
466 	device_printf(sc->dev, "Interrupt test: %s\n",
467 	    (mgb_intr_test(sc) ? "PASS" : "FAIL"));
468 
469 	return (0);
470 }
471 
472 static int
473 mgb_detach(if_ctx_t ctx)
474 {
475 	struct mgb_softc *sc;
476 	int error;
477 
478 	sc = iflib_get_softc(ctx);
479 
480 	/* XXX: Should report errors but still detach everything. */
481 	error = mgb_hw_teardown(sc);
482 
483 	/* Release IRQs */
484 	iflib_irq_free(ctx, &sc->rx_irq);
485 	iflib_irq_free(ctx, &sc->admin_irq);
486 
487 	if (sc->miibus != NULL)
488 		device_delete_child(sc->dev, sc->miibus);
489 
490 	if (sc->pba != NULL)
491 		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
492 		    rman_get_rid(sc->pba), sc->pba);
493 	sc->pba = NULL;
494 
495 	error = mgb_release_regs(sc);
496 
497 	return (error);
498 }
499 
500 static int
501 mgb_media_change(if_t ifp)
502 {
503 	struct mii_data *miid;
504 	struct mii_softc *miisc;
505 	struct mgb_softc *sc;
506 	if_ctx_t ctx;
507 	int needs_reset;
508 
509 	ctx = if_getsoftc(ifp);
510 	sc = iflib_get_softc(ctx);
511 	miid = device_get_softc(sc->miibus);
512 	LIST_FOREACH(miisc, &miid->mii_phys, mii_list)
513 		PHY_RESET(miisc);
514 
515 	needs_reset = mii_mediachg(miid);
516 	if (needs_reset != 0)
517 		ifp->if_init(ctx);
518 	return (needs_reset);
519 }
520 
521 static void
522 mgb_media_status(if_t ifp, struct ifmediareq *ifmr)
523 {
524 	struct mgb_softc *sc;
525 	struct mii_data *miid;
526 
527 	sc = iflib_get_softc(if_getsoftc(ifp));
528 	miid = device_get_softc(sc->miibus);
529 	if ((if_getflags(ifp) & IFF_UP) == 0)
530 		return;
531 
532 	mii_pollstat(miid);
533 	ifmr->ifm_active = miid->mii_media_active;
534 	ifmr->ifm_status = miid->mii_media_status;
535 }
536 
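/*
 * iflib allocates the DMA memory for each queue set and passes the
 * per-queue virtual/bus addresses here and in mgb_rx_queues_alloc():
 * index 0 is the descriptor ring, index 1 the single-word head
 * write-back area (per the counts and sizes set in mgb_attach_pre()).
 * Only the addresses are recorded; no additional allocation is done.
 */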
537 static int
538 mgb_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs,
539     int ntxqsets)
540 {
541 	struct mgb_softc *sc;
542 	struct mgb_ring_data *rdata;
543 	int q;
544 
545 	sc = iflib_get_softc(ctx);
546 	KASSERT(ntxqsets == 1, ("ntxqsets = %d", ntxqsets));
547 	rdata = &sc->tx_ring_data;
548 	for (q = 0; q < ntxqsets; q++) {
549 		KASSERT(ntxqs == 2, ("ntxqs = %d", ntxqs));
550 		/* Ring */
551 		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * ntxqs + 0];
552 		rdata->ring_bus_addr = paddrs[q * ntxqs + 0];
553 
554 		/* Head WB */
555 		rdata->head_wb = (uint32_t *) vaddrs[q * ntxqs + 1];
556 		rdata->head_wb_bus_addr = paddrs[q * ntxqs + 1];
557 	}
558 	return (0);
559 }
560 
561 static int
562 mgb_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs,
563     int nrxqsets)
564 {
565 	struct mgb_softc *sc;
566 	struct mgb_ring_data *rdata;
567 	int q;
568 
569 	sc = iflib_get_softc(ctx);
570 	KASSERT(nrxqsets == 1, ("nrxqsets = %d", nrxqsets));
571 	rdata = &sc->rx_ring_data;
572 	for (q = 0; q < nrxqsets; q++) {
573 		KASSERT(nrxqs == 2, ("nrxqs = %d", nrxqs));
574 		/* Ring */
575 		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * nrxqs + 0];
576 		rdata->ring_bus_addr = paddrs[q * nrxqs + 0];
577 
578 		/* Head WB */
579 		rdata->head_wb = (uint32_t *) vaddrs[q * nrxqs + 1];
580 		rdata->head_wb_bus_addr = paddrs[q * nrxqs + 1];
581 	}
582 	return (0);
583 }
584 
585 static void
586 mgb_queues_free(if_ctx_t ctx)
587 {
588 	struct mgb_softc *sc;
589 
590 	sc = iflib_get_softc(ctx);
591 
592 	memset(&sc->rx_ring_data, 0, sizeof(struct mgb_ring_data));
593 	memset(&sc->tx_ring_data, 0, sizeof(struct mgb_ring_data));
594 }
595 
596 static void
597 mgb_init(if_ctx_t ctx)
598 {
599 	struct mgb_softc *sc;
600 	struct mii_data *miid;
601 	int error;
602 
603 	sc = iflib_get_softc(ctx);
604 	miid = device_get_softc(sc->miibus);
605 	device_printf(sc->dev, "running init ...\n");
606 
607 	mgb_dma_init(sc);
608 
609 	/* XXX: Turn off perfect filtering, turn on (broad|multi|uni)cast rx */
610 	CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PERFECT_FILTER);
611 	CSR_UPDATE_REG(sc, MGB_RFE_CTL,
612 	    MGB_RFE_ALLOW_BROADCAST |
613 	    MGB_RFE_ALLOW_MULTICAST |
614 	    MGB_RFE_ALLOW_UNICAST);
615 
616 	error = mii_mediachg(miid);
617 	/* Not much we can do if this fails. */
618 	if (error != 0)
619 		device_printf(sc->dev, "%s: mii_mediachg returned %d\n",
620 		    __func__, error);
621 }
622 
623 #ifdef DEBUG
624 static void
625 mgb_dump_some_stats(struct mgb_softc *sc)
626 {
627 	int i;
628 	int first_stat = 0x1200;
629 	int last_stat = 0x12FC;
630 
631 	for (i = first_stat; i <= last_stat; i += 4)
632 		if (CSR_READ_REG(sc, i) != 0)
633 			device_printf(sc->dev, "0x%04x: 0x%08x\n", i,
634 			    CSR_READ_REG(sc, i));
635 	char *stat_names[] = {
636 		"MAC_ERR_STS ",
637 		"FCT_INT_STS ",
638 		"DMAC_CFG ",
639 		"DMAC_CMD ",
640 		"DMAC_INT_STS ",
641 		"DMAC_INT_EN ",
642 		"DMAC_RX_ERR_STS0 ",
643 		"DMAC_RX_ERR_STS1 ",
644 		"DMAC_RX_ERR_STS2 ",
645 		"DMAC_RX_ERR_STS3 ",
646 		"INT_STS ",
647 		"INT_EN ",
648 		"INT_VEC_EN ",
649 		"INT_VEC_MAP0 ",
650 		"INT_VEC_MAP1 ",
651 		"INT_VEC_MAP2 ",
652 		"TX_HEAD0",
653 		"TX_TAIL0",
654 		"DMAC_TX_ERR_STS0 ",
655 		NULL
656 	};
657 	int stats[] = {
658 		0x114,
659 		0xA0,
660 		0xC00,
661 		0xC0C,
662 		0xC10,
663 		0xC14,
664 		0xC60,
665 		0xCA0,
666 		0xCE0,
667 		0xD20,
668 		0x780,
669 		0x788,
670 		0x794,
671 		0x7A0,
672 		0x7A4,
673 		0x780,
674 		0xD58,
675 		0xD5C,
676 		0xD60,
677 		0x0
678 	};
679 	i = 0;
680 	printf("==============================\n");
681 	while (stats[i++])
682 		device_printf(sc->dev, "%s at offset 0x%04x = 0x%08x\n",
683 		    stat_names[i - 1], stats[i - 1],
684 		    CSR_READ_REG(sc, stats[i - 1]));
685 	printf("==== TX RING DESCS ====\n");
686 	for (i = 0; i < MGB_DMA_RING_SIZE; i++)
687 		device_printf(sc->dev, "ring[%d].data0=0x%08x\n"
688 		    "ring[%d].data1=0x%08x\n"
689 		    "ring[%d].data2=0x%08x\n"
690 		    "ring[%d].data3=0x%08x\n",
691 		    i, sc->tx_ring_data.ring[i].ctl,
692 		    i, sc->tx_ring_data.ring[i].addr.low,
693 		    i, sc->tx_ring_data.ring[i].addr.high,
694 		    i, sc->tx_ring_data.ring[i].sts);
695 	device_printf(sc->dev, "==== DUMP_TX_DMA_RAM ====\n");
696 	CSR_WRITE_REG(sc, 0x24, 0xF);	/* DP_SEL & TX_RAM_0 */
697 	for (i = 0; i < 128; i++) {
698 		CSR_WRITE_REG(sc, 0x2C, i);	/* DP_ADDR */
699 
700 		CSR_WRITE_REG(sc, 0x28, 0);	/* DP_CMD */
701 
702 		while ((CSR_READ_REG(sc, 0x24) & 0x80000000) == 0) /* DP_SEL & READY */
703 			DELAY(1000);
704 
705 		device_printf(sc->dev, "DMAC_TX_RAM_0[%u]=%08x\n", i,
706 		    CSR_READ_REG(sc, 0x30));	/* DP_DATA */
707 	}
708 }
709 #endif
710 
711 static void
712 mgb_stop(if_ctx_t ctx)
713 {
714 	struct mgb_softc *sc;
715 	if_softc_ctx_t scctx;
716 	int i;
717 
718 	sc = iflib_get_softc(ctx);
719 	scctx = iflib_get_softc_ctx(ctx);
720 
721 	/* XXX: Could potentially timeout */
722 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
723 		mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_STOP);
724 		mgb_fct_control(sc, MGB_FCT_RX_CTL, 0, FCT_DISABLE);
725 	}
726 	for (i = 0; i < scctx->isc_ntxqsets; i++) {
727 		mgb_dmac_control(sc, MGB_DMAC_TX_START, 0, DMAC_STOP);
728 		mgb_fct_control(sc, MGB_FCT_TX_CTL, 0, FCT_DISABLE);
729 	}
730 }
731 
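/*
 * Interrupt handlers.  Without MSI-X, mgb_legacy_intr() simply defers to
 * the iflib admin task.  With MSI-X, vector 0 (mgb_admin_intr) services
 * the interrupt self-test and any RX/TX status still mapped to it, while
 * each RX queue vector (mgb_rxq_intr) masks and acknowledges its interrupt
 * and lets iflib run the RX task; the queue interrupt is re-enabled later
 * from mgb_rx_queue_intr_enable().
 */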
732 static int
733 mgb_legacy_intr(void *xsc)
734 {
735 	struct mgb_softc *sc;
736 
737 	sc = xsc;
738 	iflib_admin_intr_deferred(sc->ctx);
739 	return (FILTER_HANDLED);
740 }
741 
742 static int
743 mgb_rxq_intr(void *xsc)
744 {
745 	struct mgb_softc *sc;
746 	if_softc_ctx_t scctx;
747 	uint32_t intr_sts, intr_en;
748 	int qidx;
749 
750 	sc = xsc;
751 	scctx = iflib_get_softc_ctx(sc->ctx);
752 
753 	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
754 	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
755 	intr_sts &= intr_en;
756 
757 	for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
758 		if ((intr_sts & MGB_INTR_STS_RX(qidx)) != 0) {
759 			CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
760 			    MGB_INTR_STS_RX(qidx));
761 			CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_RX(qidx));
762 		}
763 	}
764 	return (FILTER_SCHEDULE_THREAD);
765 }
766 
767 static int
768 mgb_admin_intr(void *xsc)
769 {
770 	struct mgb_softc *sc;
771 	if_softc_ctx_t scctx;
772 	uint32_t intr_sts, intr_en;
773 	int qidx;
774 
775 	sc = xsc;
776 	scctx = iflib_get_softc_ctx(sc->ctx);
777 
778 	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
779 	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
780 	intr_sts &= intr_en;
781 
782 	/* TODO: shouldn't continue if suspended */
783 	if ((intr_sts & MGB_INTR_STS_ANY) == 0)
784 		return (FILTER_STRAY);
785 	if ((intr_sts &  MGB_INTR_STS_TEST) != 0) {
786 		sc->isr_test_flag = true;
787 		CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
788 		return (FILTER_HANDLED);
789 	}
790 	if ((intr_sts & MGB_INTR_STS_RX_ANY) != 0) {
791 		for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
792 			if ((intr_sts & MGB_INTR_STS_RX(qidx)) != 0) {
793 				iflib_rx_intr_deferred(sc->ctx, qidx);
794 			}
795 		}
796 		return (FILTER_HANDLED);
797 	}
798 	/* XXX: TX interrupts should not occur */
799 	if ((intr_sts & MGB_INTR_STS_TX_ANY) != 0) {
800 		for (qidx = 0; qidx < scctx->isc_ntxqsets; qidx++) {
801 			if ((intr_sts & MGB_INTR_STS_TX(qidx)) != 0) {
802 				/* clear the interrupt sts and run handler */
803 				CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
804 				    MGB_INTR_STS_TX(qidx));
805 				CSR_WRITE_REG(sc, MGB_INTR_STS,
806 				    MGB_INTR_STS_TX(qidx));
807 				iflib_tx_intr_deferred(sc->ctx, qidx);
808 			}
809 		}
810 		return (FILTER_HANDLED);
811 	}
812 
813 	return (FILTER_SCHEDULE_THREAD);
814 }
815 
816 static int
817 mgb_msix_intr_assign(if_ctx_t ctx, int msix)
818 {
819 	struct mgb_softc *sc;
820 	if_softc_ctx_t scctx;
821 	int error, i, vectorid;
822 	char irq_name[16];
823 
824 	sc = iflib_get_softc(ctx);
825 	scctx = iflib_get_softc_ctx(ctx);
826 
827 	KASSERT(scctx->isc_nrxqsets == 1 && scctx->isc_ntxqsets == 1,
828 	    ("num rxqsets/txqsets != 1 "));
829 
830 	/*
831 	 * First vector should be admin interrupts; other vectors are TX/RX.
832 	 *
833 	 * RIDs start at 1, and vector ids start at 0.
834 	 */
835 	vectorid = 0;
836 	error = iflib_irq_alloc_generic(ctx, &sc->admin_irq, vectorid + 1,
837 	    IFLIB_INTR_ADMIN, mgb_admin_intr, sc, 0, "admin");
838 	if (error) {
839 		device_printf(sc->dev,
840 		    "Failed to register admin interrupt handler\n");
841 		return (error);
842 	}
843 
844 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
845 		vectorid++;
846 		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
847 		error = iflib_irq_alloc_generic(ctx, &sc->rx_irq, vectorid + 1,
848 		    IFLIB_INTR_RXTX, mgb_rxq_intr, sc, i, irq_name);
849 		if (error) {
850 			device_printf(sc->dev,
851 			    "Failed to register rxq %d interrupt handler\n", i);
852 			return (error);
853 		}
854 		CSR_UPDATE_REG(sc, MGB_INTR_VEC_RX_MAP,
855 		    MGB_INTR_VEC_MAP(vectorid, i));
856 	}
857 
858 	/* Not actually mapping hw TX interrupts ... */
859 	for (i = 0; i < scctx->isc_ntxqsets; i++) {
860 		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
861 		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
862 		    irq_name);
863 	}
864 
865 	return (0);
866 }
867 
868 static void
869 mgb_intr_enable_all(if_ctx_t ctx)
870 {
871 	struct mgb_softc *sc;
872 	if_softc_ctx_t scctx;
873 	int i, dmac_enable = 0, intr_sts = 0, vec_en = 0;
874 
875 	sc = iflib_get_softc(ctx);
876 	scctx = iflib_get_softc_ctx(ctx);
877 	intr_sts |= MGB_INTR_STS_ANY;
878 	vec_en |= MGB_INTR_STS_ANY;
879 
880 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
881 		intr_sts |= MGB_INTR_STS_RX(i);
882 		dmac_enable |= MGB_DMAC_RX_INTR_ENBL(i);
883 		vec_en |= MGB_INTR_RX_VEC_STS(i);
884 	}
885 
886 	/* TX interrupts aren't needed ... */
887 
888 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, intr_sts);
889 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, vec_en);
890 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, dmac_enable);
891 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, dmac_enable);
892 }
893 
894 static void
895 mgb_intr_disable_all(if_ctx_t ctx)
896 {
897 	struct mgb_softc *sc;
898 
899 	sc = iflib_get_softc(ctx);
900 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, UINT32_MAX);
901 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_CLR, UINT32_MAX);
902 	CSR_WRITE_REG(sc, MGB_INTR_STS, UINT32_MAX);
903 
904 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_CLR, UINT32_MAX);
905 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, UINT32_MAX);
906 }
907 
908 static int
909 mgb_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
910 {
911 	/* called after successful rx isr */
912 	struct mgb_softc *sc;
913 
914 	sc = iflib_get_softc(ctx);
915 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_RX_VEC_STS(qid));
916 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_RX(qid));
917 
918 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_RX_INTR_ENBL(qid));
919 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_RX_INTR_ENBL(qid));
920 	return (0);
921 }
922 
923 static int
924 mgb_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
925 {
926 	/* XXX: not called (since tx interrupts not used) */
927 	struct mgb_softc *sc;
928 
929 	sc = iflib_get_softc(ctx);
930 
931 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_TX(qid));
932 
933 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_TX_INTR_ENBL(qid));
934 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_TX_INTR_ENBL(qid));
935 	return (0);
936 }
937 
938 static bool
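/*
 * Software interrupt self-test: arm the TEST interrupt, fire it through
 * MGB_INTR_SET and poll for mgb_admin_intr() to observe it and set
 * isr_test_flag.  Only used as a diagnostic from mgb_attach_post().
 */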
939 mgb_intr_test(struct mgb_softc *sc)
940 {
941 	int i;
942 
943 	sc->isr_test_flag = false;
944 	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
945 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_STS_ANY);
946 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET,
947 	    MGB_INTR_STS_ANY | MGB_INTR_STS_TEST);
948 	CSR_WRITE_REG(sc, MGB_INTR_SET, MGB_INTR_STS_TEST);
949 	if (sc->isr_test_flag)
950 		return (true);
951 	for (i = 0; i < MGB_TIMEOUT; i++) {
952 		DELAY(10);
953 		if (sc->isr_test_flag)
954 			break;
955 	}
956 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TEST);
957 	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
958 	return (sc->isr_test_flag);
959 }
960 
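/*
 * Encapsulate an outgoing packet: one descriptor is written per DMA
 * segment (only single-segment packets are handled for now), marked
 * FS/LS with FCS generation requested, and the new producer index is
 * handed back to iflib in ipi_new_pidx.
 */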
961 static int
962 mgb_isc_txd_encap(void *xsc, if_pkt_info_t ipi)
963 {
964 	struct mgb_softc *sc;
965 	if_softc_ctx_t scctx;
966 	struct mgb_ring_data *rdata;
967 	struct mgb_ring_desc *txd;
968 	bus_dma_segment_t *segs;
969 	qidx_t pidx, nsegs;
970 	int i;
971 
972 	KASSERT(ipi->ipi_qsidx == 0,
973 	    ("tried to encap on TX Channel %d.\n", ipi->ipi_qsidx));
974 	sc = xsc;
975 	scctx = iflib_get_softc_ctx(sc->ctx);
976 	rdata = &sc->tx_ring_data;
977 
978 	pidx = ipi->ipi_pidx;
979 	segs = ipi->ipi_segs;
980 	nsegs = ipi->ipi_nsegs;
981 
982 	/* For each seg, create a descriptor */
983 	for (i = 0; i < nsegs; ++i) {
984 		KASSERT(nsegs == 1, ("multisegment packets not supported\n"));
985 		txd = &rdata->ring[pidx];
986 		txd->ctl = htole32(
987 		    (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK) |
988 		    /*
989 		     * XXX: This will be wrong in the multi-segment case:
990 		     * FS should be set only on the first segment and
991 		     * LS only on the last segment of a packet.
992 		     */
993 		    MGB_TX_DESC_CTL_FS | MGB_TX_DESC_CTL_LS |
994 		    MGB_DESC_CTL_FCS);
995 		txd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32(
996 		    segs[i].ds_addr));
997 		txd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32(
998 		    segs[i].ds_addr));
999 		txd->sts = htole32(
1000 		    (segs[i].ds_len << 16) & MGB_DESC_FRAME_LEN_MASK);
1001 		pidx = MGB_NEXT_RING_IDX(pidx);
1002 	}
1003 	ipi->ipi_new_pidx = pidx;
1004 	return (0);
1005 }
1006 
1007 static void
1008 mgb_isc_txd_flush(void *xsc, uint16_t txqid, qidx_t pidx)
1009 {
1010 	struct mgb_softc *sc;
1011 	struct mgb_ring_data *rdata;
1012 
1013 	KASSERT(txqid == 0, ("tried to flush TX Channel %d.\n", txqid));
1014 	sc = xsc;
1015 	rdata = &sc->tx_ring_data;
1016 
1017 	if (rdata->last_tail != pidx) {
1018 		rdata->last_tail = pidx;
1019 		CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(txqid), rdata->last_tail);
1020 	}
1021 }
1022 
1023 static int
1024 mgb_isc_txd_credits_update(void *xsc, uint16_t txqid, bool clear)
1025 {
1026 	struct mgb_softc *sc;
1027 	struct mgb_ring_desc *txd;
1028 	struct mgb_ring_data *rdata;
1029 	int processed = 0;
1030 
1031 	/*
1032 	 * > If clear is true, we need to report the number of TX command ring
1033 	 * > descriptors that have been processed by the device.  If clear is
1034 	 * > false, we just need to report whether or not at least one TX
1035 	 * > command ring descriptor has been processed by the device.
1036 	 * - vmx driver
1037 	 */
1038 	KASSERT(txqid == 0, ("tried to credits_update TX Channel %d.\n",
1039 	    txqid));
1040 	sc = xsc;
1041 	rdata = &sc->tx_ring_data;
1042 
1043 	while (*(rdata->head_wb) != rdata->last_head) {
1044 		if (!clear)
1045 			return (1);
1046 
1047 		txd = &rdata->ring[rdata->last_head];
1048 		memset(txd, 0, sizeof(struct mgb_ring_desc));
1049 		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1050 		processed++;
1051 	}
1052 
1053 	return (processed);
1054 }
1055 
1056 static int
1057 mgb_isc_rxd_available(void *xsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
1058 {
1059 	struct mgb_softc *sc;
1060 	if_softc_ctx_t scctx;
1061 	struct mgb_ring_data *rdata;
1062 	int avail = 0;
1063 
1064 	sc = xsc;
1065 	KASSERT(rxqid == 0, ("tried to check availability in RX Channel %d.\n",
1066 	    rxqid));
1067 
1068 	rdata = &sc->rx_ring_data;
1069 	scctx = iflib_get_softc_ctx(sc->ctx);
1070 	for (; idx != *(rdata->head_wb); idx = MGB_NEXT_RING_IDX(idx)) {
1071 		avail++;
1072 		/* XXX: Could verify desc is device owned here */
1073 		if (avail == budget)
1074 			break;
1075 	}
1076 	return (avail);
1077 }
1078 
1079 static int
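/*
 * Hand a completed receive to iflib.  The head write-back word indicates
 * how far the hardware has advanced; the completed descriptor must
 * currently describe a whole frame (FS and LS both set) and is returned
 * as a single fragment in ri_frags[0].
 */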
1080 mgb_isc_rxd_pkt_get(void *xsc, if_rxd_info_t ri)
1081 {
1082 	struct mgb_softc *sc;
1083 	struct mgb_ring_data *rdata;
1084 	struct mgb_ring_desc rxd;
1085 	int total_len;
1086 
1087 	KASSERT(ri->iri_qsidx == 0,
1088 	    ("tried to get a packet from RX Channel %d\n", ri->iri_qsidx));
1089 	sc = xsc;
1090 	total_len = 0;
1091 	rdata = &sc->rx_ring_data;
1092 
1093 	while (*(rdata->head_wb) != rdata->last_head) {
1094 		/* copy ring desc and do swapping */
1095 		rxd = rdata->ring[rdata->last_head];
1096 		rxd.ctl = le32toh(rxd.ctl);
1097 		rxd.addr.low = le32toh(rxd.addr.low);
1098 		rxd.addr.high = le32toh(rxd.addr.high);
1099 		rxd.sts = le32toh(rxd.sts);
1100 
1101 		if ((rxd.ctl & MGB_DESC_CTL_OWN) != 0) {
1102 			device_printf(sc->dev,
1103 			    "Tried to read descriptor ... "
1104 			    "found that it is still owned by the device.\n");
1105 			return (EINVAL);
1106 		}
1107 		if ((rxd.ctl & MGB_RX_DESC_CTL_FS) == 0) {
1108 			device_printf(sc->dev,
1109 			    "Tried to read descriptor ... "
1110 			    "found that FS is not set.\n");
1112 			return (EINVAL);
1113 		}
1114 		/* XXX: Multi-packet support */
1115 		if ((rxd.ctl & MGB_RX_DESC_CTL_LS) == 0) {
1116 			device_printf(sc->dev,
1117 			    "Tried to read descriptor ... "
1118 			    "found that LS is not set. (Multi-buffer packets not yet supported)\n");
1119 			return (EINVAL);
1120 		}
1121 		ri->iri_frags[0].irf_flid = 0;
1122 		ri->iri_frags[0].irf_idx = rdata->last_head;
1123 		ri->iri_frags[0].irf_len = MGB_DESC_GET_FRAME_LEN(&rxd);
1124 		total_len += ri->iri_frags[0].irf_len;
1125 
1126 		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1127 		break;
1128 	}
1129 	ri->iri_nfrags = 1;
1130 	ri->iri_len = total_len;
1131 
1132 	return (0);
1133 }
1134 
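/*
 * Post fresh receive buffers: store each buffer's bus address in its
 * descriptor and set the OWN bit so the descriptor is handed back to
 * the DMA engine.
 */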
1135 static void
1136 mgb_isc_rxd_refill(void *xsc, if_rxd_update_t iru)
1137 {
1138 	if_softc_ctx_t scctx;
1139 	struct mgb_softc *sc;
1140 	struct mgb_ring_data *rdata;
1141 	struct mgb_ring_desc *rxd;
1142 	uint64_t *paddrs;
1143 	qidx_t *idxs;
1144 	qidx_t idx;
1145 	int count, len;
1146 
1147 	count = iru->iru_count;
1148 	len = iru->iru_buf_size;
1149 	idxs = iru->iru_idxs;
1150 	paddrs = iru->iru_paddrs;
1151 	KASSERT(iru->iru_qsidx == 0,
1152 	    ("tried to refill RX Channel %d.\n", iru->iru_qsidx));
1153 
1154 	sc = xsc;
1155 	scctx = iflib_get_softc_ctx(sc->ctx);
1156 	rdata = &sc->rx_ring_data;
1157 
1158 	while (count > 0) {
1159 		idx = idxs[--count];
1160 		rxd = &rdata->ring[idx];
1161 
1162 		rxd->sts = 0;
1163 		rxd->addr.low =
1164 		    htole32(CSR_TRANSLATE_ADDR_LOW32(paddrs[count]));
1165 		rxd->addr.high =
1166 		    htole32(CSR_TRANSLATE_ADDR_HIGH32(paddrs[count]));
1167 		rxd->ctl = htole32(MGB_DESC_CTL_OWN |
1168 		    (len & MGB_DESC_CTL_BUFLEN_MASK));
1169 	}
1170 	return;
1171 }
1172 
1173 static void
1174 mgb_isc_rxd_flush(void *xsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
1175 {
1176 	struct mgb_softc *sc;
1177 
1178 	sc = xsc;
1179 
1180 	KASSERT(rxqid == 0, ("tried to flush RX Channel %d.\n", rxqid));
1181 	/*
1182 	 * According to the programming guide, last_tail must be set to
1183 	 * the last valid RX descriptor, rather than to the one past that.
1184 	 * Note that this is not true for the TX ring!
1185 	 */
1186 	sc->rx_ring_data.last_tail = MGB_PREV_RING_IDX(pidx);
1187 	CSR_WRITE_REG(sc, MGB_DMA_RX_TAIL(rxqid), sc->rx_ring_data.last_tail);
1188 	return;
1189 }
1190 
1191 static int
1192 mgb_test_bar(struct mgb_softc *sc)
1193 {
1194 	uint32_t id_rev, dev_id, rev;
1195 
1196 	id_rev = CSR_READ_REG(sc, 0);
1197 	dev_id = id_rev >> 16;
1198 	rev = id_rev & 0xFFFF;
1199 	if (dev_id == MGB_LAN7430_DEVICE_ID ||
1200 	    dev_id == MGB_LAN7431_DEVICE_ID) {
1201 		return (0);
1202 	} else {
1203 		device_printf(sc->dev, "ID check failed.\n");
1204 		return (ENXIO);
1205 	}
1206 }
1207 
1208 static int
1209 mgb_alloc_regs(struct mgb_softc *sc)
1210 {
1211 	int rid;
1212 
1213 	rid = PCIR_BAR(MGB_BAR);
1214 	pci_enable_busmaster(sc->dev);
1215 	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1216 	    &rid, RF_ACTIVE);
1217 	if (sc->regs == NULL)
1218 		 return (ENXIO);
1219 
1220 	return (0);
1221 }
1222 
1223 static int
1224 mgb_release_regs(struct mgb_softc *sc)
1225 {
1226 	int error = 0;
1227 
1228 	if (sc->regs != NULL)
1229 		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
1230 		    rman_get_rid(sc->regs), sc->regs);
1231 	sc->regs = NULL;
1232 	pci_disable_busmaster(sc->dev);
1233 	return (error);
1234 }
1235 
1236 static int
1237 mgb_dma_init(struct mgb_softc *sc)
1238 {
1239 	if_softc_ctx_t scctx;
1240 	int ch, error = 0;
1241 
1242 	scctx = iflib_get_softc_ctx(sc->ctx);
1243 
1244 	for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
1245 		if ((error = mgb_dma_rx_ring_init(sc, ch)))
1246 			goto fail;
1247 
1248 	for (ch = 0; ch < scctx->isc_ntxqsets; ch++)
1249 		if ((error = mgb_dma_tx_ring_init(sc, ch)))
1250 			goto fail;
1251 
1252 fail:
1253 	return (error);
1254 }
1255 
1256 static int
1257 mgb_dma_rx_ring_init(struct mgb_softc *sc, int channel)
1258 {
1259 	struct mgb_ring_data *rdata;
1260 	int ring_config, error = 0;
1261 
1262 	rdata = &sc->rx_ring_data;
1263 	mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_RESET);
1264 	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_RX_START, channel),
1265 	    ("Trying to init channels when not in init state\n"));
1266 
1267 	/* write ring address */
1268 	if (rdata->ring_bus_addr == 0) {
1269 		device_printf(sc->dev, "Invalid ring bus addr.\n");
1270 		error = ENXIO;
		goto fail;
1271 	}
1272 
1273 	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_H(channel),
1274 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1275 	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_L(channel),
1276 	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1277 
1278 	/* write head pointer writeback address */
1279 	if (rdata->head_wb_bus_addr == 0) {
1280 		device_printf(sc->dev, "Invalid head wb bus addr.\n");
1281 		error = ENXIO;
		goto fail;
1282 	}
1283 	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_H(channel),
1284 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1285 	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_L(channel),
1286 	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1287 
1288 	/* Enable head pointer writeback */
1289 	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG0(channel), MGB_DMA_HEAD_WB_ENBL);
1290 
1291 	ring_config = CSR_READ_REG(sc, MGB_DMA_RX_CONFIG1(channel));
1292 	/*  ring size */
1293 	ring_config &= ~MGB_DMA_RING_LEN_MASK;
1294 	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1295 	/* packet padding  (PAD_2 is better for IP header alignment ...) */
1296 	ring_config &= ~MGB_DMA_RING_PAD_MASK;
1297 	ring_config |= (MGB_DMA_RING_PAD_0 & MGB_DMA_RING_PAD_MASK);
1298 
1299 	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG1(channel), ring_config);
1300 
1301 	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_RX_HEAD(channel));
1302 
1303 	error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_RESET);
1304 	if (error != 0) {
1305 		device_printf(sc->dev, "Failed to reset RX FCT.\n");
1306 		goto fail;
1307 	}
1308 	error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_ENABLE);
1309 	if (error != 0) {
1310 		device_printf(sc->dev, "Failed to enable RX FCT.\n");
1311 		goto fail;
1312 	}
1313 	error = mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_START);
1314 	if (error != 0)
1315 		device_printf(sc->dev, "Failed to start RX DMAC.\n");
1316 fail:
1317 	return (error);
1318 }
1319 
1320 static int
1321 mgb_dma_tx_ring_init(struct mgb_softc *sc, int channel)
1322 {
1323 	struct mgb_ring_data *rdata;
1324 	int ring_config, error = 0;
1325 
1326 	rdata = &sc->tx_ring_data;
1327 	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel, FCT_RESET))) {
1328 		device_printf(sc->dev, "Failed to reset TX FCT.\n");
1329 		goto fail;
1330 	}
1331 	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel,
1332 	    FCT_ENABLE))) {
1333 		device_printf(sc->dev, "Failed to enable TX FCT.\n");
1334 		goto fail;
1335 	}
1336 	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1337 	    DMAC_RESET))) {
1338 		device_printf(sc->dev, "Failed to reset TX DMAC.\n");
1339 		goto fail;
1340 	}
1341 	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_TX_START, channel),
1342 	    ("Trying to init channels when not in init state\n"));
1343 
1344 	/* write ring address */
1345 	if (rdata->ring_bus_addr == 0) {
1346 		device_printf(sc->dev, "Invalid ring bus addr.\n");
1347 		error = ENXIO;
		goto fail;
1348 	}
1349 	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_H(channel),
1350 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1351 	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_L(channel),
1352 	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1353 
1354 	/* write ring size */
1355 	ring_config = CSR_READ_REG(sc, MGB_DMA_TX_CONFIG1(channel));
1356 	ring_config &= ~MGB_DMA_RING_LEN_MASK;
1357 	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1358 	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG1(channel), ring_config);
1359 
1360 	/* Enable interrupt on completion and head pointer writeback */
1361 	ring_config = (MGB_DMA_HEAD_WB_LS_ENBL | MGB_DMA_HEAD_WB_ENBL);
1362 	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG0(channel), ring_config);
1363 
1364 	/* write head pointer writeback address */
1365 	if (rdata->head_wb_bus_addr == 0) {
1366 		device_printf(sc->dev, "Invalid head wb bus addr.\n");
1367 		error = ENXIO;
		goto fail;
1368 	}
1369 	CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_H(channel),
1370 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1371 	CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_L(channel),
1372 	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1373 
1374 	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_TX_HEAD(channel));
1375 	KASSERT(rdata->last_head == 0, ("MGB_DMA_TX_HEAD was not reset.\n"));
1376 	rdata->last_tail = 0;
1377 	CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(channel), rdata->last_tail);
1378 
1379 	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1380 	    DMAC_START)))
1381 		device_printf(sc->dev, "Failed to start TX DMAC.\n");
1382 fail:
1383 	return (error);
1384 }
1385 
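/*
 * Issue a DMA controller command for one channel.  RESET and STOP poll
 * MGB_DMAC_CMD until the hardware acknowledges the command; START stops
 * the channel first (see the note below) and then issues the start.
 */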
1386 static int
1387 mgb_dmac_control(struct mgb_softc *sc, int start, int channel,
1388     enum mgb_dmac_cmd cmd)
1389 {
1390 	int error = 0;
1391 
1392 	switch (cmd) {
1393 	case DMAC_RESET:
1394 		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1395 		    MGB_DMAC_CMD_RESET(start, channel));
1396 		error = mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0,
1397 		    MGB_DMAC_CMD_RESET(start, channel));
1398 		break;
1399 
1400 	case DMAC_START:
1401 		/*
1402 		 * NOTE: Stopping first simplifies the logic (a start is never
1403 		 * issued from the STOP_PENDING state) at the cost of extra work.
1404 		 */
1405 		error = mgb_dmac_control(sc, start, channel, DMAC_STOP);
1406 		if (error != 0)
1407 			return (error);
1408 		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1409 		    MGB_DMAC_CMD_START(start, channel));
1410 		break;
1411 
1412 	case DMAC_STOP:
1413 		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1414 		    MGB_DMAC_CMD_STOP(start, channel));
1415 		error = mgb_wait_for_bits(sc, MGB_DMAC_CMD,
1416 		    MGB_DMAC_CMD_STOP(start, channel),
1417 		    MGB_DMAC_CMD_START(start, channel));
1418 		break;
1419 	}
1420 	return (error);
1421 }
1422 
1423 static int
1424 mgb_fct_control(struct mgb_softc *sc, int reg, int channel,
1425     enum mgb_fct_cmd cmd)
1426 {
1427 
1428 	switch (cmd) {
1429 	case FCT_RESET:
1430 		CSR_WRITE_REG(sc, reg, MGB_FCT_RESET(channel));
1431 		return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_RESET(channel)));
1432 	case FCT_ENABLE:
1433 		CSR_WRITE_REG(sc, reg, MGB_FCT_ENBL(channel));
1434 		return (0);
1435 	case FCT_DISABLE:
1436 		CSR_WRITE_REG(sc, reg, MGB_FCT_DSBL(channel));
1437 		return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_ENBL(channel)));
1438 	}
1439 }
1440 
1441 static int
1442 mgb_hw_teardown(struct mgb_softc *sc)
1443 {
1444 	int err = 0;
1445 
1446 	/* Stop MAC */
1447 	CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1448 	CSR_CLEAR_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1449 	if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0)))
1450 		return (err);
1451 	if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0)))
1452 		return (err);
1453 	return (err);
1454 }
1455 
1456 static int
1457 mgb_hw_init(struct mgb_softc *sc)
1458 {
1459 	int error = 0;
1460 
1461 	error = mgb_hw_reset(sc);
1462 	if (error != 0)
1463 		goto fail;
1464 
1465 	mgb_mac_init(sc);
1466 
1467 	error = mgb_phy_reset(sc);
1468 	if (error != 0)
1469 		goto fail;
1470 
1471 	error = mgb_dmac_reset(sc);
1472 	if (error != 0)
1473 		goto fail;
1474 
1475 fail:
1476 	return (error);
1477 }
1478 
1479 static int
1480 mgb_hw_reset(struct mgb_softc *sc)
1481 {
1482 
1483 	CSR_UPDATE_REG(sc, MGB_HW_CFG, MGB_LITE_RESET);
1484 	return (mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET));
1485 }
1486 
1487 static int
1488 mgb_mac_init(struct mgb_softc *sc)
1489 {
1490 
1491 	/**
1492 	 * enable automatic duplex detection and
1493 	 * automatic speed detection
1494 	 */
1495 	CSR_UPDATE_REG(sc, MGB_MAC_CR, MGB_MAC_ADD_ENBL | MGB_MAC_ASD_ENBL);
1496 	CSR_UPDATE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1497 	CSR_UPDATE_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1498 
1499 	return (MGB_STS_OK);
1500 }
1501 
1502 static int
1503 mgb_phy_reset(struct mgb_softc *sc)
1504 {
1505 
1506 	CSR_UPDATE_BYTE(sc, MGB_PMT_CTL, MGB_PHY_RESET);
1507 	if (mgb_wait_for_bits(sc, MGB_PMT_CTL, 0, MGB_PHY_RESET) ==
1508 	    MGB_STS_TIMEOUT)
1509 		return (MGB_STS_TIMEOUT);
1510 	return (mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0));
1511 }
1512 
1513 static int
1514 mgb_dmac_reset(struct mgb_softc *sc)
1515 {
1516 
1517 	CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_RESET);
1518 	return (mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_RESET));
1519 }
1520 
1521 static int
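/*
 * Poll a register until all bits in set_bits are set and all bits in
 * clear_bits are clear, waiting 100us between reads for up to MGB_TIMEOUT
 * iterations.  Returns MGB_STS_OK on success, MGB_STS_TIMEOUT otherwise.
 */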
1522 mgb_wait_for_bits(struct mgb_softc *sc, int reg, int set_bits, int clear_bits)
1523 {
1524 	int i, val;
1525 
1526 	i = 0;
1527 	do {
1528 		/*
1529 		 * XXX: The datasheet states the delay should be > 5 microseconds
1530 		 * for device reset.
1531 		 */
1532 		DELAY(100);
1533 		val = CSR_READ_REG(sc, reg);
1534 		if ((val & set_bits) == set_bits && (val & clear_bits) == 0)
1535 			return (MGB_STS_OK);
1536 	} while (i++ < MGB_TIMEOUT);
1537 
1538 	return (MGB_STS_TIMEOUT);
1539 }
1540 
1541 static void
1542 mgb_get_ethaddr(struct mgb_softc *sc, struct ether_addr *dest)
1543 {
1544 
1545 	CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_L, &dest->octet[0], 4);
1546 	CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_H, &dest->octet[4], 2);
1547 }
1548 
1549 static int
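/*
 * MII access: wait for the MDIO interface to go idle, program the PHY and
 * register addresses along with the BUSY and READ/WRITE bits, then wait
 * for BUSY to clear again (for reads, the result is then fetched from
 * MGB_MII_DATA).
 */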
1550 mgb_miibus_readreg(device_t dev, int phy, int reg)
1551 {
1552 	struct mgb_softc *sc;
1553 	int mii_access;
1554 
1555 	sc = iflib_get_softc(device_get_softc(dev));
1556 
1557 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1558 	    MGB_STS_TIMEOUT)
1559 		return (EIO);
1560 	mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1561 	mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1562 	mii_access |= MGB_MII_BUSY | MGB_MII_READ;
1563 	CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1564 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1565 	    MGB_STS_TIMEOUT)
1566 		return (EIO);
1567 	return (CSR_READ_2_BYTES(sc, MGB_MII_DATA));
1568 }
1569 
1570 static int
1571 mgb_miibus_writereg(device_t dev, int phy, int reg, int data)
1572 {
1573 	struct mgb_softc *sc;
1574 	int mii_access;
1575 
1576 	sc = iflib_get_softc(device_get_softc(dev));
1577 
1578 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1579 	    MGB_STS_TIMEOUT)
1580 		return (EIO);
1581 	mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1582 	mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1583 	mii_access |= MGB_MII_BUSY | MGB_MII_WRITE;
1584 	CSR_WRITE_REG(sc, MGB_MII_DATA, data);
1585 	CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1586 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1587 	    MGB_STS_TIMEOUT)
1588 		return (EIO);
1589 	return (0);
1590 }
1591 
1592 /* XXX: May need to lock these up */
1593 static void
1594 mgb_miibus_statchg(device_t dev)
1595 {
1596 	struct mgb_softc *sc;
1597 	struct mii_data *miid;
1598 
1599 	sc = iflib_get_softc(device_get_softc(dev));
1600 	miid = device_get_softc(sc->miibus);
1601 	/* Update baudrate in iflib */
1602 	sc->baudrate = ifmedia_baudrate(miid->mii_media_active);
1603 	iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1604 }
1605 
1606 static void
1607 mgb_miibus_linkchg(device_t dev)
1608 {
1609 	struct mgb_softc *sc;
1610 	struct mii_data *miid;
1611 	int link_state;
1612 
1613 	sc = iflib_get_softc(device_get_softc(dev));
1614 	miid = device_get_softc(sc->miibus);
1615 	/* XXX: copied from miibus_linkchg */
1616 	if (miid->mii_media_status & IFM_AVALID) {
1617 		if (miid->mii_media_status & IFM_ACTIVE)
1618 			link_state = LINK_STATE_UP;
1619 		else
1620 			link_state = LINK_STATE_DOWN;
1621 	} else
1622 		link_state = LINK_STATE_UNKNOWN;
1623 	sc->link_state = link_state;
1624 	iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1625 }
1626