xref: /freebsd/sys/dev/mgb/if_mgb.c (revision 8890ab7758b8a03a68f3fe596f6a5446921631a5)
1*8890ab77SEd Maste /*-
2*8890ab77SEd Maste  * SPDX-License-Identifier: BSD-2-Clause
3*8890ab77SEd Maste  *
4*8890ab77SEd Maste  * Copyright (c) 2019 The FreeBSD Foundation, Inc.
5*8890ab77SEd Maste  *
6*8890ab77SEd Maste  * This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
7*8890ab77SEd Maste  * under sponsorship from the FreeBSD Foundation.
8*8890ab77SEd Maste  *
9*8890ab77SEd Maste  * Redistribution and use in source and binary forms, with or without
10*8890ab77SEd Maste  * modification, are permitted provided that the following conditions
11*8890ab77SEd Maste  * are met:
12*8890ab77SEd Maste  * 1. Redistributions of source code must retain the above copyright
13*8890ab77SEd Maste  *    notice, this list of conditions and the following disclaimer.
14*8890ab77SEd Maste  * 2. Redistributions in binary form must reproduce the above copyright
15*8890ab77SEd Maste  *    notice, this list of conditions and the following disclaimer in the
16*8890ab77SEd Maste  *    documentation and/or other materials provided with the distribution.
17*8890ab77SEd Maste  *
18*8890ab77SEd Maste  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19*8890ab77SEd Maste  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20*8890ab77SEd Maste  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21*8890ab77SEd Maste  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22*8890ab77SEd Maste  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23*8890ab77SEd Maste  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24*8890ab77SEd Maste  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25*8890ab77SEd Maste  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26*8890ab77SEd Maste  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27*8890ab77SEd Maste  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28*8890ab77SEd Maste  * SUCH DAMAGE.
29*8890ab77SEd Maste  */
30*8890ab77SEd Maste #include <sys/cdefs.h>
31*8890ab77SEd Maste __FBSDID("$FreeBSD$");
32*8890ab77SEd Maste 
33*8890ab77SEd Maste /*
34*8890ab77SEd Maste  * Microchip LAN7430/LAN7431 PCIe to Gigabit Ethernet Controller driver.
35*8890ab77SEd Maste  *
36*8890ab77SEd Maste  * Product information:
37*8890ab77SEd Maste  * LAN7430 https://www.microchip.com/wwwproducts/en/LAN7430
38*8890ab77SEd Maste  *   - Integrated IEEE 802.3 compliant PHY
39*8890ab77SEd Maste  * LAN7431 https://www.microchip.com/wwwproducts/en/LAN7431
40*8890ab77SEd Maste  *   - RGMII Interface
41*8890ab77SEd Maste  *
42*8890ab77SEd Maste  * This driver uses the iflib interface and the default 'ukphy' PHY driver.
43*8890ab77SEd Maste  *
44*8890ab77SEd Maste  * UNIMPLEMENTED FEATURES
45*8890ab77SEd Maste  * ----------------------
46*8890ab77SEd Maste  * A number of features supported by the LAN743X device are not yet implemented in
47*8890ab77SEd Maste  * this driver:
48*8890ab77SEd Maste  *
49*8890ab77SEd Maste  * - Multiple (up to 4) RX queue support
50*8890ab77SEd Maste  *   - Requires removing the single-queue asserts and allocating one
51*8890ab77SEd Maste  *     `rx_ring_data` struct per queue (e.g. based on ncpus).
52*8890ab77SEd Maste  * - RX/TX Checksum Offloading support
53*8890ab77SEd Maste  * - VLAN support
54*8890ab77SEd Maste  * - Receive Packet Filtering (Multicast Perfect/Hash Address) support
55*8890ab77SEd Maste  * - Wake on LAN (WoL) support
56*8890ab77SEd Maste  * - TX LSO support
57*8890ab77SEd Maste  * - Receive Side Scaling (RSS) support
58*8890ab77SEd Maste  * - Debugging Capabilities:
59*8890ab77SEd Maste  *   - Could include MAC statistics and error status registers in sysctl
60*8890ab77SEd Maste  *     (an illustrative sketch follows mgb_dump_some_stats() below).
61*8890ab77SEd Maste  */
62*8890ab77SEd Maste 
63*8890ab77SEd Maste #include <sys/param.h>
64*8890ab77SEd Maste #include <sys/bus.h>
65*8890ab77SEd Maste #include <sys/endian.h>
66*8890ab77SEd Maste #include <sys/kdb.h>
67*8890ab77SEd Maste #include <sys/kernel.h>
68*8890ab77SEd Maste #include <sys/module.h>
69*8890ab77SEd Maste #include <sys/rman.h>
70*8890ab77SEd Maste #include <sys/socket.h>
71*8890ab77SEd Maste #include <sys/sockio.h>
72*8890ab77SEd Maste #include <machine/bus.h>
73*8890ab77SEd Maste #include <machine/resource.h>
74*8890ab77SEd Maste 
75*8890ab77SEd Maste #include <net/ethernet.h>
76*8890ab77SEd Maste #include <net/if.h>
77*8890ab77SEd Maste #include <net/if_var.h>
78*8890ab77SEd Maste #include <net/if_types.h>
79*8890ab77SEd Maste #include <net/if_media.h>
80*8890ab77SEd Maste #include <net/iflib.h>
81*8890ab77SEd Maste 
82*8890ab77SEd Maste #include <dev/mgb/if_mgb.h>
83*8890ab77SEd Maste #include <dev/mii/mii.h>
84*8890ab77SEd Maste #include <dev/mii/miivar.h>
85*8890ab77SEd Maste #include <dev/pci/pcireg.h>
86*8890ab77SEd Maste #include <dev/pci/pcivar.h>
87*8890ab77SEd Maste 
88*8890ab77SEd Maste #include "ifdi_if.h"
89*8890ab77SEd Maste #include "miibus_if.h"
90*8890ab77SEd Maste 
91*8890ab77SEd Maste static pci_vendor_info_t mgb_vendor_info_array[] = {
92*8890ab77SEd Maste 	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7430_DEVICE_ID,
93*8890ab77SEd Maste 	    "Microchip LAN7430 PCIe Gigabit Ethernet Controller"),
94*8890ab77SEd Maste 	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7431_DEVICE_ID,
95*8890ab77SEd Maste 	    "Microchip LAN7431 PCIe Gigabit Ethernet Controller"),
96*8890ab77SEd Maste 	PVID_END
97*8890ab77SEd Maste };
98*8890ab77SEd Maste 
99*8890ab77SEd Maste /* Device methods */
100*8890ab77SEd Maste static device_register_t		mgb_register;
101*8890ab77SEd Maste 
102*8890ab77SEd Maste /* IFLIB methods */
103*8890ab77SEd Maste static ifdi_attach_pre_t		mgb_attach_pre;
104*8890ab77SEd Maste static ifdi_attach_post_t		mgb_attach_post;
105*8890ab77SEd Maste static ifdi_detach_t			mgb_detach;
106*8890ab77SEd Maste 
107*8890ab77SEd Maste static ifdi_tx_queues_alloc_t		mgb_tx_queues_alloc;
108*8890ab77SEd Maste static ifdi_rx_queues_alloc_t		mgb_rx_queues_alloc;
109*8890ab77SEd Maste static ifdi_queues_free_t		mgb_queues_free;
110*8890ab77SEd Maste 
111*8890ab77SEd Maste static ifdi_init_t			mgb_init;
112*8890ab77SEd Maste static ifdi_stop_t			mgb_stop;
113*8890ab77SEd Maste 
114*8890ab77SEd Maste static ifdi_msix_intr_assign_t		mgb_msix_intr_assign;
115*8890ab77SEd Maste static ifdi_tx_queue_intr_enable_t	mgb_tx_queue_intr_enable;
116*8890ab77SEd Maste static ifdi_rx_queue_intr_enable_t	mgb_rx_queue_intr_enable;
117*8890ab77SEd Maste static ifdi_intr_enable_t		mgb_intr_enable_all;
118*8890ab77SEd Maste static ifdi_intr_disable_t		mgb_intr_disable_all;
119*8890ab77SEd Maste 
120*8890ab77SEd Maste /* IFLIB_TXRX methods */
121*8890ab77SEd Maste static int				mgb_isc_txd_encap(void *,
122*8890ab77SEd Maste 					    if_pkt_info_t);
123*8890ab77SEd Maste static void				mgb_isc_txd_flush(void *,
124*8890ab77SEd Maste 					    uint16_t, qidx_t);
125*8890ab77SEd Maste static int				mgb_isc_txd_credits_update(void *,
126*8890ab77SEd Maste 					    uint16_t, bool);
127*8890ab77SEd Maste static int				mgb_isc_rxd_available(void *,
128*8890ab77SEd Maste 					    uint16_t, qidx_t, qidx_t);
129*8890ab77SEd Maste static int				mgb_isc_rxd_pkt_get(void *,
130*8890ab77SEd Maste 					    if_rxd_info_t);
131*8890ab77SEd Maste static void				mgb_isc_rxd_refill(void *,
132*8890ab77SEd Maste 					    if_rxd_update_t);
133*8890ab77SEd Maste static void				mgb_isc_rxd_flush(void *,
134*8890ab77SEd Maste 					    uint16_t, uint8_t, qidx_t);
135*8890ab77SEd Maste 
136*8890ab77SEd Maste /* Interrupts */
137*8890ab77SEd Maste static driver_filter_t			mgb_legacy_intr;
138*8890ab77SEd Maste static driver_filter_t			mgb_admin_intr;
139*8890ab77SEd Maste static driver_filter_t			mgb_rxq_intr;
140*8890ab77SEd Maste static bool				mgb_intr_test(struct mgb_softc *);
141*8890ab77SEd Maste 
142*8890ab77SEd Maste /* MII methods */
143*8890ab77SEd Maste static miibus_readreg_t			mgb_miibus_readreg;
144*8890ab77SEd Maste static miibus_writereg_t		mgb_miibus_writereg;
145*8890ab77SEd Maste static miibus_linkchg_t			mgb_miibus_linkchg;
146*8890ab77SEd Maste static miibus_statchg_t			mgb_miibus_statchg;
147*8890ab77SEd Maste 
148*8890ab77SEd Maste static int				mgb_media_change(if_t);
149*8890ab77SEd Maste static void				mgb_media_status(if_t,
150*8890ab77SEd Maste 					    struct ifmediareq *);
151*8890ab77SEd Maste 
152*8890ab77SEd Maste /* Helper/Test functions */
153*8890ab77SEd Maste static int				mgb_test_bar(struct mgb_softc *);
154*8890ab77SEd Maste static int				mgb_alloc_regs(struct mgb_softc *);
155*8890ab77SEd Maste static int				mgb_release_regs(struct mgb_softc *);
156*8890ab77SEd Maste 
157*8890ab77SEd Maste static void				mgb_get_ethaddr(struct mgb_softc *,
158*8890ab77SEd Maste 					    struct ether_addr *);
159*8890ab77SEd Maste 
160*8890ab77SEd Maste static int				mgb_wait_for_bits(struct mgb_softc *,
161*8890ab77SEd Maste 					    int, int, int);
162*8890ab77SEd Maste 
163*8890ab77SEd Maste /* H/W init, reset and teardown helpers */
164*8890ab77SEd Maste static int				mgb_hw_init(struct mgb_softc *);
165*8890ab77SEd Maste static int				mgb_hw_teardown(struct mgb_softc *);
166*8890ab77SEd Maste static int				mgb_hw_reset(struct mgb_softc *);
167*8890ab77SEd Maste static int				mgb_mac_init(struct mgb_softc *);
168*8890ab77SEd Maste static int				mgb_dmac_reset(struct mgb_softc *);
169*8890ab77SEd Maste static int				mgb_phy_reset(struct mgb_softc *);
170*8890ab77SEd Maste 
171*8890ab77SEd Maste static int				mgb_dma_init(struct mgb_softc *);
172*8890ab77SEd Maste static int				mgb_dma_tx_ring_init(struct mgb_softc *,
173*8890ab77SEd Maste 					    int);
174*8890ab77SEd Maste static int				mgb_dma_rx_ring_init(struct mgb_softc *,
175*8890ab77SEd Maste 					    int);
176*8890ab77SEd Maste 
177*8890ab77SEd Maste static int				mgb_dmac_control(struct mgb_softc *,
178*8890ab77SEd Maste 					    int, int, enum mgb_dmac_cmd);
179*8890ab77SEd Maste static int				mgb_fct_control(struct mgb_softc *,
180*8890ab77SEd Maste 					    int, int, enum mgb_fct_cmd);
181*8890ab77SEd Maste 
182*8890ab77SEd Maste /*********************************************************************
183*8890ab77SEd Maste  *  FreeBSD Device Interface Entry Points
184*8890ab77SEd Maste  *********************************************************************/
185*8890ab77SEd Maste 
186*8890ab77SEd Maste static device_method_t mgb_methods[] = {
187*8890ab77SEd Maste 	/* Device interface */
188*8890ab77SEd Maste 	DEVMETHOD(device_register,	mgb_register),
189*8890ab77SEd Maste 	DEVMETHOD(device_probe,		iflib_device_probe),
190*8890ab77SEd Maste 	DEVMETHOD(device_attach,	iflib_device_attach),
191*8890ab77SEd Maste 	DEVMETHOD(device_detach,	iflib_device_detach),
192*8890ab77SEd Maste 	DEVMETHOD(device_shutdown,	iflib_device_shutdown),
193*8890ab77SEd Maste 	DEVMETHOD(device_suspend,	iflib_device_suspend),
194*8890ab77SEd Maste 	DEVMETHOD(device_resume,	iflib_device_resume),
195*8890ab77SEd Maste 
196*8890ab77SEd Maste 	/* MII Interface */
197*8890ab77SEd Maste 	DEVMETHOD(miibus_readreg,	mgb_miibus_readreg),
198*8890ab77SEd Maste 	DEVMETHOD(miibus_writereg,	mgb_miibus_writereg),
199*8890ab77SEd Maste 	DEVMETHOD(miibus_linkchg,	mgb_miibus_linkchg),
200*8890ab77SEd Maste 	DEVMETHOD(miibus_statchg,	mgb_miibus_statchg),
201*8890ab77SEd Maste 
202*8890ab77SEd Maste 	DEVMETHOD_END
203*8890ab77SEd Maste };
204*8890ab77SEd Maste 
205*8890ab77SEd Maste static driver_t mgb_driver = {
206*8890ab77SEd Maste 	"mgb", mgb_methods, sizeof(struct mgb_softc)
207*8890ab77SEd Maste };
208*8890ab77SEd Maste 
209*8890ab77SEd Maste devclass_t mgb_devclass;
210*8890ab77SEd Maste DRIVER_MODULE(mgb, pci, mgb_driver, mgb_devclass, NULL, NULL);
211*8890ab77SEd Maste IFLIB_PNP_INFO(pci, mgb, mgb_vendor_info_array);
212*8890ab77SEd Maste MODULE_VERSION(mgb, 1);
213*8890ab77SEd Maste 
214*8890ab77SEd Maste #if 0 /* MIIBUS_DEBUG */
215*8890ab77SEd Maste /* If MIIBUS debug code is in attach, module order matters; use the ordered variant below instead. */
216*8890ab77SEd Maste DRIVER_MODULE_ORDERED(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL,
217*8890ab77SEd Maste     SI_ORDER_ANY);
218*8890ab77SEd Maste #endif /* MIIBUS_DEBUG */
219*8890ab77SEd Maste DRIVER_MODULE(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL);
220*8890ab77SEd Maste 
221*8890ab77SEd Maste MODULE_DEPEND(mgb, pci, 1, 1, 1);
222*8890ab77SEd Maste MODULE_DEPEND(mgb, ether, 1, 1, 1);
223*8890ab77SEd Maste MODULE_DEPEND(mgb, miibus, 1, 1, 1);
224*8890ab77SEd Maste MODULE_DEPEND(mgb, iflib, 1, 1, 1);
225*8890ab77SEd Maste 
226*8890ab77SEd Maste static device_method_t mgb_iflib_methods[] = {
227*8890ab77SEd Maste 	DEVMETHOD(ifdi_attach_pre, mgb_attach_pre),
228*8890ab77SEd Maste 	DEVMETHOD(ifdi_attach_post, mgb_attach_post),
229*8890ab77SEd Maste 	DEVMETHOD(ifdi_detach, mgb_detach),
230*8890ab77SEd Maste 
231*8890ab77SEd Maste 	DEVMETHOD(ifdi_init, mgb_init),
232*8890ab77SEd Maste 	DEVMETHOD(ifdi_stop, mgb_stop),
233*8890ab77SEd Maste 
234*8890ab77SEd Maste 	DEVMETHOD(ifdi_tx_queues_alloc, mgb_tx_queues_alloc),
235*8890ab77SEd Maste 	DEVMETHOD(ifdi_rx_queues_alloc, mgb_rx_queues_alloc),
236*8890ab77SEd Maste 	DEVMETHOD(ifdi_queues_free, mgb_queues_free),
237*8890ab77SEd Maste 
238*8890ab77SEd Maste 	DEVMETHOD(ifdi_msix_intr_assign, mgb_msix_intr_assign),
239*8890ab77SEd Maste 	DEVMETHOD(ifdi_tx_queue_intr_enable, mgb_tx_queue_intr_enable),
240*8890ab77SEd Maste 	DEVMETHOD(ifdi_rx_queue_intr_enable, mgb_rx_queue_intr_enable),
241*8890ab77SEd Maste 	DEVMETHOD(ifdi_intr_enable, mgb_intr_enable_all),
242*8890ab77SEd Maste 	DEVMETHOD(ifdi_intr_disable, mgb_intr_disable_all),
243*8890ab77SEd Maste 
244*8890ab77SEd Maste 
245*8890ab77SEd Maste #if 0 /* Not yet implemented IFLIB methods */
246*8890ab77SEd Maste 	/*
247*8890ab77SEd Maste 	 * Set multicast addresses, mtu and promiscuous mode
248*8890ab77SEd Maste 	 */
249*8890ab77SEd Maste 	DEVMETHOD(ifdi_multi_set, mgb_multi_set),
250*8890ab77SEd Maste 	DEVMETHOD(ifdi_mtu_set, mgb_mtu_set),
251*8890ab77SEd Maste 	DEVMETHOD(ifdi_promisc_set, mgb_promisc_set),
252*8890ab77SEd Maste 
253*8890ab77SEd Maste 	/*
254*8890ab77SEd Maste 	 * Needed for VLAN support
255*8890ab77SEd Maste 	 */
256*8890ab77SEd Maste 	DEVMETHOD(ifdi_vlan_register, mgb_vlan_register),
257*8890ab77SEd Maste 	DEVMETHOD(ifdi_vlan_unregister, mgb_vlan_unregister),
258*8890ab77SEd Maste 
259*8890ab77SEd Maste 	/*
260*8890ab77SEd Maste 	 * Needed for WOL support
261*8890ab77SEd Maste 	 * at the very least.
262*8890ab77SEd Maste 	 */
263*8890ab77SEd Maste 	DEVMETHOD(ifdi_shutdown, mgb_shutdown),
264*8890ab77SEd Maste 	DEVMETHOD(ifdi_suspend, mgb_suspend),
265*8890ab77SEd Maste 	DEVMETHOD(ifdi_resume, mgb_resume),
266*8890ab77SEd Maste #endif /* Not yet implemented IFLIB methods */
267*8890ab77SEd Maste 	DEVMETHOD_END
268*8890ab77SEd Maste };
269*8890ab77SEd Maste 
270*8890ab77SEd Maste static driver_t mgb_iflib_driver = {
271*8890ab77SEd Maste 	"mgb", mgb_iflib_methods, sizeof(struct mgb_softc)
272*8890ab77SEd Maste };
273*8890ab77SEd Maste 
274*8890ab77SEd Maste struct if_txrx mgb_txrx = {
275*8890ab77SEd Maste 	.ift_txd_encap = mgb_isc_txd_encap,
276*8890ab77SEd Maste 	.ift_txd_flush = mgb_isc_txd_flush,
277*8890ab77SEd Maste 	.ift_txd_credits_update = mgb_isc_txd_credits_update,
278*8890ab77SEd Maste 	.ift_rxd_available = mgb_isc_rxd_available,
279*8890ab77SEd Maste 	.ift_rxd_pkt_get = mgb_isc_rxd_pkt_get,
280*8890ab77SEd Maste 	.ift_rxd_refill = mgb_isc_rxd_refill,
281*8890ab77SEd Maste 	.ift_rxd_flush = mgb_isc_rxd_flush,
282*8890ab77SEd Maste 
283*8890ab77SEd Maste 	.ift_legacy_intr = mgb_legacy_intr
284*8890ab77SEd Maste };
285*8890ab77SEd Maste 
286*8890ab77SEd Maste struct if_shared_ctx mgb_sctx_init = {
287*8890ab77SEd Maste 	.isc_magic = IFLIB_MAGIC,
288*8890ab77SEd Maste 
289*8890ab77SEd Maste 	.isc_q_align = PAGE_SIZE,
290*8890ab77SEd Maste 	.isc_admin_intrcnt = 1,
291*8890ab77SEd Maste 	.isc_flags = IFLIB_DRIVER_MEDIA /* | IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ*/,
292*8890ab77SEd Maste 
293*8890ab77SEd Maste 	.isc_vendor_info = mgb_vendor_info_array,
294*8890ab77SEd Maste 	.isc_driver_version = "1",
295*8890ab77SEd Maste 	.isc_driver = &mgb_iflib_driver,
296*8890ab77SEd Maste 	/* 2 queues per set for TX and RX (ring queue, head writeback queue) */
297*8890ab77SEd Maste 	.isc_ntxqs = 2,
298*8890ab77SEd Maste 
299*8890ab77SEd Maste 	.isc_tx_maxsize = MGB_DMA_MAXSEGS * MCLBYTES,
300*8890ab77SEd Maste 	/* .isc_tx_nsegments = MGB_DMA_MAXSEGS, */
301*8890ab77SEd Maste 	.isc_tx_maxsegsize = MCLBYTES,
302*8890ab77SEd Maste 
303*8890ab77SEd Maste 	.isc_ntxd_min = {1, 1}, /* Will want to make this bigger */
304*8890ab77SEd Maste 	.isc_ntxd_max = {MGB_DMA_RING_SIZE, 1},
305*8890ab77SEd Maste 	.isc_ntxd_default = {MGB_DMA_RING_SIZE, 1},
306*8890ab77SEd Maste 
307*8890ab77SEd Maste 	.isc_nrxqs = 2,
308*8890ab77SEd Maste 
309*8890ab77SEd Maste 	.isc_rx_maxsize = MCLBYTES,
310*8890ab77SEd Maste 	.isc_rx_nsegments = 1,
311*8890ab77SEd Maste 	.isc_rx_maxsegsize = MCLBYTES,
312*8890ab77SEd Maste 
313*8890ab77SEd Maste 	.isc_nrxd_min = {1, 1}, /* Will want to make this bigger */
314*8890ab77SEd Maste 	.isc_nrxd_max = {MGB_DMA_RING_SIZE, 1},
315*8890ab77SEd Maste 	.isc_nrxd_default = {MGB_DMA_RING_SIZE, 1},
316*8890ab77SEd Maste 
317*8890ab77SEd Maste 	.isc_nfl = 1, /* one free list since there is only one queue */
318*8890ab77SEd Maste #if 0 /* UNUSED_CTX */
319*8890ab77SEd Maste 
320*8890ab77SEd Maste 	.isc_tso_maxsize = MGB_TSO_MAXSIZE + sizeof(struct ether_vlan_header),
321*8890ab77SEd Maste 	.isc_tso_maxsegsize = MGB_TX_MAXSEGSIZE,
322*8890ab77SEd Maste #endif /* UNUSED_CTX */
323*8890ab77SEd Maste };
324*8890ab77SEd Maste 
325*8890ab77SEd Maste /*********************************************************************/
326*8890ab77SEd Maste 
327*8890ab77SEd Maste 
328*8890ab77SEd Maste static void *
329*8890ab77SEd Maste mgb_register(device_t dev)
330*8890ab77SEd Maste {
331*8890ab77SEd Maste 
332*8890ab77SEd Maste 	return (&mgb_sctx_init);
333*8890ab77SEd Maste }
334*8890ab77SEd Maste 
335*8890ab77SEd Maste static int
336*8890ab77SEd Maste mgb_attach_pre(if_ctx_t ctx)
337*8890ab77SEd Maste {
338*8890ab77SEd Maste 	struct mgb_softc *sc;
339*8890ab77SEd Maste 	if_softc_ctx_t scctx;
340*8890ab77SEd Maste 	int error, phyaddr, rid;
341*8890ab77SEd Maste 	struct ether_addr hwaddr;
342*8890ab77SEd Maste 	struct mii_data *miid;
343*8890ab77SEd Maste 
344*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
345*8890ab77SEd Maste 	sc->ctx = ctx;
346*8890ab77SEd Maste 	sc->dev = iflib_get_dev(ctx);
347*8890ab77SEd Maste 	scctx = iflib_get_softc_ctx(ctx);
348*8890ab77SEd Maste 
349*8890ab77SEd Maste 	/* IFLIB required setup */
350*8890ab77SEd Maste 	scctx->isc_txrx = &mgb_txrx;
351*8890ab77SEd Maste 	scctx->isc_tx_nsegments = MGB_DMA_MAXSEGS;
352*8890ab77SEd Maste 	/* Ring desc queues */
353*8890ab77SEd Maste 	scctx->isc_txqsizes[0] = sizeof(struct mgb_ring_desc) *
354*8890ab77SEd Maste 	    scctx->isc_ntxd[0];
355*8890ab77SEd Maste 	scctx->isc_rxqsizes[0] = sizeof(struct mgb_ring_desc) *
356*8890ab77SEd Maste 	    scctx->isc_nrxd[0];
357*8890ab77SEd Maste 
358*8890ab77SEd Maste 	/* Head WB queues */
359*8890ab77SEd Maste 	scctx->isc_txqsizes[1] = sizeof(uint32_t) * scctx->isc_ntxd[1];
360*8890ab77SEd Maste 	scctx->isc_rxqsizes[1] = sizeof(uint32_t) * scctx->isc_nrxd[1];
361*8890ab77SEd Maste 
362*8890ab77SEd Maste 	/* XXX: Must have 1 txqset, but can have up to 4 rxqsets */
363*8890ab77SEd Maste 	scctx->isc_nrxqsets = 1;
364*8890ab77SEd Maste 	scctx->isc_ntxqsets = 1;
365*8890ab77SEd Maste 
366*8890ab77SEd Maste 	/* scctx->isc_tx_csum_flags = (CSUM_TCP | CSUM_UDP) |
367*8890ab77SEd Maste 	    (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TSO */
368*8890ab77SEd Maste 	scctx->isc_tx_csum_flags = 0;
369*8890ab77SEd Maste 	scctx->isc_capabilities = scctx->isc_capenable = 0;
370*8890ab77SEd Maste #if 0
371*8890ab77SEd Maste 	/*
372*8890ab77SEd Maste 	 * CSUM, TSO and VLAN support are TBD
373*8890ab77SEd Maste 	 */
374*8890ab77SEd Maste 	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
375*8890ab77SEd Maste 	    IFCAP_TSO4 | IFCAP_TSO6 |
376*8890ab77SEd Maste 	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
377*8890ab77SEd Maste 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
378*8890ab77SEd Maste 	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO |
379*8890ab77SEd Maste 	    IFCAP_JUMBO_MTU;
380*8890ab77SEd Maste 	scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
381*8890ab77SEd Maste #endif
382*8890ab77SEd Maste 
383*8890ab77SEd Maste 	/* get the BAR */
384*8890ab77SEd Maste 	error = mgb_alloc_regs(sc);
385*8890ab77SEd Maste 	if (error != 0) {
386*8890ab77SEd Maste 		device_printf(sc->dev,
387*8890ab77SEd Maste 		    "Unable to allocate bus resource: registers.\n");
388*8890ab77SEd Maste 		goto fail;
389*8890ab77SEd Maste 	}
390*8890ab77SEd Maste 
391*8890ab77SEd Maste 	error = mgb_test_bar(sc);
392*8890ab77SEd Maste 	if (error != 0)
393*8890ab77SEd Maste 		goto fail;
394*8890ab77SEd Maste 
395*8890ab77SEd Maste 	error = mgb_hw_init(sc);
396*8890ab77SEd Maste 	if (error != 0) {
397*8890ab77SEd Maste 		device_printf(sc->dev,
398*8890ab77SEd Maste 		    "MGB device init failed. (err: %d)\n", error);
399*8890ab77SEd Maste 		goto fail;
400*8890ab77SEd Maste 	}
401*8890ab77SEd Maste 
402*8890ab77SEd Maste 	switch (pci_get_device(sc->dev)) {
404*8890ab77SEd Maste 	case MGB_LAN7430_DEVICE_ID:
405*8890ab77SEd Maste 		phyaddr = 1;
406*8890ab77SEd Maste 		break;
407*8890ab77SEd Maste 	case MGB_LAN7431_DEVICE_ID:
408*8890ab77SEd Maste 	default:
409*8890ab77SEd Maste 		phyaddr = MII_PHY_ANY;
410*8890ab77SEd Maste 		break;
411*8890ab77SEd Maste 	}
412*8890ab77SEd Maste 
413*8890ab77SEd Maste 	/* XXX: Would be nice(r) if locked methods were here */
414*8890ab77SEd Maste 	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(ctx),
415*8890ab77SEd Maste 	    mgb_media_change, mgb_media_status,
416*8890ab77SEd Maste 	    BMSR_DEFCAPMASK, phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE);
417*8890ab77SEd Maste 	if (error != 0) {
418*8890ab77SEd Maste 		device_printf(sc->dev, "Failed to attach MII interface\n");
419*8890ab77SEd Maste 		goto fail;
420*8890ab77SEd Maste 	}
421*8890ab77SEd Maste 
422*8890ab77SEd Maste 	miid = device_get_softc(sc->miibus);
423*8890ab77SEd Maste 	scctx->isc_media = &miid->mii_media;
424*8890ab77SEd Maste 
425*8890ab77SEd Maste 	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
426*8890ab77SEd Maste 	/* Set up the PBA BAR. */
427*8890ab77SEd Maste 	rid = pci_msix_pba_bar(sc->dev);
428*8890ab77SEd Maste 	if (rid != scctx->isc_msix_bar) {
429*8890ab77SEd Maste 		sc->pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
430*8890ab77SEd Maste 		    &rid, RF_ACTIVE);
431*8890ab77SEd Maste 		if (sc->pba == NULL) {
432*8890ab77SEd Maste 			error = ENXIO;
433*8890ab77SEd Maste 			device_printf(sc->dev, "Failed to setup PBA BAR\n");
434*8890ab77SEd Maste 			goto fail;
435*8890ab77SEd Maste 		}
436*8890ab77SEd Maste 	}
437*8890ab77SEd Maste 
438*8890ab77SEd Maste 	mgb_get_ethaddr(sc, &hwaddr);
439*8890ab77SEd Maste 	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
440*8890ab77SEd Maste 	    ETHER_IS_MULTICAST(hwaddr.octet) ||
441*8890ab77SEd Maste 	    ETHER_IS_ZERO(hwaddr.octet))
442*8890ab77SEd Maste 		ether_gen_addr(iflib_get_ifp(ctx), &hwaddr);
443*8890ab77SEd Maste 
444*8890ab77SEd Maste 	 * XXX: If the MAC address was generated, the Linux driver
445*8890ab77SEd Maste 	 * XXX: if the MAC address was generated the linux driver
446*8890ab77SEd Maste 	 * writes it back to the device.
447*8890ab77SEd Maste 	 */
448*8890ab77SEd Maste 	iflib_set_mac(ctx, hwaddr.octet);
449*8890ab77SEd Maste 
450*8890ab77SEd Maste 	/* Map all vectors to vector 0 (admin interrupts) by default. */
451*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_VEC_RX_MAP, 0);
452*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_VEC_TX_MAP, 0);
453*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_VEC_OTHER_MAP, 0);
454*8890ab77SEd Maste 
455*8890ab77SEd Maste 	return (0);
456*8890ab77SEd Maste 
457*8890ab77SEd Maste fail:
458*8890ab77SEd Maste 	mgb_detach(ctx);
459*8890ab77SEd Maste 	return (error);
460*8890ab77SEd Maste }
461*8890ab77SEd Maste 
462*8890ab77SEd Maste static int
463*8890ab77SEd Maste mgb_attach_post(if_ctx_t ctx)
464*8890ab77SEd Maste {
465*8890ab77SEd Maste 	struct mgb_softc *sc;
466*8890ab77SEd Maste 
467*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
468*8890ab77SEd Maste 
469*8890ab77SEd Maste 	device_printf(sc->dev, "Interrupt test: %s\n",
470*8890ab77SEd Maste 	    (mgb_intr_test(sc) ? "PASS" : "FAIL"));
471*8890ab77SEd Maste 
472*8890ab77SEd Maste 	return (0);
473*8890ab77SEd Maste }
474*8890ab77SEd Maste 
475*8890ab77SEd Maste static int
476*8890ab77SEd Maste mgb_detach(if_ctx_t ctx)
477*8890ab77SEd Maste {
478*8890ab77SEd Maste 	struct mgb_softc *sc;
479*8890ab77SEd Maste 	int error;
480*8890ab77SEd Maste 
481*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
482*8890ab77SEd Maste 
483*8890ab77SEd Maste 	/* XXX: Should report errors but still detach everything. */
484*8890ab77SEd Maste 	error = mgb_hw_teardown(sc);
485*8890ab77SEd Maste 
486*8890ab77SEd Maste 	/* Release IRQs */
487*8890ab77SEd Maste 	iflib_irq_free(ctx, &sc->rx_irq);
488*8890ab77SEd Maste 	iflib_irq_free(ctx, &sc->admin_irq);
489*8890ab77SEd Maste 
490*8890ab77SEd Maste 	if (sc->miibus != NULL)
491*8890ab77SEd Maste 		device_delete_child(sc->dev, sc->miibus);
492*8890ab77SEd Maste 
493*8890ab77SEd Maste 	if (sc->pba != NULL)
494*8890ab77SEd Maste 		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
495*8890ab77SEd Maste 		    rman_get_rid(sc->pba), sc->pba);
496*8890ab77SEd Maste 	sc->pba = NULL;
497*8890ab77SEd Maste 
498*8890ab77SEd Maste 	error = mgb_release_regs(sc);
499*8890ab77SEd Maste 
500*8890ab77SEd Maste 	return (error);
501*8890ab77SEd Maste }
502*8890ab77SEd Maste 
503*8890ab77SEd Maste static int
504*8890ab77SEd Maste mgb_media_change(if_t ifp)
505*8890ab77SEd Maste {
506*8890ab77SEd Maste 	struct mii_data *miid;
507*8890ab77SEd Maste 	struct mii_softc *miisc;
508*8890ab77SEd Maste 	struct mgb_softc *sc;
509*8890ab77SEd Maste 	if_ctx_t ctx;
510*8890ab77SEd Maste 	int needs_reset;
511*8890ab77SEd Maste 
512*8890ab77SEd Maste 	ctx = if_getsoftc(ifp);
513*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
514*8890ab77SEd Maste 	miid = device_get_softc(sc->miibus);
515*8890ab77SEd Maste 	LIST_FOREACH(miisc, &miid->mii_phys, mii_list)
516*8890ab77SEd Maste 		PHY_RESET(miisc);
517*8890ab77SEd Maste 
518*8890ab77SEd Maste 	needs_reset = mii_mediachg(miid);
519*8890ab77SEd Maste 	if (needs_reset != 0)
520*8890ab77SEd Maste 		ifp->if_init(ctx);
521*8890ab77SEd Maste 	return (needs_reset);
522*8890ab77SEd Maste }
523*8890ab77SEd Maste 
524*8890ab77SEd Maste static void
525*8890ab77SEd Maste mgb_media_status(if_t ifp, struct ifmediareq *ifmr)
526*8890ab77SEd Maste {
527*8890ab77SEd Maste 	struct mgb_softc *sc;
528*8890ab77SEd Maste 	struct mii_data *miid;
529*8890ab77SEd Maste 
530*8890ab77SEd Maste 	sc = iflib_get_softc(if_getsoftc(ifp));
531*8890ab77SEd Maste 	miid = device_get_softc(sc->miibus);
532*8890ab77SEd Maste 	if ((if_getflags(ifp) & IFF_UP) == 0)
533*8890ab77SEd Maste 		return;
534*8890ab77SEd Maste 
535*8890ab77SEd Maste 	mii_pollstat(miid);
536*8890ab77SEd Maste 	ifmr->ifm_active = miid->mii_media_active;
537*8890ab77SEd Maste 	ifmr->ifm_status = miid->mii_media_status;
538*8890ab77SEd Maste }
539*8890ab77SEd Maste 
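/*
 * iflib allocates isc_ntxqs (2) DMA regions per TX queue set and passes
 * their addresses here: index 0 is the descriptor ring and index 1 is the
 * one-word head writeback area, matching the isc_txqsizes[] values set in
 * mgb_attach_pre().  mgb_rx_queues_alloc() below follows the same layout.
 */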
540*8890ab77SEd Maste static int
541*8890ab77SEd Maste mgb_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs,
542*8890ab77SEd Maste     int ntxqsets)
543*8890ab77SEd Maste {
544*8890ab77SEd Maste 	struct mgb_softc *sc;
545*8890ab77SEd Maste 	struct mgb_ring_data *rdata;
546*8890ab77SEd Maste 	int q;
547*8890ab77SEd Maste 
548*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
549*8890ab77SEd Maste 	KASSERT(ntxqsets == 1, ("ntxqsets = %d", ntxqsets));
550*8890ab77SEd Maste 	rdata = &sc->tx_ring_data;
551*8890ab77SEd Maste 	for (q = 0; q < ntxqsets; q++) {
552*8890ab77SEd Maste 		KASSERT(ntxqs == 2, ("ntxqs = %d", ntxqs));
553*8890ab77SEd Maste 		/* Ring */
554*8890ab77SEd Maste 		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * ntxqs + 0];
555*8890ab77SEd Maste 		rdata->ring_bus_addr = paddrs[q * ntxqs + 0];
556*8890ab77SEd Maste 
557*8890ab77SEd Maste 		/* Head WB */
558*8890ab77SEd Maste 		rdata->head_wb = (uint32_t *) vaddrs[q * ntxqs + 1];
559*8890ab77SEd Maste 		rdata->head_wb_bus_addr = paddrs[q * ntxqs + 1];
560*8890ab77SEd Maste 	}
561*8890ab77SEd Maste 	return 0;
562*8890ab77SEd Maste }
563*8890ab77SEd Maste 
564*8890ab77SEd Maste static int
565*8890ab77SEd Maste mgb_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs,
566*8890ab77SEd Maste     int nrxqsets)
567*8890ab77SEd Maste {
568*8890ab77SEd Maste 	struct mgb_softc *sc;
569*8890ab77SEd Maste 	struct mgb_ring_data *rdata;
570*8890ab77SEd Maste 	int q;
571*8890ab77SEd Maste 
572*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
573*8890ab77SEd Maste 	KASSERT(nrxqsets == 1, ("nrxqsets = %d", nrxqsets));
574*8890ab77SEd Maste 	rdata = &sc->rx_ring_data;
575*8890ab77SEd Maste 	for (q = 0; q < nrxqsets; q++) {
576*8890ab77SEd Maste 		KASSERT(nrxqs == 2, ("nrxqs = %d", nrxqs));
577*8890ab77SEd Maste 		/* Ring */
578*8890ab77SEd Maste 		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * nrxqs + 0];
579*8890ab77SEd Maste 		rdata->ring_bus_addr = paddrs[q * nrxqs + 0];
580*8890ab77SEd Maste 
581*8890ab77SEd Maste 		/* Head WB */
582*8890ab77SEd Maste 		rdata->head_wb = (uint32_t *) vaddrs[q * nrxqs + 1];
583*8890ab77SEd Maste 		rdata->head_wb_bus_addr = paddrs[q * nrxqs + 1];
584*8890ab77SEd Maste 	}
585*8890ab77SEd Maste 	return 0;
586*8890ab77SEd Maste }
587*8890ab77SEd Maste 
588*8890ab77SEd Maste static void
589*8890ab77SEd Maste mgb_queues_free(if_ctx_t ctx)
590*8890ab77SEd Maste {
591*8890ab77SEd Maste 	struct mgb_softc *sc;
592*8890ab77SEd Maste 
593*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
594*8890ab77SEd Maste 
595*8890ab77SEd Maste 	memset(&sc->rx_ring_data, 0, sizeof(struct mgb_ring_data));
596*8890ab77SEd Maste 	memset(&sc->tx_ring_data, 0, sizeof(struct mgb_ring_data));
597*8890ab77SEd Maste }
598*8890ab77SEd Maste 
599*8890ab77SEd Maste static void
600*8890ab77SEd Maste mgb_init(if_ctx_t ctx)
601*8890ab77SEd Maste {
602*8890ab77SEd Maste 	struct mgb_softc *sc;
603*8890ab77SEd Maste 	struct mii_data *miid;
604*8890ab77SEd Maste 	int error;
605*8890ab77SEd Maste 
606*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
607*8890ab77SEd Maste 	miid = device_get_softc(sc->miibus);
608*8890ab77SEd Maste 	device_printf(sc->dev, "running init ...\n");
609*8890ab77SEd Maste 
610*8890ab77SEd Maste 	mgb_dma_init(sc);
611*8890ab77SEd Maste 
612*8890ab77SEd Maste 	/* XXX: Turn off perfect filtering, turn on (broad|multi|uni)cast rx */
613*8890ab77SEd Maste 	CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PERFECT_FILTER);
614*8890ab77SEd Maste 	CSR_UPDATE_REG(sc, MGB_RFE_CTL,
615*8890ab77SEd Maste 	    MGB_RFE_ALLOW_BROADCAST |
616*8890ab77SEd Maste 	    MGB_RFE_ALLOW_MULTICAST |
617*8890ab77SEd Maste 	    MGB_RFE_ALLOW_UNICAST);
618*8890ab77SEd Maste 
619*8890ab77SEd Maste 	error = mii_mediachg(miid);
620*8890ab77SEd Maste 	KASSERT(!error, ("mii_mediachg returned: %d", error));
621*8890ab77SEd Maste }
622*8890ab77SEd Maste 
623*8890ab77SEd Maste #ifdef DEBUG
624*8890ab77SEd Maste static void
625*8890ab77SEd Maste mgb_dump_some_stats(struct mgb_softc *sc)
626*8890ab77SEd Maste {
627*8890ab77SEd Maste 	int i;
628*8890ab77SEd Maste 	int first_stat = 0x1200;
629*8890ab77SEd Maste 	int last_stat = 0x12FC;
630*8890ab77SEd Maste 
631*8890ab77SEd Maste 	for (i = first_stat; i <= last_stat; i += 4)
632*8890ab77SEd Maste 		if (CSR_READ_REG(sc, i) != 0)
633*8890ab77SEd Maste 			device_printf(sc->dev, "0x%04x: 0x%08x\n", i,
634*8890ab77SEd Maste 			    CSR_READ_REG(sc, i));
635*8890ab77SEd Maste 	char *stat_names[] = {
636*8890ab77SEd Maste 		"MAC_ERR_STS ",
637*8890ab77SEd Maste 		"FCT_INT_STS ",
638*8890ab77SEd Maste 		"DMAC_CFG ",
639*8890ab77SEd Maste 		"DMAC_CMD ",
640*8890ab77SEd Maste 		"DMAC_INT_STS ",
641*8890ab77SEd Maste 		"DMAC_INT_EN ",
642*8890ab77SEd Maste 		"DMAC_RX_ERR_STS0 ",
643*8890ab77SEd Maste 		"DMAC_RX_ERR_STS1 ",
644*8890ab77SEd Maste 		"DMAC_RX_ERR_STS2 ",
645*8890ab77SEd Maste 		"DMAC_RX_ERR_STS3 ",
646*8890ab77SEd Maste 		"INT_STS ",
647*8890ab77SEd Maste 		"INT_EN ",
648*8890ab77SEd Maste 		"INT_VEC_EN ",
649*8890ab77SEd Maste 		"INT_VEC_MAP0 ",
650*8890ab77SEd Maste 		"INT_VEC_MAP1 ",
651*8890ab77SEd Maste 		"INT_VEC_MAP2 ",
652*8890ab77SEd Maste 		"TX_HEAD0",
653*8890ab77SEd Maste 		"TX_TAIL0",
654*8890ab77SEd Maste 		"DMAC_TX_ERR_STS0 ",
655*8890ab77SEd Maste 		NULL
656*8890ab77SEd Maste 	};
657*8890ab77SEd Maste 	int stats[] = {
658*8890ab77SEd Maste 		0x114,
659*8890ab77SEd Maste 		0xA0,
660*8890ab77SEd Maste 		0xC00,
661*8890ab77SEd Maste 		0xC0C,
662*8890ab77SEd Maste 		0xC10,
663*8890ab77SEd Maste 		0xC14,
664*8890ab77SEd Maste 		0xC60,
665*8890ab77SEd Maste 		0xCA0,
666*8890ab77SEd Maste 		0xCE0,
667*8890ab77SEd Maste 		0xD20,
668*8890ab77SEd Maste 		0x780,
669*8890ab77SEd Maste 		0x788,
670*8890ab77SEd Maste 		0x794,
671*8890ab77SEd Maste 		0x7A0,
672*8890ab77SEd Maste 		0x7A4,
673*8890ab77SEd Maste 		0x780,
674*8890ab77SEd Maste 		0xD58,
675*8890ab77SEd Maste 		0xD5C,
676*8890ab77SEd Maste 		0xD60,
677*8890ab77SEd Maste 		0x0
678*8890ab77SEd Maste 	};
679*8890ab77SEd Maste 	i = 0;
680*8890ab77SEd Maste 	printf("==============================\n");
681*8890ab77SEd Maste 	while (stats[i++])
682*8890ab77SEd Maste 		device_printf(sc->dev, "%s at offset 0x%04x = 0x%08x\n",
683*8890ab77SEd Maste 		    stat_names[i - 1], stats[i - 1],
684*8890ab77SEd Maste 		    CSR_READ_REG(sc, stats[i - 1]));
685*8890ab77SEd Maste 	printf("==== TX RING DESCS ====\n");
686*8890ab77SEd Maste 	for (i = 0; i < MGB_DMA_RING_SIZE; i++)
687*8890ab77SEd Maste 		device_printf(sc->dev, "ring[%d].data0=0x%08x\n"
688*8890ab77SEd Maste 		    "ring[%d].data1=0x%08x\n"
689*8890ab77SEd Maste 		    "ring[%d].data2=0x%08x\n"
690*8890ab77SEd Maste 		    "ring[%d].data3=0x%08x\n",
691*8890ab77SEd Maste 		    i, sc->tx_ring_data.ring[i].ctl,
692*8890ab77SEd Maste 		    i, sc->tx_ring_data.ring[i].addr.low,
693*8890ab77SEd Maste 		    i, sc->tx_ring_data.ring[i].addr.high,
694*8890ab77SEd Maste 		    i, sc->tx_ring_data.ring[i].sts);
695*8890ab77SEd Maste 	device_printf(sc->dev, "==== DUMP_TX_DMA_RAM ====\n");
697*8890ab77SEd Maste 	CSR_WRITE_REG(sc, 0x24, 0xF); // DP_SEL & TX_RAM_0
698*8890ab77SEd Maste 	for (i = 0; i < 128; i++) {
699*8890ab77SEd Maste 		CSR_WRITE_REG(sc, 0x2C, i); // DP_ADDR
700*8890ab77SEd Maste 
701*8890ab77SEd Maste 		CSR_WRITE_REG(sc, 0x28, 0); // DP_CMD
702*8890ab77SEd Maste 
703*8890ab77SEd Maste 		while ((CSR_READ_REG(sc, 0x24) & 0x80000000) == 0) // DP_SEL & READY
704*8890ab77SEd Maste 			DELAY(1000);
705*8890ab77SEd Maste 
706*8890ab77SEd Maste 		device_printf(sc->dev, "DMAC_TX_RAM_0[%u]=%08x\n", i,
707*8890ab77SEd Maste 		    CSR_READ_REG(sc, 0x30)); // DP_DATA
708*8890ab77SEd Maste 	}
709*8890ab77SEd Maste }
710*8890ab77SEd Maste #endif
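
/*
 * Illustrative sketch (not built): one possible way to export a MAC
 * statistics register through sysctl, as suggested under "Debugging
 * Capabilities" in the header comment.  The names here are placeholders,
 * and 0x1200 is simply the start of the statistics block dumped by
 * mgb_dump_some_stats() above; a real implementation would use named
 * register definitions from if_mgb.h and include <sys/sysctl.h>.
 */
#if 0
static int
mgb_sysctl_stat(SYSCTL_HANDLER_ARGS)
{
	struct mgb_softc *sc = arg1;
	uint32_t val;

	/* arg2 carries the register offset to read. */
	val = CSR_READ_REG(sc, (int)arg2);
	return (sysctl_handle_32(oidp, &val, 0, req));
}

/*
 * Registration, e.g. from attach:
 *
 * SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
 *     SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
 *     "mac_stat0", CTLTYPE_U32 | CTLFLAG_RD | CTLFLAG_MPSAFE,
 *     sc, 0x1200, mgb_sysctl_stat, "IU", "first MAC statistics register");
 */
#endif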
711*8890ab77SEd Maste 
712*8890ab77SEd Maste static void
713*8890ab77SEd Maste mgb_stop(if_ctx_t ctx)
714*8890ab77SEd Maste {
715*8890ab77SEd Maste 	struct mgb_softc *sc;
716*8890ab77SEd Maste 	if_softc_ctx_t scctx;
717*8890ab77SEd Maste 	int i;
718*8890ab77SEd Maste 
719*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
720*8890ab77SEd Maste 	scctx = iflib_get_softc_ctx(ctx);
721*8890ab77SEd Maste 
722*8890ab77SEd Maste 	/* XXX: Could potentially timeout */
723*8890ab77SEd Maste 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
724*8890ab77SEd Maste 		mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_STOP);
725*8890ab77SEd Maste 		mgb_fct_control(sc, MGB_FCT_RX_CTL, 0, FCT_DISABLE);
726*8890ab77SEd Maste 	}
727*8890ab77SEd Maste 	for (i = 0; i < scctx->isc_ntxqsets; i++) {
728*8890ab77SEd Maste 		mgb_dmac_control(sc, MGB_DMAC_TX_START, 0, DMAC_STOP);
729*8890ab77SEd Maste 		mgb_fct_control(sc, MGB_FCT_TX_CTL, 0, FCT_DISABLE);
730*8890ab77SEd Maste 	}
731*8890ab77SEd Maste }
732*8890ab77SEd Maste 
733*8890ab77SEd Maste static int
734*8890ab77SEd Maste mgb_legacy_intr(void *xsc)
735*8890ab77SEd Maste {
736*8890ab77SEd Maste 	struct mgb_softc *sc;
737*8890ab77SEd Maste 
738*8890ab77SEd Maste 	sc = xsc;
739*8890ab77SEd Maste 	iflib_admin_intr_deferred(sc->ctx);
740*8890ab77SEd Maste 	return (FILTER_HANDLED);
741*8890ab77SEd Maste }
742*8890ab77SEd Maste 
743*8890ab77SEd Maste static int
744*8890ab77SEd Maste mgb_rxq_intr(void *xsc)
745*8890ab77SEd Maste {
746*8890ab77SEd Maste 	struct mgb_softc *sc;
747*8890ab77SEd Maste 	if_softc_ctx_t scctx;
748*8890ab77SEd Maste 	uint32_t intr_sts, intr_en;
749*8890ab77SEd Maste 	int qidx;
750*8890ab77SEd Maste 
751*8890ab77SEd Maste 	sc = xsc;
752*8890ab77SEd Maste 	scctx = iflib_get_softc_ctx(sc->ctx);
753*8890ab77SEd Maste 
754*8890ab77SEd Maste 	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
755*8890ab77SEd Maste 	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
756*8890ab77SEd Maste 	intr_sts &= intr_en;
757*8890ab77SEd Maste 
758*8890ab77SEd Maste 	for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
759*8890ab77SEd Maste 		if ((intr_sts & MGB_INTR_STS_RX(qidx)) != 0) {
760*8890ab77SEd Maste 			CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
761*8890ab77SEd Maste 			    MGB_INTR_STS_RX(qidx));
762*8890ab77SEd Maste 			CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_RX(qidx));
763*8890ab77SEd Maste 		}
764*8890ab77SEd Maste 	}
765*8890ab77SEd Maste 	return (FILTER_SCHEDULE_THREAD);
766*8890ab77SEd Maste }
767*8890ab77SEd Maste 
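/*
 * Admin (vector 0) interrupt filter: acknowledges the software-generated
 * test interrupt, defers RX work to iflib via iflib_rx_intr_deferred(),
 * and clears and defers any unexpected TX interrupts.  Anything else is
 * handed to the task thread with FILTER_SCHEDULE_THREAD.
 */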
768*8890ab77SEd Maste static int
769*8890ab77SEd Maste mgb_admin_intr(void *xsc)
770*8890ab77SEd Maste {
771*8890ab77SEd Maste 	struct mgb_softc *sc;
772*8890ab77SEd Maste 	if_softc_ctx_t scctx;
773*8890ab77SEd Maste 	uint32_t intr_sts, intr_en;
774*8890ab77SEd Maste 	int qidx;
775*8890ab77SEd Maste 
776*8890ab77SEd Maste 	sc = xsc;
777*8890ab77SEd Maste 	scctx = iflib_get_softc_ctx(sc->ctx);
778*8890ab77SEd Maste 
779*8890ab77SEd Maste 	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
780*8890ab77SEd Maste 	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
781*8890ab77SEd Maste 	intr_sts &= intr_en;
782*8890ab77SEd Maste 
783*8890ab77SEd Maste 	/*
784*8890ab77SEd Maste 	 * NOTE: Debugging printfs here
785*8890ab77SEd Maste 	 * will likely cause interrupt test failure.
786*8890ab77SEd Maste 	 */
787*8890ab77SEd Maste 
788*8890ab77SEd Maste 	/* TODO: shouldn't continue if suspended */
789*8890ab77SEd Maste 	if ((intr_sts & MGB_INTR_STS_ANY) == 0) {
791*8890ab77SEd Maste 		device_printf(sc->dev, "non-mgb interrupt triggered.\n");
792*8890ab77SEd Maste 		return (FILTER_SCHEDULE_THREAD);
793*8890ab77SEd Maste 	}
794*8890ab77SEd Maste 	if ((intr_sts & MGB_INTR_STS_TEST) != 0) {
796*8890ab77SEd Maste 		sc->isr_test_flag = true;
797*8890ab77SEd Maste 		CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
798*8890ab77SEd Maste 		return (FILTER_HANDLED);
799*8890ab77SEd Maste 	}
800*8890ab77SEd Maste 	if ((intr_sts & MGB_INTR_STS_RX_ANY) != 0) {
802*8890ab77SEd Maste 		for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
803*8890ab77SEd Maste 			if ((intr_sts & MGB_INTR_STS_RX(qidx)) != 0) {
804*8890ab77SEd Maste 				iflib_rx_intr_deferred(sc->ctx, qidx);
805*8890ab77SEd Maste 			}
806*8890ab77SEd Maste 		}
807*8890ab77SEd Maste 		return (FILTER_HANDLED);
808*8890ab77SEd Maste 	}
809*8890ab77SEd Maste 	/* XXX: TX interrupts should not occur */
810*8890ab77SEd Maste 	if ((intr_sts & MGB_INTR_STS_TX_ANY) != 0) {
812*8890ab77SEd Maste 		for (qidx = 0; qidx < scctx->isc_ntxqsets; qidx++) {
813*8890ab77SEd Maste 			if ((intr_sts & MGB_INTR_STS_TX(qidx)) != 0) {
814*8890ab77SEd Maste 				/* clear the interrupt sts and run handler */
815*8890ab77SEd Maste 				CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
816*8890ab77SEd Maste 				    MGB_INTR_STS_TX(qidx));
817*8890ab77SEd Maste 				CSR_WRITE_REG(sc, MGB_INTR_STS,
818*8890ab77SEd Maste 				    MGB_INTR_STS_TX(qidx));
819*8890ab77SEd Maste 				iflib_tx_intr_deferred(sc->ctx, qidx);
820*8890ab77SEd Maste 			}
821*8890ab77SEd Maste 		}
822*8890ab77SEd Maste 		return (FILTER_HANDLED);
823*8890ab77SEd Maste 	}
824*8890ab77SEd Maste 
825*8890ab77SEd Maste 	return (FILTER_SCHEDULE_THREAD);
826*8890ab77SEd Maste }
827*8890ab77SEd Maste 
828*8890ab77SEd Maste static int
829*8890ab77SEd Maste mgb_msix_intr_assign(if_ctx_t ctx, int msix)
830*8890ab77SEd Maste {
831*8890ab77SEd Maste 	struct mgb_softc *sc;
832*8890ab77SEd Maste 	if_softc_ctx_t scctx;
833*8890ab77SEd Maste 	int error, i, vectorid;
834*8890ab77SEd Maste 	char irq_name[16];
835*8890ab77SEd Maste 
836*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
837*8890ab77SEd Maste 	scctx = iflib_get_softc_ctx(ctx);
838*8890ab77SEd Maste 
839*8890ab77SEd Maste 	KASSERT(scctx->isc_nrxqsets == 1 && scctx->isc_ntxqsets == 1,
840*8890ab77SEd Maste 	    ("num rxqsets/txqsets != 1 "));
841*8890ab77SEd Maste 
842*8890ab77SEd Maste 	/*
843*8890ab77SEd Maste 	 * First vector should be admin interrupts, other vectors are TX/RX.
844*8890ab77SEd Maste 	 *
845*8890ab77SEd Maste 	 * RIDs start at 1, and vector ids start at 0.
846*8890ab77SEd Maste 	 */
847*8890ab77SEd Maste 	vectorid = 0;
848*8890ab77SEd Maste 	error = iflib_irq_alloc_generic(ctx, &sc->admin_irq, vectorid + 1,
849*8890ab77SEd Maste 	    IFLIB_INTR_ADMIN, mgb_admin_intr, sc, 0, "admin");
850*8890ab77SEd Maste 	if (error) {
851*8890ab77SEd Maste 		device_printf(sc->dev,
852*8890ab77SEd Maste 		    "Failed to register admin interrupt handler\n");
853*8890ab77SEd Maste 		return (error);
854*8890ab77SEd Maste 	}
855*8890ab77SEd Maste 
856*8890ab77SEd Maste 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
857*8890ab77SEd Maste 		vectorid++;
858*8890ab77SEd Maste 		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
859*8890ab77SEd Maste 		error = iflib_irq_alloc_generic(ctx, &sc->rx_irq, vectorid + 1,
860*8890ab77SEd Maste 		    IFLIB_INTR_RX, mgb_rxq_intr, sc, i, irq_name);
861*8890ab77SEd Maste 		if (error) {
862*8890ab77SEd Maste 			device_printf(sc->dev,
863*8890ab77SEd Maste 			    "Failed to register rxq %d interrupt handler\n", i);
864*8890ab77SEd Maste 			return (error);
865*8890ab77SEd Maste 		}
866*8890ab77SEd Maste 		CSR_UPDATE_REG(sc, MGB_INTR_VEC_RX_MAP,
867*8890ab77SEd Maste 		    MGB_INTR_VEC_MAP(vectorid, i));
868*8890ab77SEd Maste 	}
869*8890ab77SEd Maste 
870*8890ab77SEd Maste 	/* Not actually mapping hw TX interrupts ... */
871*8890ab77SEd Maste 	for (i = 0; i < scctx->isc_ntxqsets; i++) {
872*8890ab77SEd Maste 		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
873*8890ab77SEd Maste 		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
874*8890ab77SEd Maste 		    irq_name);
875*8890ab77SEd Maste 	}
876*8890ab77SEd Maste 
877*8890ab77SEd Maste 	return (0);
878*8890ab77SEd Maste }
879*8890ab77SEd Maste 
880*8890ab77SEd Maste static void
881*8890ab77SEd Maste mgb_intr_enable_all(if_ctx_t ctx)
882*8890ab77SEd Maste {
883*8890ab77SEd Maste 	struct mgb_softc *sc;
884*8890ab77SEd Maste 	if_softc_ctx_t scctx;
885*8890ab77SEd Maste 	int i, dmac_enable = 0, intr_sts = 0, vec_en = 0;
886*8890ab77SEd Maste 
887*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
888*8890ab77SEd Maste 	scctx = iflib_get_softc_ctx(ctx);
889*8890ab77SEd Maste 	intr_sts |= MGB_INTR_STS_ANY;
890*8890ab77SEd Maste 	vec_en |= MGB_INTR_STS_ANY;
891*8890ab77SEd Maste 
892*8890ab77SEd Maste 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
893*8890ab77SEd Maste 		intr_sts |= MGB_INTR_STS_RX(i);
894*8890ab77SEd Maste 		dmac_enable |= MGB_DMAC_RX_INTR_ENBL(i);
895*8890ab77SEd Maste 		vec_en |= MGB_INTR_RX_VEC_STS(i);
896*8890ab77SEd Maste 	}
897*8890ab77SEd Maste 
898*8890ab77SEd Maste 	/* TX interrupts aren't needed ... */
899*8890ab77SEd Maste 
900*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, intr_sts);
901*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, vec_en);
902*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, dmac_enable);
903*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, dmac_enable);
904*8890ab77SEd Maste }
905*8890ab77SEd Maste 
906*8890ab77SEd Maste static void
907*8890ab77SEd Maste mgb_intr_disable_all(if_ctx_t ctx)
908*8890ab77SEd Maste {
909*8890ab77SEd Maste 	struct mgb_softc *sc;
910*8890ab77SEd Maste 
911*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
912*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, UINT32_MAX);
913*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_CLR, UINT32_MAX);
914*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_STS, UINT32_MAX);
915*8890ab77SEd Maste 
916*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_CLR, UINT32_MAX);
917*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, UINT32_MAX);
918*8890ab77SEd Maste }
919*8890ab77SEd Maste 
920*8890ab77SEd Maste static int
921*8890ab77SEd Maste mgb_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
922*8890ab77SEd Maste {
923*8890ab77SEd Maste 	/* called after successful rx isr */
924*8890ab77SEd Maste 	struct mgb_softc *sc;
925*8890ab77SEd Maste 
926*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
927*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_RX_VEC_STS(qid));
928*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_RX(qid));
929*8890ab77SEd Maste 
930*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_RX_INTR_ENBL(qid));
931*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_RX_INTR_ENBL(qid));
932*8890ab77SEd Maste 	return (0);
933*8890ab77SEd Maste }
934*8890ab77SEd Maste 
935*8890ab77SEd Maste static int
936*8890ab77SEd Maste mgb_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
937*8890ab77SEd Maste {
938*8890ab77SEd Maste 	/* XXX: not called (since tx interrupts not used) */
939*8890ab77SEd Maste 	struct mgb_softc *sc;
940*8890ab77SEd Maste 
941*8890ab77SEd Maste 	sc = iflib_get_softc(ctx);
942*8890ab77SEd Maste 
943*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_TX(qid));
944*8890ab77SEd Maste 
945*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_TX_INTR_ENBL(qid));
946*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_TX_INTR_ENBL(qid));
947*8890ab77SEd Maste 	return (0);
948*8890ab77SEd Maste }
949*8890ab77SEd Maste 
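/*
 * Trigger the device's software test interrupt and poll (up to MGB_TIMEOUT
 * iterations of DELAY(10)) for mgb_admin_intr() to set isr_test_flag,
 * verifying that interrupt delivery works end to end.
 */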
950*8890ab77SEd Maste static bool
951*8890ab77SEd Maste mgb_intr_test(struct mgb_softc *sc)
952*8890ab77SEd Maste {
953*8890ab77SEd Maste 	int i;
954*8890ab77SEd Maste 
955*8890ab77SEd Maste 	sc->isr_test_flag = false;
956*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
957*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_STS_ANY);
958*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET,
959*8890ab77SEd Maste 	    MGB_INTR_STS_ANY | MGB_INTR_STS_TEST);
960*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_SET, MGB_INTR_STS_TEST);
961*8890ab77SEd Maste 	if (sc->isr_test_flag)
962*8890ab77SEd Maste 		return true;
963*8890ab77SEd Maste 	for (i = 0; i < MGB_TIMEOUT; i++) {
964*8890ab77SEd Maste 		DELAY(10);
965*8890ab77SEd Maste 		if (sc->isr_test_flag)
966*8890ab77SEd Maste 			break;
967*8890ab77SEd Maste 	}
968*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TEST);
969*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
970*8890ab77SEd Maste 	return sc->isr_test_flag;
971*8890ab77SEd Maste }
972*8890ab77SEd Maste 
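/*
 * Encapsulate a packet into TX ring descriptors: each segment gets one
 * descriptor carrying the buffer length in 'ctl' and the frame length in
 * 'sts'.  FS/LS are set on every descriptor, which is only correct for the
 * single-segment case this function currently asserts.
 */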
973*8890ab77SEd Maste static int
974*8890ab77SEd Maste mgb_isc_txd_encap(void *xsc, if_pkt_info_t ipi)
975*8890ab77SEd Maste {
976*8890ab77SEd Maste 	struct mgb_softc *sc;
977*8890ab77SEd Maste 	if_softc_ctx_t scctx;
978*8890ab77SEd Maste 	struct mgb_ring_data *rdata;
979*8890ab77SEd Maste 	struct mgb_ring_desc *txd;
980*8890ab77SEd Maste 	bus_dma_segment_t *segs;
981*8890ab77SEd Maste 	qidx_t pidx, nsegs;
982*8890ab77SEd Maste 	int i;
983*8890ab77SEd Maste 
984*8890ab77SEd Maste 	KASSERT(ipi->ipi_qsidx == 0,
985*8890ab77SEd Maste 	    ("tried to encap on TX Channel %d.\n", ipi->ipi_qsidx));
986*8890ab77SEd Maste 	sc = xsc;
987*8890ab77SEd Maste 	scctx = iflib_get_softc_ctx(sc->ctx);
988*8890ab77SEd Maste 	rdata = &sc->tx_ring_data;
989*8890ab77SEd Maste 
990*8890ab77SEd Maste 	pidx = ipi->ipi_pidx;
991*8890ab77SEd Maste 	segs = ipi->ipi_segs;
992*8890ab77SEd Maste 	nsegs = ipi->ipi_nsegs;
993*8890ab77SEd Maste 
994*8890ab77SEd Maste 	/* For each seg, create a descriptor */
995*8890ab77SEd Maste 	for (i = 0; i < nsegs; ++i) {
996*8890ab77SEd Maste 		KASSERT(nsegs == 1, ("multisegment packets not supported\n"));
997*8890ab77SEd Maste 		txd = &rdata->ring[pidx];
998*8890ab77SEd Maste 		txd->ctl = htole32(
999*8890ab77SEd Maste 		    (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK) |
1000*8890ab77SEd Maste 		    /*
1001*8890ab77SEd Maste 		     * XXX: This will be wrong in the multipacket case
1002*8890ab77SEd Maste 		     * I suspect FS should be for the first packet and
1003*8890ab77SEd Maste 		     * LS should be for the last packet
1004*8890ab77SEd Maste 		     */
1005*8890ab77SEd Maste 		    MGB_TX_DESC_CTL_FS | MGB_TX_DESC_CTL_LS |
1006*8890ab77SEd Maste 		    MGB_DESC_CTL_FCS);
1007*8890ab77SEd Maste 		txd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32(
1008*8890ab77SEd Maste 		    segs[i].ds_addr));
1009*8890ab77SEd Maste 		txd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32(
1010*8890ab77SEd Maste 		    segs[i].ds_addr));
1011*8890ab77SEd Maste 		txd->sts = htole32(
1012*8890ab77SEd Maste 		    (segs[i].ds_len << 16) & MGB_DESC_FRAME_LEN_MASK);
1013*8890ab77SEd Maste 		pidx = MGB_NEXT_RING_IDX(pidx);
1014*8890ab77SEd Maste 	}
1015*8890ab77SEd Maste 	ipi->ipi_new_pidx = pidx;
1016*8890ab77SEd Maste 	return (0);
1017*8890ab77SEd Maste }
1018*8890ab77SEd Maste 
1019*8890ab77SEd Maste static void
1020*8890ab77SEd Maste mgb_isc_txd_flush(void *xsc, uint16_t txqid, qidx_t pidx)
1021*8890ab77SEd Maste {
1022*8890ab77SEd Maste 	struct mgb_softc *sc;
1023*8890ab77SEd Maste 	struct mgb_ring_data *rdata;
1024*8890ab77SEd Maste 
1025*8890ab77SEd Maste 	KASSERT(txqid == 0, ("tried to flush TX Channel %d.\n", txqid));
1026*8890ab77SEd Maste 	sc = xsc;
1027*8890ab77SEd Maste 	rdata = &sc->tx_ring_data;
1028*8890ab77SEd Maste 
1029*8890ab77SEd Maste 	if (rdata->last_tail != pidx) {
1030*8890ab77SEd Maste 		rdata->last_tail = pidx;
1031*8890ab77SEd Maste 		CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(txqid), rdata->last_tail);
1032*8890ab77SEd Maste 	}
1033*8890ab77SEd Maste }
1034*8890ab77SEd Maste 
1035*8890ab77SEd Maste static int
1036*8890ab77SEd Maste mgb_isc_txd_credits_update(void *xsc, uint16_t txqid, bool clear)
1037*8890ab77SEd Maste {
1038*8890ab77SEd Maste 	struct mgb_softc *sc;
1039*8890ab77SEd Maste 	struct mgb_ring_desc *txd;
1040*8890ab77SEd Maste 	struct mgb_ring_data *rdata;
1041*8890ab77SEd Maste 	int processed = 0;
1042*8890ab77SEd Maste 
1043*8890ab77SEd Maste 	/*
1044*8890ab77SEd Maste 	 * > If clear is true, we need to report the number of TX command ring
1045*8890ab77SEd Maste 	 * > descriptors that have been processed by the device.  If clear is
1046*8890ab77SEd Maste 	 * > false, we just need to report whether or not at least one TX
1047*8890ab77SEd Maste 	 * > command ring descriptor has been processed by the device.
1048*8890ab77SEd Maste 	 * - vmx driver
1049*8890ab77SEd Maste 	 */
1050*8890ab77SEd Maste 	KASSERT(txqid == 0, ("tried to credits_update TX Channel %d.\n",
1051*8890ab77SEd Maste 	    txqid));
1052*8890ab77SEd Maste 	sc = xsc;
1053*8890ab77SEd Maste 	rdata = &sc->tx_ring_data;
1054*8890ab77SEd Maste 
1055*8890ab77SEd Maste 	while (*(rdata->head_wb) != rdata->last_head) {
1056*8890ab77SEd Maste 		if (!clear)
1057*8890ab77SEd Maste 			return 1;
1058*8890ab77SEd Maste 
1059*8890ab77SEd Maste 		txd = &rdata->ring[rdata->last_head];
1060*8890ab77SEd Maste 		memset(txd, 0, sizeof(struct mgb_ring_desc));
1061*8890ab77SEd Maste 		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1062*8890ab77SEd Maste 		processed++;
1063*8890ab77SEd Maste 	}
1064*8890ab77SEd Maste 
1065*8890ab77SEd Maste 	return (processed);
1066*8890ab77SEd Maste }
1067*8890ab77SEd Maste 
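/*
 * Report how many completed RX descriptors are available, i.e. the distance
 * from 'idx' to the device's head writeback value, capped at 'budget'.
 */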
1068*8890ab77SEd Maste static int
1069*8890ab77SEd Maste mgb_isc_rxd_available(void *xsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
1070*8890ab77SEd Maste {
1071*8890ab77SEd Maste 	struct mgb_softc *sc;
1072*8890ab77SEd Maste 	if_softc_ctx_t scctx;
1073*8890ab77SEd Maste 	struct mgb_ring_data *rdata;
1074*8890ab77SEd Maste 	int avail = 0;
1075*8890ab77SEd Maste 
1076*8890ab77SEd Maste 	sc = xsc;
1077*8890ab77SEd Maste 	KASSERT(rxqid == 0, ("tried to check availability in RX Channel %d.\n",
1078*8890ab77SEd Maste 	    rxqid));
1079*8890ab77SEd Maste 
1080*8890ab77SEd Maste 	rdata = &sc->rx_ring_data;
1081*8890ab77SEd Maste 	scctx = iflib_get_softc_ctx(sc->ctx);
1082*8890ab77SEd Maste 	for (; idx != *(rdata->head_wb);
1083*8890ab77SEd Maste 	    idx = MGB_NEXT_RING_IDX(idx)) {
1084*8890ab77SEd Maste 		avail++;
1085*8890ab77SEd Maste 		/* XXX: Could verify desc is device owned here */
1086*8890ab77SEd Maste 		if (avail == budget)
1087*8890ab77SEd Maste 			break;
1088*8890ab77SEd Maste 	}
1089*8890ab77SEd Maste 	return (avail);
1090*8890ab77SEd Maste }
1091*8890ab77SEd Maste 
1092*8890ab77SEd Maste static int
1093*8890ab77SEd Maste mgb_isc_rxd_pkt_get(void *xsc, if_rxd_info_t ri)
1094*8890ab77SEd Maste {
1095*8890ab77SEd Maste 	struct mgb_softc *sc;
1096*8890ab77SEd Maste 	struct mgb_ring_data *rdata;
1097*8890ab77SEd Maste 	struct mgb_ring_desc rxd;
1098*8890ab77SEd Maste 	int total_len;
1099*8890ab77SEd Maste 
1100*8890ab77SEd Maste 	KASSERT(ri->iri_qsidx == 0,
1101*8890ab77SEd Maste 	    ("tried to get a packet in RX Channel %d\n", ri->iri_qsidx));
1102*8890ab77SEd Maste 	sc = xsc;
1103*8890ab77SEd Maste 	total_len = 0;
1104*8890ab77SEd Maste 	rdata = &sc->rx_ring_data;
1105*8890ab77SEd Maste 
1106*8890ab77SEd Maste 	while (*(rdata->head_wb) != rdata->last_head) {
1107*8890ab77SEd Maste 		/* copy ring desc and do swapping */
1108*8890ab77SEd Maste 		rxd = rdata->ring[rdata->last_head];
1109*8890ab77SEd Maste 		rxd.ctl = le32toh(rxd.ctl);
1110*8890ab77SEd Maste 		rxd.addr.low = le32toh(rxd.addr.low);
1111*8890ab77SEd Maste 		rxd.addr.high = le32toh(rxd.addr.high);
1112*8890ab77SEd Maste 		rxd.sts = le32toh(rxd.sts);
1113*8890ab77SEd Maste 
1114*8890ab77SEd Maste 		if ((rxd.ctl & MGB_DESC_CTL_OWN) != 0) {
1115*8890ab77SEd Maste 			device_printf(sc->dev,
1116*8890ab77SEd Maste 			    "Tried to read descriptor ... "
1117*8890ab77SEd Maste 			    "found that it's still owned by the device\n");
1118*8890ab77SEd Maste 			return EINVAL;
1119*8890ab77SEd Maste 		}
1120*8890ab77SEd Maste 		if ((rxd.ctl & MGB_RX_DESC_CTL_FS) == 0) {
1121*8890ab77SEd Maste 			device_printf(sc->dev,
1122*8890ab77SEd Maste 			    "Tried to read descriptor ... "
1123*8890ab77SEd Maste 			    "found that FS is not set.\n");
1125*8890ab77SEd Maste 			return EINVAL;
1126*8890ab77SEd Maste 		}
1127*8890ab77SEd Maste 		/* XXX: Multi-packet support */
1128*8890ab77SEd Maste 		if ((rxd.ctl & MGB_RX_DESC_CTL_LS) == 0) {
1129*8890ab77SEd Maste 			device_printf(sc->dev,
1130*8890ab77SEd Maste 			    "Tried to read descriptor ... "
1131*8890ab77SEd Maste 			    "found that LS is not set. (Multi-buffer packets not yet supported)\n");
1132*8890ab77SEd Maste 			return EINVAL;
1133*8890ab77SEd Maste 		}
1134*8890ab77SEd Maste 		ri->iri_frags[0].irf_flid = 0;
1135*8890ab77SEd Maste 		ri->iri_frags[0].irf_idx = rdata->last_head;
1136*8890ab77SEd Maste 		ri->iri_frags[0].irf_len = MGB_DESC_GET_FRAME_LEN(&rxd);
1137*8890ab77SEd Maste 		total_len += ri->iri_frags[0].irf_len;
1138*8890ab77SEd Maste 
1139*8890ab77SEd Maste 		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1140*8890ab77SEd Maste 		break;
1141*8890ab77SEd Maste 	}
1142*8890ab77SEd Maste 	ri->iri_nfrags = 1;
1143*8890ab77SEd Maste 	ri->iri_len = total_len;
1144*8890ab77SEd Maste 
1145*8890ab77SEd Maste 	return (0);
1146*8890ab77SEd Maste }
1147*8890ab77SEd Maste 
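/*
 * Return buffers to the hardware: for each index iflib supplies, rewrite the
 * descriptor with the new buffer address and length and set the OWN bit.
 * The device does not see the new descriptors until mgb_isc_rxd_flush()
 * advances the RX tail pointer.
 */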
1148*8890ab77SEd Maste static void
1149*8890ab77SEd Maste mgb_isc_rxd_refill(void *xsc, if_rxd_update_t iru)
1150*8890ab77SEd Maste {
1151*8890ab77SEd Maste 	if_softc_ctx_t scctx;
1152*8890ab77SEd Maste 	struct mgb_softc *sc;
1153*8890ab77SEd Maste 	struct mgb_ring_data *rdata;
1154*8890ab77SEd Maste 	struct mgb_ring_desc *rxd;
1155*8890ab77SEd Maste 	uint64_t *paddrs;
1156*8890ab77SEd Maste 	qidx_t *idxs;
1157*8890ab77SEd Maste 	qidx_t idx;
1158*8890ab77SEd Maste 	int count, len;
1159*8890ab77SEd Maste 
1160*8890ab77SEd Maste 	count = iru->iru_count;
1161*8890ab77SEd Maste 	len = iru->iru_buf_size;
1162*8890ab77SEd Maste 	idxs = iru->iru_idxs;
1163*8890ab77SEd Maste 	paddrs = iru->iru_paddrs;
1164*8890ab77SEd Maste 	KASSERT(iru->iru_qsidx == 0,
1165*8890ab77SEd Maste 	    ("tried to refill RX Channel %d.\n", iru->iru_qsidx));
1166*8890ab77SEd Maste 
1167*8890ab77SEd Maste 	sc = xsc;
1168*8890ab77SEd Maste 	scctx = iflib_get_softc_ctx(sc->ctx);
1169*8890ab77SEd Maste 	rdata = &sc->rx_ring_data;
1170*8890ab77SEd Maste 
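	/*
	 * Point each descriptor at its new buffer and set the OWN bit,
	 * handing it back to the DMA engine; the device is expected to
	 * clear OWN once it has written a received frame into the buffer.
	 */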
1171*8890ab77SEd Maste 	while (count > 0) {
1172*8890ab77SEd Maste 		idx = idxs[--count];
1173*8890ab77SEd Maste 		rxd = &rdata->ring[idx];
1174*8890ab77SEd Maste 
1175*8890ab77SEd Maste 		rxd->sts = 0;
1176*8890ab77SEd Maste 		rxd->addr.low =
1177*8890ab77SEd Maste 		    htole32(CSR_TRANSLATE_ADDR_LOW32(paddrs[count]));
1178*8890ab77SEd Maste 		rxd->addr.high =
1179*8890ab77SEd Maste 		    htole32(CSR_TRANSLATE_ADDR_HIGH32(paddrs[count]));
1180*8890ab77SEd Maste 		rxd->ctl = htole32(MGB_DESC_CTL_OWN |
1181*8890ab77SEd Maste 		    (len & MGB_DESC_CTL_BUFLEN_MASK));
1182*8890ab77SEd Maste 	}
1184*8890ab77SEd Maste }
1185*8890ab77SEd Maste 
1186*8890ab77SEd Maste static void
1187*8890ab77SEd Maste mgb_isc_rxd_flush(void *xsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
1188*8890ab77SEd Maste {
1189*8890ab77SEd Maste 	struct mgb_softc *sc;
1190*8890ab77SEd Maste 
1191*8890ab77SEd Maste 	sc = xsc;
1192*8890ab77SEd Maste 
1193*8890ab77SEd Maste 	KASSERT(rxqid == 0, ("tried to flush RX Channel %d.\n", rxqid));
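	/*
	 * Writing the tail register hands descriptors up to pidx back to
	 * the DMA engine for receive.
	 */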
1194*8890ab77SEd Maste 	sc->rx_ring_data.last_tail = pidx;
1195*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_RX_TAIL(rxqid), sc->rx_ring_data.last_tail);
1197*8890ab77SEd Maste }
1198*8890ab77SEd Maste 
1199*8890ab77SEd Maste static int
1200*8890ab77SEd Maste mgb_test_bar(struct mgb_softc *sc)
1201*8890ab77SEd Maste {
1202*8890ab77SEd Maste 	uint32_t id_rev, dev_id, rev;
1203*8890ab77SEd Maste 
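	/*
	 * The ID/REV register at offset 0 holds the device ID in its upper
	 * 16 bits and the chip revision in its lower 16 bits.
	 */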
1204*8890ab77SEd Maste 	id_rev = CSR_READ_REG(sc, 0);
1205*8890ab77SEd Maste 	dev_id = id_rev >> 16;
1206*8890ab77SEd Maste 	rev = id_rev & 0xFFFF;
1207*8890ab77SEd Maste 	if (dev_id == MGB_LAN7430_DEVICE_ID ||
1208*8890ab77SEd Maste 	    dev_id == MGB_LAN7431_DEVICE_ID) {
1209*8890ab77SEd Maste 		return 0;
1210*8890ab77SEd Maste 	} else {
1211*8890ab77SEd Maste 		device_printf(sc->dev, "ID check failed.\n");
1212*8890ab77SEd Maste 		return ENXIO;
1213*8890ab77SEd Maste 	}
1214*8890ab77SEd Maste }
1215*8890ab77SEd Maste 
1216*8890ab77SEd Maste static int
1217*8890ab77SEd Maste mgb_alloc_regs(struct mgb_softc *sc)
1218*8890ab77SEd Maste {
1219*8890ab77SEd Maste 	int rid;
1220*8890ab77SEd Maste 
1221*8890ab77SEd Maste 	rid = PCIR_BAR(MGB_BAR);
1222*8890ab77SEd Maste 	pci_enable_busmaster(sc->dev);
1223*8890ab77SEd Maste 	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1224*8890ab77SEd Maste 	    &rid, RF_ACTIVE);
1225*8890ab77SEd Maste 	if (sc->regs == NULL)
1226*8890ab77SEd Maste 		return ENXIO;
1227*8890ab77SEd Maste 
1228*8890ab77SEd Maste 	return (0);
1229*8890ab77SEd Maste }
1230*8890ab77SEd Maste 
1231*8890ab77SEd Maste static int
1232*8890ab77SEd Maste mgb_release_regs(struct mgb_softc *sc)
1233*8890ab77SEd Maste {
1234*8890ab77SEd Maste 	int error = 0;
1235*8890ab77SEd Maste 
1236*8890ab77SEd Maste 	if (sc->regs != NULL)
1237*8890ab77SEd Maste 		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
1238*8890ab77SEd Maste 		    rman_get_rid(sc->regs), sc->regs);
1239*8890ab77SEd Maste 	sc->regs = NULL;
1240*8890ab77SEd Maste 	pci_disable_busmaster(sc->dev);
1241*8890ab77SEd Maste 	return error;
1242*8890ab77SEd Maste }
1243*8890ab77SEd Maste 
1244*8890ab77SEd Maste static int
1245*8890ab77SEd Maste mgb_dma_init(struct mgb_softc *sc)
1246*8890ab77SEd Maste {
1247*8890ab77SEd Maste 	if_softc_ctx_t scctx;
1248*8890ab77SEd Maste 	int ch, error = 0;
1249*8890ab77SEd Maste 
1250*8890ab77SEd Maste 	scctx = iflib_get_softc_ctx(sc->ctx);
1251*8890ab77SEd Maste 
1252*8890ab77SEd Maste 	for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
1253*8890ab77SEd Maste 		if ((error = mgb_dma_rx_ring_init(sc, ch)))
1254*8890ab77SEd Maste 			goto fail;
1255*8890ab77SEd Maste 
1256*8890ab77SEd Maste 	for (ch = 0; ch < scctx->isc_ntxqsets; ch++)
1257*8890ab77SEd Maste 		if ((error = mgb_dma_tx_ring_init(sc, ch)))
1258*8890ab77SEd Maste 			goto fail;
1259*8890ab77SEd Maste 
1260*8890ab77SEd Maste fail:
1261*8890ab77SEd Maste 	return error;
1262*8890ab77SEd Maste }
1263*8890ab77SEd Maste 
1264*8890ab77SEd Maste static int
1265*8890ab77SEd Maste mgb_dma_rx_ring_init(struct mgb_softc *sc, int channel)
1266*8890ab77SEd Maste {
1267*8890ab77SEd Maste 	struct mgb_ring_data *rdata;
1268*8890ab77SEd Maste 	int ring_config, error = 0;
1269*8890ab77SEd Maste 
1270*8890ab77SEd Maste 	rdata = &sc->rx_ring_data;
1271*8890ab77SEd Maste 	mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_RESET);
1272*8890ab77SEd Maste 	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_RX_START, channel),
1273*8890ab77SEd Maste 	    ("Trying to init channels when not in init state\n"));
1274*8890ab77SEd Maste 
1275*8890ab77SEd Maste 	/* write ring address */
1276*8890ab77SEd Maste 	if (rdata->ring_bus_addr == 0) {
1277*8890ab77SEd Maste 		device_printf(sc->dev, "Invalid ring bus addr.\n");
		error = EINVAL;
1278*8890ab77SEd Maste 		goto fail;
1279*8890ab77SEd Maste 	}
1280*8890ab77SEd Maste 
1281*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_H(channel),
1282*8890ab77SEd Maste 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1283*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_L(channel),
1284*8890ab77SEd Maste 	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1285*8890ab77SEd Maste 
1286*8890ab77SEd Maste 	/* write head pointer writeback address */
1287*8890ab77SEd Maste 	if (rdata->head_wb_bus_addr == 0) {
1288*8890ab77SEd Maste 		device_printf(sc->dev, "Invalid head wb bus addr.\n");
		error = EINVAL;
1289*8890ab77SEd Maste 		goto fail;
1290*8890ab77SEd Maste 	}
1291*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_H(channel),
1292*8890ab77SEd Maste 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1293*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_L(channel),
1294*8890ab77SEd Maste 	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1295*8890ab77SEd Maste 
1296*8890ab77SEd Maste 	/* Enable head pointer writeback */
1297*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG0(channel), MGB_DMA_HEAD_WB_ENBL);
1298*8890ab77SEd Maste 
1299*8890ab77SEd Maste 	ring_config = CSR_READ_REG(sc, MGB_DMA_RX_CONFIG1(channel));
1300*8890ab77SEd Maste 	/*  ring size */
1301*8890ab77SEd Maste 	ring_config &= ~MGB_DMA_RING_LEN_MASK;
1302*8890ab77SEd Maste 	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1303*8890ab77SEd Maste 	/* packet padding  (PAD_2 is better for IP header alignment ...) */
1304*8890ab77SEd Maste 	ring_config &= ~MGB_DMA_RING_PAD_MASK;
1305*8890ab77SEd Maste 	ring_config |= (MGB_DMA_RING_PAD_0 & MGB_DMA_RING_PAD_MASK);
1306*8890ab77SEd Maste 
1307*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG1(channel), ring_config);
1308*8890ab77SEd Maste 
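	/* Start head tracking in sync with the device's head pointer. */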
1309*8890ab77SEd Maste 	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_RX_HEAD(channel));
1310*8890ab77SEd Maste 
1311*8890ab77SEd Maste 	error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_RESET);
1312*8890ab77SEd Maste 	if (error != 0) {
1313*8890ab77SEd Maste 		device_printf(sc->dev, "Failed to reset RX FCT.\n");
1314*8890ab77SEd Maste 		goto fail;
1315*8890ab77SEd Maste 	}
1316*8890ab77SEd Maste 	error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_ENABLE);
1317*8890ab77SEd Maste 	if (error != 0) {
1318*8890ab77SEd Maste 		device_printf(sc->dev, "Failed to enable RX FCT.\n");
1319*8890ab77SEd Maste 		goto fail;
1320*8890ab77SEd Maste 	}
1321*8890ab77SEd Maste 	error = mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_START);
1322*8890ab77SEd Maste 	if (error != 0)
1323*8890ab77SEd Maste 		device_printf(sc->dev, "Failed to start RX DMAC.\n");
1324*8890ab77SEd Maste fail:
1325*8890ab77SEd Maste 	return (error);
1326*8890ab77SEd Maste }
1327*8890ab77SEd Maste 
1328*8890ab77SEd Maste static int
1329*8890ab77SEd Maste mgb_dma_tx_ring_init(struct mgb_softc *sc, int channel)
1330*8890ab77SEd Maste {
1331*8890ab77SEd Maste 	struct mgb_ring_data *rdata;
1332*8890ab77SEd Maste 	int ring_config, error = 0;
1333*8890ab77SEd Maste 
1334*8890ab77SEd Maste 	rdata = &sc->tx_ring_data;
1335*8890ab77SEd Maste 	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel, FCT_RESET))) {
1336*8890ab77SEd Maste 		device_printf(sc->dev, "Failed to reset TX FCT.\n");
1337*8890ab77SEd Maste 		goto fail;
1338*8890ab77SEd Maste 	}
1339*8890ab77SEd Maste 	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel,
1340*8890ab77SEd Maste 	    FCT_ENABLE))) {
1341*8890ab77SEd Maste 		device_printf(sc->dev, "Failed to enable TX FCT.\n");
1342*8890ab77SEd Maste 		goto fail;
1343*8890ab77SEd Maste 	}
1344*8890ab77SEd Maste 	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1345*8890ab77SEd Maste 	    DMAC_RESET))) {
1346*8890ab77SEd Maste 		device_printf(sc->dev, "Failed to reset TX DMAC.\n");
1347*8890ab77SEd Maste 		goto fail;
1348*8890ab77SEd Maste 	}
1349*8890ab77SEd Maste 	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_TX_START, channel),
1350*8890ab77SEd Maste 	    ("Trying to init channels when not in init state\n"));
1351*8890ab77SEd Maste 
1352*8890ab77SEd Maste 	/* write ring address */
1353*8890ab77SEd Maste 	if (rdata->ring_bus_addr == 0) {
1354*8890ab77SEd Maste 		device_printf(sc->dev, "Invalid ring bus addr.\n");
		error = EINVAL;
1355*8890ab77SEd Maste 		goto fail;
1356*8890ab77SEd Maste 	}
1357*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_H(channel),
1358*8890ab77SEd Maste 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1359*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_L(channel),
1360*8890ab77SEd Maste 	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1361*8890ab77SEd Maste 
1362*8890ab77SEd Maste 	/* write ring size */
1363*8890ab77SEd Maste 	ring_config = CSR_READ_REG(sc, MGB_DMA_TX_CONFIG1(channel));
1364*8890ab77SEd Maste 	ring_config &= ~MGB_DMA_RING_LEN_MASK;
1365*8890ab77SEd Maste 	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1366*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG1(channel), ring_config);
1367*8890ab77SEd Maste 
1368*8890ab77SEd Maste 	/* Enable interrupt on completion and head pointer writeback */
1369*8890ab77SEd Maste 	ring_config = (MGB_DMA_HEAD_WB_LS_ENBL | MGB_DMA_HEAD_WB_ENBL);
1370*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG0(channel), ring_config);
1371*8890ab77SEd Maste 
1372*8890ab77SEd Maste 	/* write head pointer writeback address */
1373*8890ab77SEd Maste 	if (rdata->head_wb_bus_addr == 0) {
1374*8890ab77SEd Maste 		device_printf(sc->dev, "Invalid head wb bus addr.\n");
		error = EINVAL;
1375*8890ab77SEd Maste 		goto fail;
1376*8890ab77SEd Maste 	}
1377*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_H(channel),
1378*8890ab77SEd Maste 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1379*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_L(channel),
1380*8890ab77SEd Maste 	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1381*8890ab77SEd Maste 
1382*8890ab77SEd Maste 	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_TX_HEAD(channel));
1383*8890ab77SEd Maste 	KASSERT(rdata->last_head == 0, ("MGB_DMA_TX_HEAD was not reset.\n"));
1384*8890ab77SEd Maste 	rdata->last_tail = 0;
1385*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(channel), rdata->last_tail);
1386*8890ab77SEd Maste 
1387*8890ab77SEd Maste 	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1388*8890ab77SEd Maste 	    DMAC_START)))
1389*8890ab77SEd Maste 		device_printf(sc->dev, "Failed to start TX DMAC.\n");
1390*8890ab77SEd Maste fail:
1391*8890ab77SEd Maste 	return error;
1392*8890ab77SEd Maste }
1393*8890ab77SEd Maste 
1394*8890ab77SEd Maste static int
1395*8890ab77SEd Maste mgb_dmac_control(struct mgb_softc *sc, int start, int channel,
1396*8890ab77SEd Maste     enum mgb_dmac_cmd cmd)
1397*8890ab77SEd Maste {
1398*8890ab77SEd Maste 	int error = 0;
1399*8890ab77SEd Maste 
1400*8890ab77SEd Maste 	switch (cmd) {
1401*8890ab77SEd Maste 	case DMAC_RESET:
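		/*
		 * The reset command bit appears to be self-clearing; wait
		 * for it to drop before returning.
		 */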
1402*8890ab77SEd Maste 		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1403*8890ab77SEd Maste 		    MGB_DMAC_CMD_RESET(start, channel));
1404*8890ab77SEd Maste 		error = mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0,
1405*8890ab77SEd Maste 		    MGB_DMAC_CMD_RESET(start, channel));
1406*8890ab77SEd Maste 		break;
1407*8890ab77SEd Maste 
1408*8890ab77SEd Maste 	case DMAC_START:
1409*8890ab77SEd Maste 		/*
1410*8890ab77SEd Maste 		 * NOTE: this simplifies the logic, since it will never
1411*8890ab77SEd Maste 		 * try to start in STOP_PENDING, but it also increases work.
1412*8890ab77SEd Maste 		 */
1413*8890ab77SEd Maste 		error = mgb_dmac_control(sc, start, channel, DMAC_STOP);
1414*8890ab77SEd Maste 		if (error != 0)
1415*8890ab77SEd Maste 			return error;
1416*8890ab77SEd Maste 		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1417*8890ab77SEd Maste 		    MGB_DMAC_CMD_START(start, channel));
1418*8890ab77SEd Maste 		break;
1419*8890ab77SEd Maste 
1420*8890ab77SEd Maste 	case DMAC_STOP:
1421*8890ab77SEd Maste 		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1422*8890ab77SEd Maste 		    MGB_DMAC_CMD_STOP(start, channel));
1423*8890ab77SEd Maste 		error = mgb_wait_for_bits(sc, MGB_DMAC_CMD,
1424*8890ab77SEd Maste 		    MGB_DMAC_CMD_STOP(start, channel),
1425*8890ab77SEd Maste 		    MGB_DMAC_CMD_START(start, channel));
1426*8890ab77SEd Maste 		break;
1427*8890ab77SEd Maste 	}
1428*8890ab77SEd Maste 	return error;
1429*8890ab77SEd Maste }
1430*8890ab77SEd Maste 
1431*8890ab77SEd Maste static int
1432*8890ab77SEd Maste mgb_fct_control(struct mgb_softc *sc, int reg, int channel,
1433*8890ab77SEd Maste     enum mgb_fct_cmd cmd)
1434*8890ab77SEd Maste {
1435*8890ab77SEd Maste 
1436*8890ab77SEd Maste 	switch (cmd) {
1437*8890ab77SEd Maste 	case FCT_RESET:
1438*8890ab77SEd Maste 		CSR_WRITE_REG(sc, reg, MGB_FCT_RESET(channel));
1439*8890ab77SEd Maste 		return mgb_wait_for_bits(sc, reg, 0, MGB_FCT_RESET(channel));
1440*8890ab77SEd Maste 	case FCT_ENABLE:
1441*8890ab77SEd Maste 		CSR_WRITE_REG(sc, reg, MGB_FCT_ENBL(channel));
1442*8890ab77SEd Maste 		return (0);
1443*8890ab77SEd Maste 	case FCT_DISABLE:
1444*8890ab77SEd Maste 		CSR_WRITE_REG(sc, reg, MGB_FCT_DSBL(channel));
1445*8890ab77SEd Maste 		return mgb_wait_for_bits(sc, reg, 0, MGB_FCT_ENBL(channel));
	default:
		/* Unknown command; nothing written. */
		return (EINVAL);
1446*8890ab77SEd Maste 	}
1447*8890ab77SEd Maste }
1448*8890ab77SEd Maste 
1449*8890ab77SEd Maste static int
1450*8890ab77SEd Maste mgb_hw_teardown(struct mgb_softc *sc)
1451*8890ab77SEd Maste {
1452*8890ab77SEd Maste 	int err = 0;
1453*8890ab77SEd Maste 
1454*8890ab77SEd Maste 	/* Stop MAC */
1455*8890ab77SEd Maste 	CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1456*8890ab77SEd Maste 	CSR_CLEAR_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1457*8890ab77SEd Maste 	if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0)))
1458*8890ab77SEd Maste 		return (err);
1459*8890ab77SEd Maste 	if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0)))
1460*8890ab77SEd Maste 		return (err);
1461*8890ab77SEd Maste 	return (err);
1462*8890ab77SEd Maste }
1463*8890ab77SEd Maste 
1464*8890ab77SEd Maste static int
1465*8890ab77SEd Maste mgb_hw_init(struct mgb_softc *sc)
1466*8890ab77SEd Maste {
1467*8890ab77SEd Maste 	int error = 0;
1468*8890ab77SEd Maste 
1469*8890ab77SEd Maste 	error = mgb_hw_reset(sc);
1470*8890ab77SEd Maste 	if (error != 0)
1471*8890ab77SEd Maste 		goto fail;
1472*8890ab77SEd Maste 
1473*8890ab77SEd Maste 	mgb_mac_init(sc);
1474*8890ab77SEd Maste 
1475*8890ab77SEd Maste 	error = mgb_phy_reset(sc);
1476*8890ab77SEd Maste 	if (error != 0)
1477*8890ab77SEd Maste 		goto fail;
1478*8890ab77SEd Maste 
1479*8890ab77SEd Maste 	error = mgb_dmac_reset(sc);
1480*8890ab77SEd Maste 	if (error != 0)
1481*8890ab77SEd Maste 		goto fail;
1482*8890ab77SEd Maste 
1483*8890ab77SEd Maste fail:
1484*8890ab77SEd Maste 	return error;
1485*8890ab77SEd Maste }
1486*8890ab77SEd Maste 
1487*8890ab77SEd Maste static int
1488*8890ab77SEd Maste mgb_hw_reset(struct mgb_softc *sc)
1489*8890ab77SEd Maste {
1490*8890ab77SEd Maste 
1491*8890ab77SEd Maste 	CSR_UPDATE_REG(sc, MGB_HW_CFG, MGB_LITE_RESET);
1492*8890ab77SEd Maste 	return (mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET));
1493*8890ab77SEd Maste }
1494*8890ab77SEd Maste 
1495*8890ab77SEd Maste static int
1496*8890ab77SEd Maste mgb_mac_init(struct mgb_softc *sc)
1497*8890ab77SEd Maste {
1498*8890ab77SEd Maste 
1499*8890ab77SEd Maste 	/*
1500*8890ab77SEd Maste 	 * Enable automatic duplex detection and
1501*8890ab77SEd Maste 	 * automatic speed detection.
1502*8890ab77SEd Maste 	 */
1503*8890ab77SEd Maste 	CSR_UPDATE_REG(sc, MGB_MAC_CR, MGB_MAC_ADD_ENBL | MGB_MAC_ASD_ENBL);
1504*8890ab77SEd Maste 	CSR_UPDATE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1505*8890ab77SEd Maste 	CSR_UPDATE_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1506*8890ab77SEd Maste 
1507*8890ab77SEd Maste 	return MGB_STS_OK;
1508*8890ab77SEd Maste }
1509*8890ab77SEd Maste 
1510*8890ab77SEd Maste static int
1511*8890ab77SEd Maste mgb_phy_reset(struct mgb_softc *sc)
1512*8890ab77SEd Maste {
1513*8890ab77SEd Maste 
1514*8890ab77SEd Maste 	CSR_UPDATE_BYTE(sc, MGB_PMT_CTL, MGB_PHY_RESET);
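	/*
	 * Reset completion is detected in two steps: the reset bit must
	 * clear, then the PHY must report ready.
	 */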
1515*8890ab77SEd Maste 	if (mgb_wait_for_bits(sc, MGB_PMT_CTL, 0, MGB_PHY_RESET) ==
1516*8890ab77SEd Maste 	    MGB_STS_TIMEOUT)
1517*8890ab77SEd Maste 		return MGB_STS_TIMEOUT;
1518*8890ab77SEd Maste 	return (mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0));
1519*8890ab77SEd Maste }
1520*8890ab77SEd Maste 
1521*8890ab77SEd Maste static int
1522*8890ab77SEd Maste mgb_dmac_reset(struct mgb_softc *sc)
1523*8890ab77SEd Maste {
1524*8890ab77SEd Maste 
1525*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_RESET);
1526*8890ab77SEd Maste 	return (mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_RESET));
1527*8890ab77SEd Maste }
1528*8890ab77SEd Maste 
1529*8890ab77SEd Maste static int
1530*8890ab77SEd Maste mgb_wait_for_bits(struct mgb_softc *sc, int reg, int set_bits, int clear_bits)
1531*8890ab77SEd Maste {
1532*8890ab77SEd Maste 	int i, val;
1533*8890ab77SEd Maste 
1534*8890ab77SEd Maste 	i = 0;
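	/*
	 * Poll until every bit in set_bits is set and every bit in
	 * clear_bits is clear, or until MGB_TIMEOUT iterations elapse.
	 */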
1535*8890ab77SEd Maste 	do {
1536*8890ab77SEd Maste 		/*
1537*8890ab77SEd Maste 		 * XXX: Datasheet states delay should be > 5 microseconds
1538*8890ab77SEd Maste 		 * for device reset.
1539*8890ab77SEd Maste 		 */
1540*8890ab77SEd Maste 		DELAY(100);
1541*8890ab77SEd Maste 		val = CSR_READ_REG(sc, reg);
1542*8890ab77SEd Maste 		if ((val & set_bits) == set_bits &&
1543*8890ab77SEd Maste 		    (val & clear_bits) == 0)
1544*8890ab77SEd Maste 			return MGB_STS_OK;
1545*8890ab77SEd Maste 	} while (i++ < MGB_TIMEOUT);
1546*8890ab77SEd Maste 
1547*8890ab77SEd Maste 	return MGB_STS_TIMEOUT;
1548*8890ab77SEd Maste }
1549*8890ab77SEd Maste 
1550*8890ab77SEd Maste static void
1551*8890ab77SEd Maste mgb_get_ethaddr(struct mgb_softc *sc, struct ether_addr *dest)
1552*8890ab77SEd Maste {
1553*8890ab77SEd Maste 
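	/*
	 * The station address is split across two registers: the first four
	 * octets in the low register and the remaining two in the high one.
	 */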
1554*8890ab77SEd Maste 	CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_L, &dest->octet[0], 4);
1555*8890ab77SEd Maste 	CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_H, &dest->octet[4], 2);
1556*8890ab77SEd Maste }
1557*8890ab77SEd Maste 
1558*8890ab77SEd Maste static int
1559*8890ab77SEd Maste mgb_miibus_readreg(device_t dev, int phy, int reg)
1560*8890ab77SEd Maste {
1561*8890ab77SEd Maste 	struct mgb_softc *sc;
1562*8890ab77SEd Maste 	int mii_access;
1563*8890ab77SEd Maste 
1564*8890ab77SEd Maste 	sc = iflib_get_softc(device_get_softc(dev));
1565*8890ab77SEd Maste 
1566*8890ab77SEd Maste 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1567*8890ab77SEd Maste 	    MGB_STS_TIMEOUT)
1568*8890ab77SEd Maste 		return EIO;
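	/*
	 * Compose the MII command word: PHY and register addresses plus the
	 * read opcode; setting the BUSY bit starts the transaction.
	 */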
1569*8890ab77SEd Maste 	mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1570*8890ab77SEd Maste 	mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1571*8890ab77SEd Maste 	mii_access |= MGB_MII_BUSY | MGB_MII_READ;
1572*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1573*8890ab77SEd Maste 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1574*8890ab77SEd Maste 	    MGB_STS_TIMEOUT)
1575*8890ab77SEd Maste 		return EIO;
1576*8890ab77SEd Maste 	return (CSR_READ_2_BYTES(sc, MGB_MII_DATA));
1577*8890ab77SEd Maste }
1578*8890ab77SEd Maste 
1579*8890ab77SEd Maste static int
1580*8890ab77SEd Maste mgb_miibus_writereg(device_t dev, int phy, int reg, int data)
1581*8890ab77SEd Maste {
1582*8890ab77SEd Maste 	struct mgb_softc *sc;
1583*8890ab77SEd Maste 	int mii_access;
1584*8890ab77SEd Maste 
1585*8890ab77SEd Maste 	sc = iflib_get_softc(device_get_softc(dev));
1586*8890ab77SEd Maste 
1587*8890ab77SEd Maste 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS,
1588*8890ab77SEd Maste 	    0, MGB_MII_BUSY) == MGB_STS_TIMEOUT)
1589*8890ab77SEd Maste 		return EIO;
1590*8890ab77SEd Maste 	mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1591*8890ab77SEd Maste 	mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1592*8890ab77SEd Maste 	mii_access |= MGB_MII_BUSY | MGB_MII_WRITE;
1593*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_MII_DATA, data);
1594*8890ab77SEd Maste 	CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1595*8890ab77SEd Maste 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1596*8890ab77SEd Maste 	    MGB_STS_TIMEOUT)
1597*8890ab77SEd Maste 		return EIO;
1598*8890ab77SEd Maste 	return 0;
1599*8890ab77SEd Maste }
1600*8890ab77SEd Maste 
1601*8890ab77SEd Maste /* XXX: These miibus callbacks may need locking. */
1602*8890ab77SEd Maste static void
1603*8890ab77SEd Maste mgb_miibus_statchg(device_t dev)
1604*8890ab77SEd Maste {
1605*8890ab77SEd Maste 	struct mgb_softc *sc;
1606*8890ab77SEd Maste 	struct mii_data *miid;
1607*8890ab77SEd Maste 
1608*8890ab77SEd Maste 	sc = iflib_get_softc(device_get_softc(dev));
1609*8890ab77SEd Maste 	miid = device_get_softc(sc->miibus);
1610*8890ab77SEd Maste 	/* Update baudrate in iflib */
1611*8890ab77SEd Maste 	sc->baudrate = ifmedia_baudrate(miid->mii_media_active);
1612*8890ab77SEd Maste 	iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1613*8890ab77SEd Maste }
1614*8890ab77SEd Maste 
1615*8890ab77SEd Maste static void
1616*8890ab77SEd Maste mgb_miibus_linkchg(device_t dev)
1617*8890ab77SEd Maste {
1618*8890ab77SEd Maste 	struct mgb_softc *sc;
1619*8890ab77SEd Maste 	struct mii_data *miid;
1620*8890ab77SEd Maste 	int link_state;
1621*8890ab77SEd Maste 
1622*8890ab77SEd Maste 	sc = iflib_get_softc(device_get_softc(dev));
1623*8890ab77SEd Maste 	miid = device_get_softc(sc->miibus);
1624*8890ab77SEd Maste 	/* XXX: copied from miibus_linkchg */
1625*8890ab77SEd Maste 	if (miid->mii_media_status & IFM_AVALID) {
1626*8890ab77SEd Maste 		if (miid->mii_media_status & IFM_ACTIVE)
1627*8890ab77SEd Maste 			link_state = LINK_STATE_UP;
1628*8890ab77SEd Maste 		else
1629*8890ab77SEd Maste 			link_state = LINK_STATE_DOWN;
1630*8890ab77SEd Maste 	} else
1631*8890ab77SEd Maste 		link_state = LINK_STATE_UNKNOWN;
1632*8890ab77SEd Maste 	sc->link_state = link_state;
1633*8890ab77SEd Maste 	iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1634*8890ab77SEd Maste }
1635