xref: /freebsd/sys/dev/mgb/if_mgb.c (revision dab59af3bcc7cb7ba01569d3044894b3e860ad56)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation
5  *
6  * This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 /*
32  * Microchip LAN7430/LAN7431 PCIe to Gigabit Ethernet Controller driver.
33  *
34  * Product information:
35  * LAN7430 https://www.microchip.com/en-us/product/LAN7430
36  *   - Integrated IEEE 802.3 compliant PHY
37  * LAN7431 https://www.microchip.com/en-us/product/LAN7431
38  *   - RGMII Interface
39  *
40  * This driver uses the iflib interface and the default 'ukphy' PHY driver.
41  *
42  * UNIMPLEMENTED FEATURES
43  * ----------------------
44  * A number of features supported by the LAN743X devices are not yet implemented in
45  * this driver:
46  *
47  * - Multiple (up to 4) RX queues support
48  *   - Just needs the asserts removed and multiple `rx_ring_data` structs
49  *     allocated based on ncpus; see the sketch following this comment block.
50  * - RX/TX Checksum Offloading support
51  * - VLAN support
52  * - Receive Packet Filtering (Multicast Perfect/Hash Address) support
53  * - Wake on LAN (WoL) support
54  * - TX LSO support
55  * - Receive Side Scaling (RSS) support
56  * - Debugging Capabilities:
57  *   - Could include MAC statistics and
58  *     error status registers in sysctl.
59  */
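
/*
 * A rough sketch (not implemented; names such as MGB_MAX_RX_CHANNELS are
 * placeholders) of the multi-queue change mentioned above:
 *
 *	struct mgb_ring_data	rx_ring_data[MGB_MAX_RX_CHANNELS];
 *	...
 *	rdata = &sc->rx_ring_data[qsidx];
 *
 * i.e. drop the nrxqsets == 1 assertions, keep one mgb_ring_data per RX
 * queue set and wire each set to its own DMAC channel and interrupt vector.
 */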
60 
61 #include <sys/param.h>
62 #include <sys/bus.h>
63 #include <sys/endian.h>
64 #include <sys/kdb.h>
65 #include <sys/kernel.h>
66 #include <sys/module.h>
67 #include <sys/rman.h>
68 #include <sys/socket.h>
69 #include <sys/sockio.h>
70 #include <machine/bus.h>
71 #include <machine/resource.h>
72 
73 #include <net/ethernet.h>
74 #include <net/if.h>
75 #include <net/if_var.h>
76 #include <net/if_types.h>
77 #include <net/if_media.h>
78 #include <net/iflib.h>
79 
80 #include <dev/mgb/if_mgb.h>
81 #include <dev/mii/mii.h>
82 #include <dev/mii/miivar.h>
83 #include <dev/pci/pcireg.h>
84 #include <dev/pci/pcivar.h>
85 
86 #include "ifdi_if.h"
87 #include "miibus_if.h"
88 
89 static const pci_vendor_info_t mgb_vendor_info_array[] = {
90 	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7430_DEVICE_ID,
91 	    "Microchip LAN7430 PCIe Gigabit Ethernet Controller"),
92 	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7431_DEVICE_ID,
93 	    "Microchip LAN7431 PCIe Gigabit Ethernet Controller"),
94 	PVID_END
95 };
96 
97 /* Device methods */
98 static device_register_t		mgb_register;
99 
100 /* IFLIB methods */
101 static ifdi_attach_pre_t		mgb_attach_pre;
102 static ifdi_attach_post_t		mgb_attach_post;
103 static ifdi_detach_t			mgb_detach;
104 
105 static ifdi_tx_queues_alloc_t		mgb_tx_queues_alloc;
106 static ifdi_rx_queues_alloc_t		mgb_rx_queues_alloc;
107 static ifdi_queues_free_t		mgb_queues_free;
108 
109 static ifdi_init_t			mgb_init;
110 static ifdi_stop_t			mgb_stop;
111 
112 static ifdi_msix_intr_assign_t		mgb_msix_intr_assign;
113 static ifdi_tx_queue_intr_enable_t	mgb_tx_queue_intr_enable;
114 static ifdi_rx_queue_intr_enable_t	mgb_rx_queue_intr_enable;
115 static ifdi_intr_enable_t		mgb_intr_enable_all;
116 static ifdi_intr_disable_t		mgb_intr_disable_all;
117 
118 /* IFLIB_TXRX methods */
119 static int				mgb_isc_txd_encap(void *,
120 					    if_pkt_info_t);
121 static void				mgb_isc_txd_flush(void *,
122 					    uint16_t, qidx_t);
123 static int				mgb_isc_txd_credits_update(void *,
124 					    uint16_t, bool);
125 static int				mgb_isc_rxd_available(void *,
126 					    uint16_t, qidx_t, qidx_t);
127 static int				mgb_isc_rxd_pkt_get(void *,
128 					    if_rxd_info_t);
129 static void				mgb_isc_rxd_refill(void *,
130 					    if_rxd_update_t);
131 static void				mgb_isc_rxd_flush(void *,
132 					    uint16_t, uint8_t, qidx_t);
133 
134 /* Interrupts */
135 static driver_filter_t			mgb_legacy_intr;
136 static driver_filter_t			mgb_admin_intr;
137 static driver_filter_t			mgb_rxq_intr;
138 static bool				mgb_intr_test(struct mgb_softc *);
139 
140 /* MII methods */
141 static miibus_readreg_t			mgb_miibus_readreg;
142 static miibus_writereg_t		mgb_miibus_writereg;
143 static miibus_linkchg_t			mgb_miibus_linkchg;
144 static miibus_statchg_t			mgb_miibus_statchg;
145 
146 static int				mgb_media_change(if_t);
147 static void				mgb_media_status(if_t,
148 					    struct ifmediareq *);
149 
150 /* Helper/Test functions */
151 static int				mgb_test_bar(struct mgb_softc *);
152 static int				mgb_alloc_regs(struct mgb_softc *);
153 static int				mgb_release_regs(struct mgb_softc *);
154 
155 static void				mgb_get_ethaddr(struct mgb_softc *,
156 					    struct ether_addr *);
157 
158 static int				mgb_wait_for_bits(struct mgb_softc *,
159 					    int, int, int);
160 
161 /* H/W init, reset and teardown helpers */
162 static int				mgb_hw_init(struct mgb_softc *);
163 static int				mgb_hw_teardown(struct mgb_softc *);
164 static int				mgb_hw_reset(struct mgb_softc *);
165 static int				mgb_mac_init(struct mgb_softc *);
166 static int				mgb_dmac_reset(struct mgb_softc *);
167 static int				mgb_phy_reset(struct mgb_softc *);
168 
169 static int				mgb_dma_init(struct mgb_softc *);
170 static int				mgb_dma_tx_ring_init(struct mgb_softc *,
171 					    int);
172 static int				mgb_dma_rx_ring_init(struct mgb_softc *,
173 					    int);
174 
175 static int				mgb_dmac_control(struct mgb_softc *,
176 					    int, int, enum mgb_dmac_cmd);
177 static int				mgb_fct_control(struct mgb_softc *,
178 					    int, int, enum mgb_fct_cmd);
179 
180 /*********************************************************************
181  *  FreeBSD Device Interface Entry Points
182  *********************************************************************/
183 
184 static device_method_t mgb_methods[] = {
185 	/* Device interface */
186 	DEVMETHOD(device_register,	mgb_register),
187 	DEVMETHOD(device_probe,		iflib_device_probe),
188 	DEVMETHOD(device_attach,	iflib_device_attach),
189 	DEVMETHOD(device_detach,	iflib_device_detach),
190 	DEVMETHOD(device_shutdown,	iflib_device_shutdown),
191 	DEVMETHOD(device_suspend,	iflib_device_suspend),
192 	DEVMETHOD(device_resume,	iflib_device_resume),
193 
194 	/* MII Interface */
195 	DEVMETHOD(miibus_readreg,	mgb_miibus_readreg),
196 	DEVMETHOD(miibus_writereg,	mgb_miibus_writereg),
197 	DEVMETHOD(miibus_linkchg,	mgb_miibus_linkchg),
198 	DEVMETHOD(miibus_statchg,	mgb_miibus_statchg),
199 
200 	DEVMETHOD_END
201 };
202 
203 static driver_t mgb_driver = {
204 	"mgb", mgb_methods, sizeof(struct mgb_softc)
205 };
206 
207 DRIVER_MODULE(mgb, pci, mgb_driver, NULL, NULL);
208 IFLIB_PNP_INFO(pci, mgb, mgb_vendor_info_array);
209 MODULE_VERSION(mgb, 1);
210 
211 #if 0 /* MIIBUS_DEBUG */
212 /* If MIIBUS debug code runs during attach, registration order matters; use the ordered registration below instead. */
213 DRIVER_MODULE_ORDERED(miibus, mgb, miibus_driver, NULL, NULL,
214     SI_ORDER_ANY);
215 #endif /* MIIBUS_DEBUG */
216 DRIVER_MODULE(miibus, mgb, miibus_driver, NULL, NULL);
217 
218 MODULE_DEPEND(mgb, pci, 1, 1, 1);
219 MODULE_DEPEND(mgb, ether, 1, 1, 1);
220 MODULE_DEPEND(mgb, miibus, 1, 1, 1);
221 MODULE_DEPEND(mgb, iflib, 1, 1, 1);
222 
223 static device_method_t mgb_iflib_methods[] = {
224 	DEVMETHOD(ifdi_attach_pre, mgb_attach_pre),
225 	DEVMETHOD(ifdi_attach_post, mgb_attach_post),
226 	DEVMETHOD(ifdi_detach, mgb_detach),
227 
228 	DEVMETHOD(ifdi_init, mgb_init),
229 	DEVMETHOD(ifdi_stop, mgb_stop),
230 
231 	DEVMETHOD(ifdi_tx_queues_alloc, mgb_tx_queues_alloc),
232 	DEVMETHOD(ifdi_rx_queues_alloc, mgb_rx_queues_alloc),
233 	DEVMETHOD(ifdi_queues_free, mgb_queues_free),
234 
235 	DEVMETHOD(ifdi_msix_intr_assign, mgb_msix_intr_assign),
236 	DEVMETHOD(ifdi_tx_queue_intr_enable, mgb_tx_queue_intr_enable),
237 	DEVMETHOD(ifdi_rx_queue_intr_enable, mgb_rx_queue_intr_enable),
238 	DEVMETHOD(ifdi_intr_enable, mgb_intr_enable_all),
239 	DEVMETHOD(ifdi_intr_disable, mgb_intr_disable_all),
240 
241 #if 0 /* Not yet implemented IFLIB methods */
242 	/*
243 	 * Set multicast addresses, mtu and promiscuous mode
244 	 */
245 	DEVMETHOD(ifdi_multi_set, mgb_multi_set),
246 	DEVMETHOD(ifdi_mtu_set, mgb_mtu_set),
247 	DEVMETHOD(ifdi_promisc_set, mgb_promisc_set),
248 
249 	/*
250 	 * Needed for VLAN support
251 	 */
252 	DEVMETHOD(ifdi_vlan_register, mgb_vlan_register),
253 	DEVMETHOD(ifdi_vlan_unregister, mgb_vlan_unregister),
254 	DEVMETHOD(ifdi_needs_restart, mgb_if_needs_restart),
255 
256 	/*
257 	 * Needed for WOL support
258 	 * at the very least.
259 	 */
260 	DEVMETHOD(ifdi_shutdown, mgb_shutdown),
261 	DEVMETHOD(ifdi_suspend, mgb_suspend),
262 	DEVMETHOD(ifdi_resume, mgb_resume),
263 #endif /* UNUSED_IFLIB_METHODS */
264 	DEVMETHOD_END
265 };
266 
267 static driver_t mgb_iflib_driver = {
268 	"mgb", mgb_iflib_methods, sizeof(struct mgb_softc)
269 };
270 
271 static struct if_txrx mgb_txrx  = {
272 	.ift_txd_encap = mgb_isc_txd_encap,
273 	.ift_txd_flush = mgb_isc_txd_flush,
274 	.ift_txd_credits_update = mgb_isc_txd_credits_update,
275 	.ift_rxd_available = mgb_isc_rxd_available,
276 	.ift_rxd_pkt_get = mgb_isc_rxd_pkt_get,
277 	.ift_rxd_refill = mgb_isc_rxd_refill,
278 	.ift_rxd_flush = mgb_isc_rxd_flush,
279 
280 	.ift_legacy_intr = mgb_legacy_intr
281 };
282 
283 static struct if_shared_ctx mgb_sctx_init = {
284 	.isc_magic = IFLIB_MAGIC,
285 
286 	.isc_q_align = PAGE_SIZE,
287 	.isc_admin_intrcnt = 1,
288 	.isc_flags = IFLIB_DRIVER_MEDIA /* | IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ*/,
289 
290 	.isc_vendor_info = mgb_vendor_info_array,
291 	.isc_driver_version = "1",
292 	.isc_driver = &mgb_iflib_driver,
293 	/* 2 queues per set for TX and RX (ring queue, head writeback queue) */
294 	.isc_ntxqs = 2,
295 
296 	.isc_tx_maxsize = MGB_DMA_MAXSEGS  * MCLBYTES,
297 	/* .isc_tx_nsegments = MGB_DMA_MAXSEGS, */
298 	.isc_tx_maxsegsize = MCLBYTES,
299 
300 	.isc_ntxd_min = {1, 1}, /* Will want to make this bigger */
301 	.isc_ntxd_max = {MGB_DMA_RING_SIZE, 1},
302 	.isc_ntxd_default = {MGB_DMA_RING_SIZE, 1},
303 
304 	.isc_nrxqs = 2,
305 
306 	.isc_rx_maxsize = MCLBYTES,
307 	.isc_rx_nsegments = 1,
308 	.isc_rx_maxsegsize = MCLBYTES,
309 
310 	.isc_nrxd_min = {1, 1}, /* Will want to make this bigger */
311 	.isc_nrxd_max = {MGB_DMA_RING_SIZE, 1},
312 	.isc_nrxd_default = {MGB_DMA_RING_SIZE, 1},
313 
314 	.isc_nfl = 1, /* one free list since there is only one queue */
315 #if 0 /* UNUSED_CTX */
316 
317 	.isc_tso_maxsize = MGB_TSO_MAXSIZE + sizeof(struct ether_vlan_header),
318 	.isc_tso_maxsegsize = MGB_TX_MAXSEGSIZE,
319 #endif /* UNUSED_CTX */
320 };
321 
322 /*********************************************************************/
323 
324 static void *
325 mgb_register(device_t dev)
326 {
327 
328 	return (&mgb_sctx_init);
329 }
330 
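/*
 * ifdi_attach_pre: fill in the iflib software context (queue sizes, segment
 * limits, media), map the register BAR, bring the hardware to a known state,
 * attach the PHY through miibus and read (or generate) the MAC address.
 * All interrupt causes are mapped to vector 0 until MSI-X assignment.
 */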
331 static int
332 mgb_attach_pre(if_ctx_t ctx)
333 {
334 	struct mgb_softc *sc;
335 	if_softc_ctx_t scctx;
336 	int error, phyaddr, rid;
337 	struct ether_addr hwaddr;
338 	struct mii_data *miid;
339 
340 	sc = iflib_get_softc(ctx);
341 	sc->ctx = ctx;
342 	sc->dev = iflib_get_dev(ctx);
343 	scctx = iflib_get_softc_ctx(ctx);
344 
345 	/* IFLIB required setup */
346 	scctx->isc_txrx = &mgb_txrx;
347 	scctx->isc_tx_nsegments = MGB_DMA_MAXSEGS;
348 	/* Ring desc queues */
349 	scctx->isc_txqsizes[0] = sizeof(struct mgb_ring_desc) *
350 	    scctx->isc_ntxd[0];
351 	scctx->isc_rxqsizes[0] = sizeof(struct mgb_ring_desc) *
352 	    scctx->isc_nrxd[0];
353 
354 	/* Head WB queues */
355 	scctx->isc_txqsizes[1] = sizeof(uint32_t) * scctx->isc_ntxd[1];
356 	scctx->isc_rxqsizes[1] = sizeof(uint32_t) * scctx->isc_nrxd[1];
357 
358 	/* XXX: Must have 1 txqset, but can have up to 4 rxqsets */
359 	scctx->isc_nrxqsets = 1;
360 	scctx->isc_ntxqsets = 1;
361 
362 	/* scctx->isc_tx_csum_flags = (CSUM_TCP | CSUM_UDP) |
363 	    (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TSO */
364 	scctx->isc_tx_csum_flags = 0;
365 	scctx->isc_capabilities = scctx->isc_capenable = 0;
366 #if 0
367 	/*
368 	 * CSUM, TSO and VLAN support are TBD
369 	 */
370 	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
371 	    IFCAP_TSO4 | IFCAP_TSO6 |
372 	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
373 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
374 	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO |
375 	    IFCAP_JUMBO_MTU;
376 	scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
377 #endif
378 
379 	/* get the BAR */
380 	error = mgb_alloc_regs(sc);
381 	if (error != 0) {
382 		device_printf(sc->dev,
383 		    "Unable to allocate bus resource: registers.\n");
384 		goto fail;
385 	}
386 
387 	error = mgb_test_bar(sc);
388 	if (error != 0)
389 		goto fail;
390 
391 	error = mgb_hw_init(sc);
392 	if (error != 0) {
393 		device_printf(sc->dev,
394 		    "MGB device init failed. (err: %d)\n", error);
395 		goto fail;
396 	}
397 
398 	switch (pci_get_device(sc->dev)) {
399 	case MGB_LAN7430_DEVICE_ID:
400 		phyaddr = 1;
401 		break;
402 	case MGB_LAN7431_DEVICE_ID:
403 	default:
404 		phyaddr = MII_PHY_ANY;
405 		break;
406 	}
407 
408 	/* XXX: Would be nice(r) if locked methods were here */
409 	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(ctx),
410 	    mgb_media_change, mgb_media_status,
411 	    BMSR_DEFCAPMASK, phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE);
412 	if (error != 0) {
413 		device_printf(sc->dev, "Failed to attach MII interface\n");
414 		goto fail;
415 	}
416 
417 	miid = device_get_softc(sc->miibus);
418 	scctx->isc_media = &miid->mii_media;
419 
420 	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
421 	/** Setup PBA BAR **/
422 	rid = pci_msix_pba_bar(sc->dev);
423 	if (rid != scctx->isc_msix_bar) {
424 		sc->pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
425 		    &rid, RF_ACTIVE);
426 		if (sc->pba == NULL) {
427 			error = ENXIO;
428 			device_printf(sc->dev, "Failed to setup PBA BAR\n");
429 			goto fail;
430 		}
431 	}
432 
433 	mgb_get_ethaddr(sc, &hwaddr);
434 	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
435 	    ETHER_IS_MULTICAST(hwaddr.octet) ||
436 	    ETHER_IS_ZERO(hwaddr.octet))
437 		ether_gen_addr(iflib_get_ifp(ctx), &hwaddr);
438 
439 	/*
440 	 * XXX: if the MAC address was generated the linux driver
441 	 * writes it back to the device.
442 	 */
443 	iflib_set_mac(ctx, hwaddr.octet);
444 
445 	/* Map all vectors to vector 0 (admin interrupts) by default. */
446 	CSR_WRITE_REG(sc, MGB_INTR_VEC_RX_MAP, 0);
447 	CSR_WRITE_REG(sc, MGB_INTR_VEC_TX_MAP, 0);
448 	CSR_WRITE_REG(sc, MGB_INTR_VEC_OTHER_MAP, 0);
449 
450 	return (0);
451 
452 fail:
453 	mgb_detach(ctx);
454 	return (error);
455 }
456 
457 static int
458 mgb_attach_post(if_ctx_t ctx)
459 {
460 	struct mgb_softc *sc;
461 
462 	sc = iflib_get_softc(ctx);
463 
464 	device_printf(sc->dev, "Interrupt test: %s\n",
465 	    (mgb_intr_test(sc) ? "PASS" : "FAIL"));
466 
467 	return (0);
468 }
469 
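/*
 * Tear down in roughly the reverse order of attach: disable the MAC, free
 * the IRQs, delete the miibus child and release the PBA and register BARs.
 */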
470 static int
471 mgb_detach(if_ctx_t ctx)
472 {
473 	struct mgb_softc *sc;
474 	int error;
475 
476 	sc = iflib_get_softc(ctx);
477 
478 	/* XXX: Should report errors but still detach everything. */
479 	error = mgb_hw_teardown(sc);
480 
481 	/* Release IRQs */
482 	iflib_irq_free(ctx, &sc->rx_irq);
483 	iflib_irq_free(ctx, &sc->admin_irq);
484 
485 	if (sc->miibus != NULL)
486 		device_delete_child(sc->dev, sc->miibus);
487 
488 	if (sc->pba != NULL)
489 		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
490 		    rman_get_rid(sc->pba), sc->pba);
491 	sc->pba = NULL;
492 
493 	error = mgb_release_regs(sc);
494 
495 	return (error);
496 }
497 
498 static int
499 mgb_media_change(if_t ifp)
500 {
501 	struct mii_data *miid;
502 	struct mii_softc *miisc;
503 	struct mgb_softc *sc;
504 	if_ctx_t ctx;
505 	int needs_reset;
506 
507 	ctx = if_getsoftc(ifp);
508 	sc = iflib_get_softc(ctx);
509 	miid = device_get_softc(sc->miibus);
510 	LIST_FOREACH(miisc, &miid->mii_phys, mii_list)
511 		PHY_RESET(miisc);
512 
513 	needs_reset = mii_mediachg(miid);
514 	if (needs_reset != 0)
515 		if_init(ifp, ctx);
516 	return (needs_reset);
517 }
518 
519 static void
520 mgb_media_status(if_t ifp, struct ifmediareq *ifmr)
521 {
522 	struct mgb_softc *sc;
523 	struct mii_data *miid;
524 
525 	sc = iflib_get_softc(if_getsoftc(ifp));
526 	miid = device_get_softc(sc->miibus);
527 	if ((if_getflags(ifp) & IFF_UP) == 0)
528 		return;
529 
530 	mii_pollstat(miid);
531 	ifmr->ifm_active = miid->mii_media_active;
532 	ifmr->ifm_status = miid->mii_media_status;
533 }
534 
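/*
 * iflib has already allocated the DMA memory described by the shared
 * context; just record the descriptor ring and head writeback addresses
 * (virtual and bus) for the single TX queue set in the softc.
 */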
535 static int
536 mgb_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs,
537     int ntxqsets)
538 {
539 	struct mgb_softc *sc;
540 	struct mgb_ring_data *rdata;
541 	int q;
542 
543 	sc = iflib_get_softc(ctx);
544 	KASSERT(ntxqsets == 1, ("ntxqsets = %d", ntxqsets));
545 	rdata = &sc->tx_ring_data;
546 	for (q = 0; q < ntxqsets; q++) {
547 		KASSERT(ntxqs == 2, ("ntxqs = %d", ntxqs));
548 		/* Ring */
549 		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * ntxqs + 0];
550 		rdata->ring_bus_addr = paddrs[q * ntxqs + 0];
551 
552 		/* Head WB */
553 		rdata->head_wb = (uint32_t *) vaddrs[q * ntxqs + 1];
554 		rdata->head_wb_bus_addr = paddrs[q * ntxqs + 1];
555 	}
556 	return (0);
557 }
558 
559 static int
560 mgb_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs,
561     int nrxqsets)
562 {
563 	struct mgb_softc *sc;
564 	struct mgb_ring_data *rdata;
565 	int q;
566 
567 	sc = iflib_get_softc(ctx);
568 	KASSERT(nrxqsets == 1, ("nrxqsets = %d", nrxqsets));
569 	rdata = &sc->rx_ring_data;
570 	for (q = 0; q < nrxqsets; q++) {
571 		KASSERT(nrxqs == 2, ("nrxqs = %d", nrxqs));
572 		/* Ring */
573 		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * nrxqs + 0];
574 		rdata->ring_bus_addr = paddrs[q * nrxqs + 0];
575 
576 		/* Head WB */
577 		rdata->head_wb = (uint32_t *) vaddrs[q * nrxqs + 1];
578 		rdata->head_wb_bus_addr = paddrs[q * nrxqs + 1];
579 	}
580 	return (0);
581 }
582 
583 static void
584 mgb_queues_free(if_ctx_t ctx)
585 {
586 	struct mgb_softc *sc;
587 
588 	sc = iflib_get_softc(ctx);
589 
590 	memset(&sc->rx_ring_data, 0, sizeof(struct mgb_ring_data));
591 	memset(&sc->tx_ring_data, 0, sizeof(struct mgb_ring_data));
592 }
593 
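/*
 * ifdi_init: program the DMA rings, configure the receive filter to accept
 * broadcast/multicast/unicast frames without perfect filtering, and restart
 * the PHY via mii_mediachg().
 */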
594 static void
595 mgb_init(if_ctx_t ctx)
596 {
597 	struct mgb_softc *sc;
598 	struct mii_data *miid;
599 	int error;
600 
601 	sc = iflib_get_softc(ctx);
602 	miid = device_get_softc(sc->miibus);
603 	device_printf(sc->dev, "running init ...\n");
604 
605 	mgb_dma_init(sc);
606 
607 	/* XXX: Turn off perfect filtering, turn on (broad|multi|uni)cast rx */
608 	CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PERFECT_FILTER);
609 	CSR_UPDATE_REG(sc, MGB_RFE_CTL,
610 	    MGB_RFE_ALLOW_BROADCAST |
611 	    MGB_RFE_ALLOW_MULTICAST |
612 	    MGB_RFE_ALLOW_UNICAST);
613 
614 	error = mii_mediachg(miid);
615 	/* Not much we can do if this fails. */
616 	if (error)
617 		device_printf(sc->dev, "%s: mii_mediachg returned %d\n", __func__,
618 		    error);
619 }
620 
621 #if 0
622 static void
623 mgb_dump_some_stats(struct mgb_softc *sc)
624 {
625 	int i;
626 	int first_stat = 0x1200;
627 	int last_stat = 0x12FC;
628 
629 	for (i = first_stat; i <= last_stat; i += 4)
630 		if (CSR_READ_REG(sc, i) != 0)
631 			device_printf(sc->dev, "0x%04x: 0x%08x\n", i,
632 			    CSR_READ_REG(sc, i));
633 	char *stat_names[] = {
634 		"MAC_ERR_STS ",
635 		"FCT_INT_STS ",
636 		"DMAC_CFG ",
637 		"DMAC_CMD ",
638 		"DMAC_INT_STS ",
639 		"DMAC_INT_EN ",
640 		"DMAC_RX_ERR_STS0 ",
641 		"DMAC_RX_ERR_STS1 ",
642 		"DMAC_RX_ERR_STS2 ",
643 		"DMAC_RX_ERR_STS3 ",
644 		"INT_STS ",
645 		"INT_EN ",
646 		"INT_VEC_EN ",
647 		"INT_VEC_MAP0 ",
648 		"INT_VEC_MAP1 ",
649 		"INT_VEC_MAP2 ",
650 		"TX_HEAD0",
651 		"TX_TAIL0",
652 		"DMAC_TX_ERR_STS0 ",
653 		NULL
654 	};
655 	int stats[] = {
656 		0x114,
657 		0xA0,
658 		0xC00,
659 		0xC0C,
660 		0xC10,
661 		0xC14,
662 		0xC60,
663 		0xCA0,
664 		0xCE0,
665 		0xD20,
666 		0x780,
667 		0x788,
668 		0x794,
669 		0x7A0,
670 		0x7A4,
671 		0x780,
672 		0xD58,
673 		0xD5C,
674 		0xD60,
675 		0x0
676 	};
677 	i = 0;
678 	printf("==============================\n");
679 	while (stats[i++])
680 		device_printf(sc->dev, "%s at offset 0x%04x = 0x%08x\n",
681 		    stat_names[i - 1], stats[i - 1],
682 		    CSR_READ_REG(sc, stats[i - 1]));
683 	printf("==== TX RING DESCS ====\n");
684 	for (i = 0; i < MGB_DMA_RING_SIZE; i++)
685 		device_printf(sc->dev, "ring[%d].data0=0x%08x\n"
686 		    "ring[%d].data1=0x%08x\n"
687 		    "ring[%d].data2=0x%08x\n"
688 		    "ring[%d].data3=0x%08x\n",
689 		    i, sc->tx_ring_data.ring[i].ctl,
690 		    i, sc->tx_ring_data.ring[i].addr.low,
691 		    i, sc->tx_ring_data.ring[i].addr.high,
692 		    i, sc->tx_ring_data.ring[i].sts);
693 	device_printf(sc->dev, "==== DUMP_TX_DMA_RAM ====\n");
694 	CSR_WRITE_REG(sc, 0x24, 0xF); // DP_SEL & TX_RAM_0
695 	for (i = 0; i < 128; i++) {
696 		CSR_WRITE_REG(sc, 0x2C, i); // DP_ADDR
697 
698 		CSR_WRITE_REG(sc, 0x28, 0); // DP_CMD
699 
700 		while ((CSR_READ_REG(sc, 0x24) & 0x80000000) == 0) // DP_SEL & READY
701 			DELAY(1000);
702 
703 		device_printf(sc->dev, "DMAC_TX_RAM_0[%u]=%08x\n", i,
704 		    CSR_READ_REG(sc, 0x30)); // DP_DATA
705 	}
706 }
707 #endif
708 
709 static void
710 mgb_stop(if_ctx_t ctx)
711 {
712 	struct mgb_softc *sc;
713 	if_softc_ctx_t scctx;
714 	int i;
715 
716 	sc = iflib_get_softc(ctx);
717 	scctx = iflib_get_softc_ctx(ctx);
718 
719 	/* XXX: Could potentially timeout */
720 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
721 		mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_STOP);
722 		mgb_fct_control(sc, MGB_FCT_RX_CTL, 0, FCT_DISABLE);
723 	}
724 	for (i = 0; i < scctx->isc_ntxqsets; i++) {
725 		mgb_dmac_control(sc, MGB_DMAC_TX_START, 0, DMAC_STOP);
726 		mgb_fct_control(sc, MGB_FCT_TX_CTL, 0, FCT_DISABLE);
727 	}
728 }
729 
730 static int
731 mgb_legacy_intr(void *xsc)
732 {
733 	struct mgb_softc *sc;
734 
735 	sc = xsc;
736 	iflib_admin_intr_deferred(sc->ctx);
737 	return (FILTER_HANDLED);
738 }
739 
740 static int
741 mgb_rxq_intr(void *xsc)
742 {
743 	struct mgb_softc *sc;
744 	if_softc_ctx_t scctx;
745 	uint32_t intr_sts, intr_en;
746 	int qidx;
747 
748 	sc = xsc;
749 	scctx = iflib_get_softc_ctx(sc->ctx);
750 
751 	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
752 	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
753 	intr_sts &= intr_en;
754 
755 	for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
756 		if ((intr_sts & MGB_INTR_STS_RX(qidx)) != 0) {
757 			CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
758 			    MGB_INTR_STS_RX(qidx));
759 			CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_RX(qidx));
760 		}
761 	}
762 	return (FILTER_SCHEDULE_THREAD);
763 }
764 
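/*
 * Handler for interrupt vector 0.  Handles the software test interrupt and,
 * since all causes are mapped to vector 0 until MSI-X assignment, also
 * defers any RX/TX work to iflib.  TX interrupts are not expected in normal
 * operation because TX completion is tracked via head writeback.
 */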
765 static int
766 mgb_admin_intr(void *xsc)
767 {
768 	struct mgb_softc *sc;
769 	if_softc_ctx_t scctx;
770 	uint32_t intr_sts, intr_en;
771 	int qidx;
772 
773 	sc = xsc;
774 	scctx = iflib_get_softc_ctx(sc->ctx);
775 
776 	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
777 	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
778 	intr_sts &= intr_en;
779 
780 	/* TODO: shouldn't continue if suspended */
781 	if ((intr_sts & MGB_INTR_STS_ANY) == 0)
782 		return (FILTER_STRAY);
783 	if ((intr_sts &  MGB_INTR_STS_TEST) != 0) {
784 		sc->isr_test_flag = true;
785 		CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
786 		return (FILTER_HANDLED);
787 	}
788 	if ((intr_sts & MGB_INTR_STS_RX_ANY) != 0) {
789 		for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
790 			if ((intr_sts & MGB_INTR_STS_RX(qidx)) != 0) {
791 				iflib_rx_intr_deferred(sc->ctx, qidx);
792 			}
793 		}
794 		return (FILTER_HANDLED);
795 	}
796 	/* XXX: TX interrupts should not occur */
797 	if ((intr_sts & MGB_INTR_STS_TX_ANY) != 0) {
798 		for (qidx = 0; qidx < scctx->isc_ntxqsets; qidx++) {
799 			if ((intr_sts & MGB_INTR_STS_TX(qidx)) != 0) {
800 				/* clear the interrupt sts and run handler */
801 				CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
802 				    MGB_INTR_STS_TX(qidx));
803 				CSR_WRITE_REG(sc, MGB_INTR_STS,
804 				    MGB_INTR_STS_TX(qidx));
805 				iflib_tx_intr_deferred(sc->ctx, qidx);
806 			}
807 		}
808 		return (FILTER_HANDLED);
809 	}
810 
811 	return (FILTER_SCHEDULE_THREAD);
812 }
813 
814 static int
815 mgb_msix_intr_assign(if_ctx_t ctx, int msix)
816 {
817 	struct mgb_softc *sc;
818 	if_softc_ctx_t scctx;
819 	int error, i, vectorid;
820 	char irq_name[16];
821 
822 	sc = iflib_get_softc(ctx);
823 	scctx = iflib_get_softc_ctx(ctx);
824 
825 	KASSERT(scctx->isc_nrxqsets == 1 && scctx->isc_ntxqsets == 1,
826 	    ("num rxqsets/txqsets != 1 "));
827 
828 	/*
829 	 * First vector should be admin interrupts; other vectors are TX/RX.
830 	 *
831 	 * RIDs start at 1, and vector ids start at 0.
832 	 */
833 	vectorid = 0;
834 	error = iflib_irq_alloc_generic(ctx, &sc->admin_irq, vectorid + 1,
835 	    IFLIB_INTR_ADMIN, mgb_admin_intr, sc, 0, "admin");
836 	if (error) {
837 		device_printf(sc->dev,
838 		    "Failed to register admin interrupt handler\n");
839 		return (error);
840 	}
841 
842 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
843 		vectorid++;
844 		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
845 		error = iflib_irq_alloc_generic(ctx, &sc->rx_irq, vectorid + 1,
846 		    IFLIB_INTR_RXTX, mgb_rxq_intr, sc, i, irq_name);
847 		if (error) {
848 			device_printf(sc->dev,
849 			    "Failed to register rxq %d interrupt handler\n", i);
850 			return (error);
851 		}
852 		CSR_UPDATE_REG(sc, MGB_INTR_VEC_RX_MAP,
853 		    MGB_INTR_VEC_MAP(vectorid, i));
854 	}
855 
856 	/* Not actually mapping hw TX interrupts ... */
857 	for (i = 0; i < scctx->isc_ntxqsets; i++) {
858 		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
859 		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
860 		    irq_name);
861 	}
862 
863 	return (0);
864 }
865 
866 static void
867 mgb_intr_enable_all(if_ctx_t ctx)
868 {
869 	struct mgb_softc *sc;
870 	if_softc_ctx_t scctx;
871 	int i, dmac_enable = 0, intr_sts = 0, vec_en = 0;
872 
873 	sc = iflib_get_softc(ctx);
874 	scctx = iflib_get_softc_ctx(ctx);
875 	intr_sts |= MGB_INTR_STS_ANY;
876 	vec_en |= MGB_INTR_STS_ANY;
877 
878 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
879 		intr_sts |= MGB_INTR_STS_RX(i);
880 		dmac_enable |= MGB_DMAC_RX_INTR_ENBL(i);
881 		vec_en |= MGB_INTR_RX_VEC_STS(i);
882 	}
883 
884 	/* TX interrupts aren't needed ... */
885 
886 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, intr_sts);
887 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, vec_en);
888 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, dmac_enable);
889 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, dmac_enable);
890 }
891 
892 static void
893 mgb_intr_disable_all(if_ctx_t ctx)
894 {
895 	struct mgb_softc *sc;
896 
897 	sc = iflib_get_softc(ctx);
898 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, UINT32_MAX);
899 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_CLR, UINT32_MAX);
900 	CSR_WRITE_REG(sc, MGB_INTR_STS, UINT32_MAX);
901 
902 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_CLR, UINT32_MAX);
903 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, UINT32_MAX);
904 }
905 
906 static int
907 mgb_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
908 {
909 	/* called after successful rx isr */
910 	struct mgb_softc *sc;
911 
912 	sc = iflib_get_softc(ctx);
913 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_RX_VEC_STS(qid));
914 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_RX(qid));
915 
916 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_RX_INTR_ENBL(qid));
917 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_RX_INTR_ENBL(qid));
918 	return (0);
919 }
920 
921 static int
922 mgb_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
923 {
924 	/* XXX: not called (since tx interrupts not used) */
925 	struct mgb_softc *sc;
926 
927 	sc = iflib_get_softc(ctx);
928 
929 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_TX(qid));
930 
931 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_TX_INTR_ENBL(qid));
932 	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_TX_INTR_ENBL(qid));
933 	return (0);
934 }
935 
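/*
 * Fire the device's software test interrupt and poll for the admin handler
 * to observe it; used from attach_post as a sanity check of the interrupt
 * path.
 */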
936 static bool
937 mgb_intr_test(struct mgb_softc *sc)
938 {
939 	int i;
940 
941 	sc->isr_test_flag = false;
942 	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
943 	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_STS_ANY);
944 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET,
945 	    MGB_INTR_STS_ANY | MGB_INTR_STS_TEST);
946 	CSR_WRITE_REG(sc, MGB_INTR_SET, MGB_INTR_STS_TEST);
947 	if (sc->isr_test_flag)
948 		return (true);
949 	for (i = 0; i < MGB_TIMEOUT; i++) {
950 		DELAY(10);
951 		if (sc->isr_test_flag)
952 			break;
953 	}
954 	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TEST);
955 	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
956 	return (sc->isr_test_flag);
957 }
958 
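/*
 * ift_txd_encap: turn the physical segments in `ipi' into TX ring
 * descriptors starting at ipi_pidx.  Only single-segment packets are handled
 * (enforced by the KASSERT below); FS/LS and FCS insertion are set on that
 * descriptor and the next producer index is returned via ipi_new_pidx.
 */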
959 static int
960 mgb_isc_txd_encap(void *xsc, if_pkt_info_t ipi)
961 {
962 	struct mgb_softc *sc;
963 	struct mgb_ring_data *rdata;
964 	struct mgb_ring_desc *txd;
965 	bus_dma_segment_t *segs;
966 	qidx_t pidx, nsegs;
967 	int i;
968 
969 	KASSERT(ipi->ipi_qsidx == 0,
970 	    ("tried to refill TX Channel %d.\n", ipi->ipi_qsidx));
971 	sc = xsc;
972 	rdata = &sc->tx_ring_data;
973 
974 	pidx = ipi->ipi_pidx;
975 	segs = ipi->ipi_segs;
976 	nsegs = ipi->ipi_nsegs;
977 
978 	/* For each seg, create a descriptor */
979 	for (i = 0; i < nsegs; ++i) {
980 		KASSERT(nsegs == 1, ("Multisegment packet !!!!!\n"));
981 		txd = &rdata->ring[pidx];
982 		txd->ctl = htole32(
983 		    (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK ) |
984 		    /*
985 		     * XXX: This will be wrong in the multipacket case
986 		     * I suspect FS should be for the first packet and
987 		     * LS should be for the last packet
988 		     */
989 		    MGB_TX_DESC_CTL_FS | MGB_TX_DESC_CTL_LS |
990 		    MGB_DESC_CTL_FCS);
991 		txd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32(
992 		    segs[i].ds_addr));
993 		txd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32(
994 		    segs[i].ds_addr));
995 		txd->sts = htole32(
996 		    (segs[i].ds_len << 16) & MGB_DESC_FRAME_LEN_MASK);
997 		pidx = MGB_NEXT_RING_IDX(pidx);
998 	}
999 	ipi->ipi_new_pidx = pidx;
1000 	return (0);
1001 }
1002 
1003 static void
1004 mgb_isc_txd_flush(void *xsc, uint16_t txqid, qidx_t pidx)
1005 {
1006 	struct mgb_softc *sc;
1007 	struct mgb_ring_data *rdata;
1008 
1009 	KASSERT(txqid == 0, ("tried to flush TX Channel %d.\n", txqid));
1010 	sc = xsc;
1011 	rdata = &sc->tx_ring_data;
1012 
1013 	if (rdata->last_tail != pidx) {
1014 		rdata->last_tail = pidx;
1015 		CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(txqid), rdata->last_tail);
1016 	}
1017 }
1018 
1019 static int
1020 mgb_isc_txd_credits_update(void *xsc, uint16_t txqid, bool clear)
1021 {
1022 	struct mgb_softc *sc;
1023 	struct mgb_ring_desc *txd;
1024 	struct mgb_ring_data *rdata;
1025 	int processed = 0;
1026 
1027 	/*
1028 	 * > If clear is true, we need to report the number of TX command ring
1029 	 * > descriptors that have been processed by the device.  If clear is
1030 	 * > false, we just need to report whether or not at least one TX
1031 	 * > command ring descriptor has been processed by the device.
1032 	 * - vmx driver
1033 	 */
1034 	KASSERT(txqid == 0, ("tried to credits_update TX Channel %d.\n",
1035 	    txqid));
1036 	sc = xsc;
1037 	rdata = &sc->tx_ring_data;
1038 
1039 	while (*(rdata->head_wb) != rdata->last_head) {
1040 		if (!clear)
1041 			return (1);
1042 
1043 		txd = &rdata->ring[rdata->last_head];
1044 		memset(txd, 0, sizeof(struct mgb_ring_desc));
1045 		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1046 		processed++;
1047 	}
1048 
1049 	return (processed);
1050 }
1051 
1052 static int
1053 mgb_isc_rxd_available(void *xsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
1054 {
1055 	struct mgb_softc *sc;
1056 	struct mgb_ring_data *rdata;
1057 	int avail = 0;
1058 
1059 	sc = xsc;
1060 	KASSERT(rxqid == 0, ("tried to check availability in RX Channel %d.\n",
1061 	    rxqid));
1062 
1063 	rdata = &sc->rx_ring_data;
1064 	for (; idx != *(rdata->head_wb); idx = MGB_NEXT_RING_IDX(idx)) {
1065 		avail++;
1066 		/* XXX: Could verify desc is device owned here */
1067 		if (avail == budget)
1068 			break;
1069 	}
1070 	return (avail);
1071 }
1072 
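/*
 * ift_rxd_pkt_get: translate the completed descriptor at last_head into a
 * single iflib fragment.  Multi-buffer packets are not supported, so FS and
 * LS must both be set on the same descriptor.
 */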
1073 static int
1074 mgb_isc_rxd_pkt_get(void *xsc, if_rxd_info_t ri)
1075 {
1076 	struct mgb_softc *sc;
1077 	struct mgb_ring_data *rdata;
1078 	struct mgb_ring_desc rxd;
1079 	int total_len;
1080 
1081 	KASSERT(ri->iri_qsidx == 0,
1082 	    ("tried to check availability in RX Channel %d\n", ri->iri_qsidx));
1083 	sc = xsc;
1084 	total_len = 0;
1085 	rdata = &sc->rx_ring_data;
1086 
1087 	while (*(rdata->head_wb) != rdata->last_head) {
1088 		/* copy ring desc and do swapping */
1089 		rxd = rdata->ring[rdata->last_head];
1090 		rxd.ctl = le32toh(rxd.ctl);
1091 		rxd.addr.low = le32toh(rxd.addr.low);
1092 		rxd.addr.high = le32toh(rxd.addr.high);
1093 		rxd.sts = le32toh(rxd.sts);
1094 
1095 		if ((rxd.ctl & MGB_DESC_CTL_OWN) != 0) {
1096 			device_printf(sc->dev,
1097 			    "Tried to read descriptor ... "
1098 			    "found that it's owned by the driver\n");
1099 			return (EINVAL);
1100 		}
1101 		if ((rxd.ctl & MGB_RX_DESC_CTL_FS) == 0) {
1102 			device_printf(sc->dev,
1103 			    "Tried to read descriptor ... "
1104 			    "found that FS is not set.\n");
1106 			return (EINVAL);
1107 		}
1108 		/* XXX: Multi-packet support */
1109 		if ((rxd.ctl & MGB_RX_DESC_CTL_LS) == 0) {
1110 			device_printf(sc->dev,
1111 			    "Tried to read descriptor ... "
1112 			    "found that LS is not set. (Multi-buffer packets not yet supported)\n");
1113 			return (EINVAL);
1114 		}
1115 		ri->iri_frags[0].irf_flid = 0;
1116 		ri->iri_frags[0].irf_idx = rdata->last_head;
1117 		ri->iri_frags[0].irf_len = MGB_DESC_GET_FRAME_LEN(&rxd);
1118 		total_len += ri->iri_frags[0].irf_len;
1119 
1120 		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1121 		break;
1122 	}
1123 	ri->iri_nfrags = 1;
1124 	ri->iri_len = total_len;
1125 
1126 	return (0);
1127 }
1128 
1129 static void
1130 mgb_isc_rxd_refill(void *xsc, if_rxd_update_t iru)
1131 {
1132 	struct mgb_softc *sc;
1133 	struct mgb_ring_data *rdata;
1134 	struct mgb_ring_desc *rxd;
1135 	uint64_t *paddrs;
1136 	qidx_t *idxs;
1137 	qidx_t idx;
1138 	int count, len;
1139 
1140 	count = iru->iru_count;
1141 	len = iru->iru_buf_size;
1142 	idxs = iru->iru_idxs;
1143 	paddrs = iru->iru_paddrs;
1144 	KASSERT(iru->iru_qsidx == 0,
1145 	    ("tried to refill RX Channel %d.\n", iru->iru_qsidx));
1146 
1147 	sc = xsc;
1148 	rdata = &sc->rx_ring_data;
1149 
1150 	while (count > 0) {
1151 		idx = idxs[--count];
1152 		rxd = &rdata->ring[idx];
1153 
1154 		rxd->sts = 0;
1155 		rxd->addr.low =
1156 		    htole32(CSR_TRANSLATE_ADDR_LOW32(paddrs[count]));
1157 		rxd->addr.high =
1158 		    htole32(CSR_TRANSLATE_ADDR_HIGH32(paddrs[count]));
1159 		rxd->ctl = htole32(MGB_DESC_CTL_OWN |
1160 		    (len & MGB_DESC_CTL_BUFLEN_MASK));
1161 	}
1162 	return;
1163 }
1164 
1165 static void
1166 mgb_isc_rxd_flush(void *xsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
1167 {
1168 	struct mgb_softc *sc;
1169 
1170 	sc = xsc;
1171 
1172 	KASSERT(rxqid == 0, ("tried to flush RX Channel %d.\n", rxqid));
1173 	/*
1174 	 * According to the programming guide, last_tail must be set to
1175 	 * the last valid RX descriptor, rather than to the one past that.
1176 	 * Note that this is not true for the TX ring!
1177 	 */
1178 	sc->rx_ring_data.last_tail = MGB_PREV_RING_IDX(pidx);
1179 	CSR_WRITE_REG(sc, MGB_DMA_RX_TAIL(rxqid), sc->rx_ring_data.last_tail);
1180 	return;
1181 }
1182 
1183 static int
1184 mgb_test_bar(struct mgb_softc *sc)
1185 {
1186 	uint32_t id_rev, dev_id;
1187 
1188 	id_rev = CSR_READ_REG(sc, 0);
1189 	dev_id = id_rev >> 16;
1190 	if (dev_id == MGB_LAN7430_DEVICE_ID ||
1191 	    dev_id == MGB_LAN7431_DEVICE_ID) {
1192 		return (0);
1193 	} else {
1194 		device_printf(sc->dev, "ID check failed.\n");
1195 		return (ENXIO);
1196 	}
1197 }
1198 
1199 static int
1200 mgb_alloc_regs(struct mgb_softc *sc)
1201 {
1202 	int rid;
1203 
1204 	rid = PCIR_BAR(MGB_BAR);
1205 	pci_enable_busmaster(sc->dev);
1206 	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1207 	    &rid, RF_ACTIVE);
1208 	if (sc->regs == NULL)
1209 		 return (ENXIO);
1210 
1211 	return (0);
1212 }
1213 
1214 static int
1215 mgb_release_regs(struct mgb_softc *sc)
1216 {
1217 	int error = 0;
1218 
1219 	if (sc->regs != NULL)
1220 		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
1221 		    rman_get_rid(sc->regs), sc->regs);
1222 	sc->regs = NULL;
1223 	pci_disable_busmaster(sc->dev);
1224 	return (error);
1225 }
1226 
1227 static int
1228 mgb_dma_init(struct mgb_softc *sc)
1229 {
1230 	if_softc_ctx_t scctx;
1231 	int ch, error = 0;
1232 
1233 	scctx = iflib_get_softc_ctx(sc->ctx);
1234 
1235 	for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
1236 		if ((error = mgb_dma_rx_ring_init(sc, ch)))
1237 			goto fail;
1238 
1239 	for (ch = 0; ch < scctx->isc_ntxqsets; ch++)
1240 		if ((error = mgb_dma_tx_ring_init(sc, ch)))
1241 			goto fail;
1242 
1243 fail:
1244 	return (error);
1245 }
1246 
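/*
 * Program one RX DMA channel: reset it, write the descriptor ring and head
 * writeback base addresses, set the ring length and padding, then enable the
 * RX FCT and start the DMAC.
 */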
1247 static int
1248 mgb_dma_rx_ring_init(struct mgb_softc *sc, int channel)
1249 {
1250 	struct mgb_ring_data *rdata;
1251 	int ring_config, error = 0;
1252 
1253 	rdata = &sc->rx_ring_data;
1254 	mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_RESET);
1255 	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_RX_START, channel),
1256 	    ("Trying to init channels when not in init state\n"));
1257 
1258 	/* write ring address */
1259 	if (rdata->ring_bus_addr == 0) {
1260 		device_printf(sc->dev, "Invalid ring bus addr.\n");
1261 		goto fail;
1262 	}
1263 
1264 	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_H(channel),
1265 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1266 	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_L(channel),
1267 	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1268 
1269 	/* write head pointer writeback address */
1270 	if (rdata->head_wb_bus_addr == 0) {
1271 		device_printf(sc->dev, "Invalid head wb bus addr.\n");
1272 		goto fail;
1273 	}
1274 	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_H(channel),
1275 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1276 	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_L(channel),
1277 	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1278 
1279 	/* Enable head pointer writeback */
1280 	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG0(channel), MGB_DMA_HEAD_WB_ENBL);
1281 
1282 	ring_config = CSR_READ_REG(sc, MGB_DMA_RX_CONFIG1(channel));
1283 	/*  ring size */
1284 	ring_config &= ~MGB_DMA_RING_LEN_MASK;
1285 	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1286 	/* packet padding  (PAD_2 is better for IP header alignment ...) */
1287 	ring_config &= ~MGB_DMA_RING_PAD_MASK;
1288 	ring_config |= (MGB_DMA_RING_PAD_0 & MGB_DMA_RING_PAD_MASK);
1289 
1290 	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG1(channel), ring_config);
1291 
1292 	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_RX_HEAD(channel));
1293 
1294 	error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_RESET);
1295 	if (error != 0) {
1296 		device_printf(sc->dev, "Failed to reset RX FCT.\n");
1297 		goto fail;
1298 	}
1299 	error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_ENABLE);
1300 	if (error != 0) {
1301 		device_printf(sc->dev, "Failed to enable RX FCT.\n");
1302 		goto fail;
1303 	}
1304 	error = mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_START);
1305 	if (error != 0)
1306 		device_printf(sc->dev, "Failed to start RX DMAC.\n");
1307 fail:
1308 	return (error);
1309 }
1310 
1311 static int
1312 mgb_dma_tx_ring_init(struct mgb_softc *sc, int channel)
1313 {
1314 	struct mgb_ring_data *rdata;
1315 	int ring_config, error = 0;
1316 
1317 	rdata = &sc->tx_ring_data;
1318 	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel, FCT_RESET))) {
1319 		device_printf(sc->dev, "Failed to reset TX FCT.\n");
1320 		goto fail;
1321 	}
1322 	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel,
1323 	    FCT_ENABLE))) {
1324 		device_printf(sc->dev, "Failed to enable TX FCT.\n");
1325 		goto fail;
1326 	}
1327 	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1328 	    DMAC_RESET))) {
1329 		device_printf(sc->dev, "Failed to reset TX DMAC.\n");
1330 		goto fail;
1331 	}
1332 	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_TX_START, channel),
1333 	    ("Trying to init channels in not init state\n"));
1334 
1335 	/* write ring address */
1336 	if (rdata->ring_bus_addr == 0) {
1337 		device_printf(sc->dev, "Invalid ring bus addr.\n");
1338 		goto fail;
1339 	}
1340 	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_H(channel),
1341 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1342 	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_L(channel),
1343 	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1344 
1345 	/* write ring size */
1346 	ring_config = CSR_READ_REG(sc, MGB_DMA_TX_CONFIG1(channel));
1347 	ring_config &= ~MGB_DMA_RING_LEN_MASK;
1348 	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1349 	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG1(channel), ring_config);
1350 
1351 	/* Enable interrupt on completion and head pointer writeback */
1352 	ring_config = (MGB_DMA_HEAD_WB_LS_ENBL | MGB_DMA_HEAD_WB_ENBL);
1353 	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG0(channel), ring_config);
1354 
1355 	/* write head pointer writeback address */
1356 	if (rdata->head_wb_bus_addr == 0) {
1357 		device_printf(sc->dev, "Invalid head wb bus addr.\n");
1358 		goto fail;
1359 	}
1360 	CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_H(channel),
1361 	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1362 	CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_L(channel),
1363 	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1364 
1365 	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_TX_HEAD(channel));
1366 	KASSERT(rdata->last_head == 0, ("MGB_DMA_TX_HEAD was not reset.\n"));
1367 	rdata->last_tail = 0;
1368 	CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(channel), rdata->last_tail);
1369 
1370 	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1371 	    DMAC_START)))
1372 		device_printf(sc->dev, "Failed to start TX DMAC.\n");
1373 fail:
1374 	return (error);
1375 }
1376 
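/*
 * Issue a reset, start or stop command to a TX or RX DMA channel (`start' is
 * MGB_DMAC_TX_START or MGB_DMAC_RX_START) and, where required, poll until
 * the command takes effect.
 */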
1377 static int
1378 mgb_dmac_control(struct mgb_softc *sc, int start, int channel,
1379     enum mgb_dmac_cmd cmd)
1380 {
1381 	int error = 0;
1382 
1383 	switch (cmd) {
1384 	case DMAC_RESET:
1385 		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1386 		    MGB_DMAC_CMD_RESET(start, channel));
1387 		error = mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0,
1388 		    MGB_DMAC_CMD_RESET(start, channel));
1389 		break;
1390 
1391 	case DMAC_START:
1392 		/*
1393 		 * NOTE: this simplifies the logic, since it will never
1394 		 * try to start in STOP_PENDING, but it also increases work.
1395 		 */
1396 		error = mgb_dmac_control(sc, start, channel, DMAC_STOP);
1397 		if (error != 0)
1398 			return (error);
1399 		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1400 		    MGB_DMAC_CMD_START(start, channel));
1401 		break;
1402 
1403 	case DMAC_STOP:
1404 		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1405 		    MGB_DMAC_CMD_STOP(start, channel));
1406 		error = mgb_wait_for_bits(sc, MGB_DMAC_CMD,
1407 		    MGB_DMAC_CMD_STOP(start, channel),
1408 		    MGB_DMAC_CMD_START(start, channel));
1409 		break;
1410 	}
1411 	return (error);
1412 }
1413 
1414 static int
1415 mgb_fct_control(struct mgb_softc *sc, int reg, int channel,
1416     enum mgb_fct_cmd cmd)
1417 {
1418 
1419 	switch (cmd) {
1420 	case FCT_RESET:
1421 		CSR_WRITE_REG(sc, reg, MGB_FCT_RESET(channel));
1422 		return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_RESET(channel)));
1423 	case FCT_ENABLE:
1424 		CSR_WRITE_REG(sc, reg, MGB_FCT_ENBL(channel));
1425 		return (0);
1426 	case FCT_DISABLE:
1427 		CSR_WRITE_REG(sc, reg, MGB_FCT_DSBL(channel));
1428 		return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_ENBL(channel)));
1429 	}
1430 }
1431 
1432 static int
1433 mgb_hw_teardown(struct mgb_softc *sc)
1434 {
1435 	int err = 0;
1436 
1437 	/* Stop MAC */
1438 	CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1439 	CSR_CLEAR_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1440 	if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0)))
1441 		return (err);
1442 	if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0)))
1443 		return (err);
1444 	return (err);
1445 }
1446 
1447 static int
1448 mgb_hw_init(struct mgb_softc *sc)
1449 {
1450 	int error = 0;
1451 
1452 	error = mgb_hw_reset(sc);
1453 	if (error != 0)
1454 		goto fail;
1455 
1456 	mgb_mac_init(sc);
1457 
1458 	error = mgb_phy_reset(sc);
1459 	if (error != 0)
1460 		goto fail;
1461 
1462 	error = mgb_dmac_reset(sc);
1463 	if (error != 0)
1464 		goto fail;
1465 
1466 fail:
1467 	return (error);
1468 }
1469 
1470 static int
1471 mgb_hw_reset(struct mgb_softc *sc)
1472 {
1473 
1474 	CSR_UPDATE_REG(sc, MGB_HW_CFG, MGB_LITE_RESET);
1475 	return (mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET));
1476 }
1477 
1478 static int
1479 mgb_mac_init(struct mgb_softc *sc)
1480 {
1481 
1482 	/**
1483 	 * enable automatic duplex detection and
1484 	 * automatic speed detection
1485 	 */
1486 	CSR_UPDATE_REG(sc, MGB_MAC_CR, MGB_MAC_ADD_ENBL | MGB_MAC_ASD_ENBL);
1487 	CSR_UPDATE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1488 	CSR_UPDATE_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1489 
1490 	return (MGB_STS_OK);
1491 }
1492 
1493 static int
1494 mgb_phy_reset(struct mgb_softc *sc)
1495 {
1496 
1497 	CSR_UPDATE_BYTE(sc, MGB_PMT_CTL, MGB_PHY_RESET);
1498 	if (mgb_wait_for_bits(sc, MGB_PMT_CTL, 0, MGB_PHY_RESET) ==
1499 	    MGB_STS_TIMEOUT)
1500 		return (MGB_STS_TIMEOUT);
1501 	return (mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0));
1502 }
1503 
1504 static int
1505 mgb_dmac_reset(struct mgb_softc *sc)
1506 {
1507 
1508 	CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_RESET);
1509 	return (mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_RESET));
1510 }
1511 
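/*
 * Poll `reg' in 100us steps, up to MGB_TIMEOUT iterations, until all of
 * `set_bits' are set and all of `clear_bits' are clear; returns MGB_STS_OK
 * on success or MGB_STS_TIMEOUT otherwise.
 */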
1512 static int
1513 mgb_wait_for_bits(struct mgb_softc *sc, int reg, int set_bits, int clear_bits)
1514 {
1515 	int i, val;
1516 
1517 	i = 0;
1518 	do {
1519 		/*
1520 		 * XXX: Datasheet states the delay should be > 5 microseconds
1521 		 * for device reset.
1522 		 */
1523 		DELAY(100);
1524 		val = CSR_READ_REG(sc, reg);
1525 		if ((val & set_bits) == set_bits && (val & clear_bits) == 0)
1526 			return (MGB_STS_OK);
1527 	} while (i++ < MGB_TIMEOUT);
1528 
1529 	return (MGB_STS_TIMEOUT);
1530 }
1531 
1532 static void
1533 mgb_get_ethaddr(struct mgb_softc *sc, struct ether_addr *dest)
1534 {
1535 
1536 	CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_L, &dest->octet[0], 4);
1537 	CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_H, &dest->octet[4], 2);
1538 }
1539 
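/*
 * Indirect MDIO read: wait for MII_ACCESS to go idle, program the PHY and
 * register addresses along with the BUSY/READ bits, wait for completion and
 * return the 16-bit result from MII_DATA.
 */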
1540 static int
1541 mgb_miibus_readreg(device_t dev, int phy, int reg)
1542 {
1543 	struct mgb_softc *sc;
1544 	int mii_access;
1545 
1546 	sc = iflib_get_softc(device_get_softc(dev));
1547 
1548 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1549 	    MGB_STS_TIMEOUT)
1550 		return (EIO);
1551 	mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1552 	mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1553 	mii_access |= MGB_MII_BUSY | MGB_MII_READ;
1554 	CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1555 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1556 	    MGB_STS_TIMEOUT)
1557 		return (EIO);
1558 	return (CSR_READ_2_BYTES(sc, MGB_MII_DATA));
1559 }
1560 
1561 static int
1562 mgb_miibus_writereg(device_t dev, int phy, int reg, int data)
1563 {
1564 	struct mgb_softc *sc;
1565 	int mii_access;
1566 
1567 	sc = iflib_get_softc(device_get_softc(dev));
1568 
1569 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1570 	    MGB_STS_TIMEOUT)
1571 		return (EIO);
1572 	mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1573 	mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1574 	mii_access |= MGB_MII_BUSY | MGB_MII_WRITE;
1575 	CSR_WRITE_REG(sc, MGB_MII_DATA, data);
1576 	CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1577 	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1578 	    MGB_STS_TIMEOUT)
1579 		return (EIO);
1580 	return (0);
1581 }
1582 
1583 /* XXX: These MII methods may need locking. */
1584 static void
1585 mgb_miibus_statchg(device_t dev)
1586 {
1587 	struct mgb_softc *sc;
1588 	struct mii_data *miid;
1589 
1590 	sc = iflib_get_softc(device_get_softc(dev));
1591 	miid = device_get_softc(sc->miibus);
1592 	/* Update baudrate in iflib */
1593 	sc->baudrate = ifmedia_baudrate(miid->mii_media_active);
1594 	iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1595 }
1596 
1597 static void
1598 mgb_miibus_linkchg(device_t dev)
1599 {
1600 	struct mgb_softc *sc;
1601 	struct mii_data *miid;
1602 	int link_state;
1603 
1604 	sc = iflib_get_softc(device_get_softc(dev));
1605 	miid = device_get_softc(sc->miibus);
1606 	/* XXX: copied from miibus_linkchg */
1607 	if (miid->mii_media_status & IFM_AVALID) {
1608 		if (miid->mii_media_status & IFM_ACTIVE)
1609 			link_state = LINK_STATE_UP;
1610 		else
1611 			link_state = LINK_STATE_DOWN;
1612 	} else
1613 		link_state = LINK_STATE_UNKNOWN;
1614 	sc->link_state = link_state;
1615 	iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1616 }
1617