1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2019 The FreeBSD Foundation
5 *
6 * This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
7 * under sponsorship from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30 #include <sys/cdefs.h>
31 /*
32 * Microchip LAN7430/LAN7431 PCIe to Gigabit Ethernet Controller driver.
33 *
34 * Product information:
35 * LAN7430 https://www.microchip.com/en-us/product/LAN7430
36 * - Integrated IEEE 802.3 compliant PHY
37 * LAN7431 https://www.microchip.com/en-us/product/LAN7431
38 * - RGMII Interface
39 *
40 * This driver uses the iflib interface and the default 'ukphy' PHY driver.
41 *
42 * UNIMPLEMENTED FEATURES
43 * ----------------------
44 * A number of features supported by the LAN743X device are not yet implemented in
45 * this driver:
46 *
47 * - Multiple (up to 4) RX queues support
48 * - Just needs to remove asserts and malloc multiple `rx_ring_data`
49 * structs based on ncpus.
50 * - RX/TX Checksum Offloading support
51 * - VLAN support
52 * - Receive Packet Filtering (Multicast Perfect/Hash Address) support
53 * - Wake on LAN (WoL) support
54 * - TX LSO support
55 * - Receive Side Scaling (RSS) support
56 * - Debugging Capabilities:
57 * - Could include MAC statistics and
58 * error status registers in sysctl.
59 */
60
61 #include <sys/param.h>
62 #include <sys/bus.h>
63 #include <sys/endian.h>
64 #include <sys/kdb.h>
65 #include <sys/kernel.h>
66 #include <sys/module.h>
67 #include <sys/rman.h>
68 #include <sys/socket.h>
69 #include <sys/sockio.h>
70 #include <machine/bus.h>
71 #include <machine/resource.h>
72
73 #include <net/ethernet.h>
74 #include <net/if.h>
75 #include <net/if_var.h>
76 #include <net/if_types.h>
77 #include <net/if_media.h>
78 #include <net/iflib.h>
79
80 #include <dev/mgb/if_mgb.h>
81 #include <dev/mii/mii.h>
82 #include <dev/mii/miivar.h>
83 #include <dev/pci/pcireg.h>
84 #include <dev/pci/pcivar.h>
85
86 #include "ifdi_if.h"
87 #include "miibus_if.h"
88
89 static const pci_vendor_info_t mgb_vendor_info_array[] = {
90 PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7430_DEVICE_ID,
91 "Microchip LAN7430 PCIe Gigabit Ethernet Controller"),
92 PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7431_DEVICE_ID,
93 "Microchip LAN7431 PCIe Gigabit Ethernet Controller"),
94 PVID_END
95 };
96
97 /* Device methods */
98 static device_register_t mgb_register;
99
100 /* IFLIB methods */
101 static ifdi_attach_pre_t mgb_attach_pre;
102 static ifdi_attach_post_t mgb_attach_post;
103 static ifdi_detach_t mgb_detach;
104
105 static ifdi_tx_queues_alloc_t mgb_tx_queues_alloc;
106 static ifdi_rx_queues_alloc_t mgb_rx_queues_alloc;
107 static ifdi_queues_free_t mgb_queues_free;
108
109 static ifdi_init_t mgb_init;
110 static ifdi_stop_t mgb_stop;
111
112 static ifdi_msix_intr_assign_t mgb_msix_intr_assign;
113 static ifdi_tx_queue_intr_enable_t mgb_tx_queue_intr_enable;
114 static ifdi_rx_queue_intr_enable_t mgb_rx_queue_intr_enable;
115 static ifdi_intr_enable_t mgb_intr_enable_all;
116 static ifdi_intr_disable_t mgb_intr_disable_all;
117
118 /* IFLIB_TXRX methods */
119 static int mgb_isc_txd_encap(void *,
120 if_pkt_info_t);
121 static void mgb_isc_txd_flush(void *,
122 uint16_t, qidx_t);
123 static int mgb_isc_txd_credits_update(void *,
124 uint16_t, bool);
125 static int mgb_isc_rxd_available(void *,
126 uint16_t, qidx_t, qidx_t);
127 static int mgb_isc_rxd_pkt_get(void *,
128 if_rxd_info_t);
129 static void mgb_isc_rxd_refill(void *,
130 if_rxd_update_t);
131 static void mgb_isc_rxd_flush(void *,
132 uint16_t, uint8_t, qidx_t);
133
134 /* Interrupts */
135 static driver_filter_t mgb_legacy_intr;
136 static driver_filter_t mgb_admin_intr;
137 static driver_filter_t mgb_rxq_intr;
138 static bool mgb_intr_test(struct mgb_softc *);
139
140 /* MII methods */
141 static miibus_readreg_t mgb_miibus_readreg;
142 static miibus_writereg_t mgb_miibus_writereg;
143 static miibus_linkchg_t mgb_miibus_linkchg;
144 static miibus_statchg_t mgb_miibus_statchg;
145
146 static int mgb_media_change(if_t);
147 static void mgb_media_status(if_t,
148 struct ifmediareq *);
149
150 /* Helper/Test functions */
151 static int mgb_test_bar(struct mgb_softc *);
152 static int mgb_alloc_regs(struct mgb_softc *);
153 static int mgb_release_regs(struct mgb_softc *);
154
155 static void mgb_get_ethaddr(struct mgb_softc *,
156 struct ether_addr *);
157
158 static int mgb_wait_for_bits(struct mgb_softc *,
159 int, int, int);
160
161 /* H/W init, reset and teardown helpers */
162 static int mgb_hw_init(struct mgb_softc *);
163 static int mgb_hw_teardown(struct mgb_softc *);
164 static int mgb_hw_reset(struct mgb_softc *);
165 static int mgb_mac_init(struct mgb_softc *);
166 static int mgb_dmac_reset(struct mgb_softc *);
167 static int mgb_phy_reset(struct mgb_softc *);
168
169 static int mgb_dma_init(struct mgb_softc *);
170 static int mgb_dma_tx_ring_init(struct mgb_softc *,
171 int);
172 static int mgb_dma_rx_ring_init(struct mgb_softc *,
173 int);
174
175 static int mgb_dmac_control(struct mgb_softc *,
176 int, int, enum mgb_dmac_cmd);
177 static int mgb_fct_control(struct mgb_softc *,
178 int, int, enum mgb_fct_cmd);
179
180 /*********************************************************************
181 * FreeBSD Device Interface Entry Points
182 *********************************************************************/
183
184 static device_method_t mgb_methods[] = {
185 /* Device interface */
186 DEVMETHOD(device_register, mgb_register),
187 DEVMETHOD(device_probe, iflib_device_probe),
188 DEVMETHOD(device_attach, iflib_device_attach),
189 DEVMETHOD(device_detach, iflib_device_detach),
190 DEVMETHOD(device_shutdown, iflib_device_shutdown),
191 DEVMETHOD(device_suspend, iflib_device_suspend),
192 DEVMETHOD(device_resume, iflib_device_resume),
193
194 /* MII Interface */
195 DEVMETHOD(miibus_readreg, mgb_miibus_readreg),
196 DEVMETHOD(miibus_writereg, mgb_miibus_writereg),
197 DEVMETHOD(miibus_linkchg, mgb_miibus_linkchg),
198 DEVMETHOD(miibus_statchg, mgb_miibus_statchg),
199
200 DEVMETHOD_END
201 };
202
203 static driver_t mgb_driver = {
204 "mgb", mgb_methods, sizeof(struct mgb_softc)
205 };
206
207 DRIVER_MODULE(mgb, pci, mgb_driver, NULL, NULL);
208 IFLIB_PNP_INFO(pci, mgb, mgb_vendor_info_array);
209 MODULE_VERSION(mgb, 1);
210
211 #if 0 /* MIIBUS_DEBUG */
212 /* If MIIBUS debug stuff is in attach then order matters. Use below instead. */
213 DRIVER_MODULE_ORDERED(miibus, mgb, miibus_driver, NULL, NULL,
214 SI_ORDER_ANY);
215 #endif /* MIIBUS_DEBUG */
216 DRIVER_MODULE(miibus, mgb, miibus_driver, NULL, NULL);
217
218 MODULE_DEPEND(mgb, pci, 1, 1, 1);
219 MODULE_DEPEND(mgb, ether, 1, 1, 1);
220 MODULE_DEPEND(mgb, miibus, 1, 1, 1);
221 MODULE_DEPEND(mgb, iflib, 1, 1, 1);
222
223 static device_method_t mgb_iflib_methods[] = {
224 DEVMETHOD(ifdi_attach_pre, mgb_attach_pre),
225 DEVMETHOD(ifdi_attach_post, mgb_attach_post),
226 DEVMETHOD(ifdi_detach, mgb_detach),
227
228 DEVMETHOD(ifdi_init, mgb_init),
229 DEVMETHOD(ifdi_stop, mgb_stop),
230
231 DEVMETHOD(ifdi_tx_queues_alloc, mgb_tx_queues_alloc),
232 DEVMETHOD(ifdi_rx_queues_alloc, mgb_rx_queues_alloc),
233 DEVMETHOD(ifdi_queues_free, mgb_queues_free),
234
235 DEVMETHOD(ifdi_msix_intr_assign, mgb_msix_intr_assign),
236 DEVMETHOD(ifdi_tx_queue_intr_enable, mgb_tx_queue_intr_enable),
237 DEVMETHOD(ifdi_rx_queue_intr_enable, mgb_rx_queue_intr_enable),
238 DEVMETHOD(ifdi_intr_enable, mgb_intr_enable_all),
239 DEVMETHOD(ifdi_intr_disable, mgb_intr_disable_all),
240
241 #if 0 /* Not yet implemented IFLIB methods */
242 /*
243 * Set multicast addresses, mtu and promiscuous mode
244 */
245 DEVMETHOD(ifdi_multi_set, mgb_multi_set),
246 DEVMETHOD(ifdi_mtu_set, mgb_mtu_set),
247 DEVMETHOD(ifdi_promisc_set, mgb_promisc_set),
248
249 /*
250 * Needed for VLAN support
251 */
252 DEVMETHOD(ifdi_vlan_register, mgb_vlan_register),
253 DEVMETHOD(ifdi_vlan_unregister, mgb_vlan_unregister),
254 DEVMETHOD(ifdi_needs_restart, mgb_if_needs_restart),
255
256 /*
257 * Needed for WOL support
258 * at the very least.
259 */
260 DEVMETHOD(ifdi_shutdown, mgb_shutdown),
261 DEVMETHOD(ifdi_suspend, mgb_suspend),
262 DEVMETHOD(ifdi_resume, mgb_resume),
263 #endif /* UNUSED_IFLIB_METHODS */
264 DEVMETHOD_END
265 };
266
267 static driver_t mgb_iflib_driver = {
268 "mgb", mgb_iflib_methods, sizeof(struct mgb_softc)
269 };
270
271 static struct if_txrx mgb_txrx = {
272 .ift_txd_encap = mgb_isc_txd_encap,
273 .ift_txd_flush = mgb_isc_txd_flush,
274 .ift_txd_credits_update = mgb_isc_txd_credits_update,
275 .ift_rxd_available = mgb_isc_rxd_available,
276 .ift_rxd_pkt_get = mgb_isc_rxd_pkt_get,
277 .ift_rxd_refill = mgb_isc_rxd_refill,
278 .ift_rxd_flush = mgb_isc_rxd_flush,
279
280 .ift_legacy_intr = mgb_legacy_intr
281 };
282
283 static struct if_shared_ctx mgb_sctx_init = {
284 .isc_magic = IFLIB_MAGIC,
285
286 .isc_q_align = PAGE_SIZE,
287 .isc_admin_intrcnt = 1,
288 .isc_flags = IFLIB_DRIVER_MEDIA /* | IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ*/,
289
290 .isc_vendor_info = mgb_vendor_info_array,
291 .isc_driver_version = "1",
292 .isc_driver = &mgb_iflib_driver,
293 /* 2 queues per set for TX and RX (ring queue, head writeback queue) */
294 .isc_ntxqs = 2,
295
296 .isc_tx_maxsize = MGB_DMA_MAXSEGS * MCLBYTES,
297 /* .isc_tx_nsegments = MGB_DMA_MAXSEGS, */
298 .isc_tx_maxsegsize = MCLBYTES,
299
300 .isc_ntxd_min = {1, 1}, /* Will want to make this bigger */
301 .isc_ntxd_max = {MGB_DMA_RING_SIZE, 1},
302 .isc_ntxd_default = {MGB_DMA_RING_SIZE, 1},
303
304 .isc_nrxqs = 2,
305
306 .isc_rx_maxsize = MCLBYTES,
307 .isc_rx_nsegments = 1,
308 .isc_rx_maxsegsize = MCLBYTES,
309
310 .isc_nrxd_min = {1, 1}, /* Will want to make this bigger */
311 .isc_nrxd_max = {MGB_DMA_RING_SIZE, 1},
312 .isc_nrxd_default = {MGB_DMA_RING_SIZE, 1},
313
314 .isc_nfl = 1, /* one free list since there is only one queue */
315 #if 0 /* UNUSED_CTX */
316
317 .isc_tso_maxsize = MGB_TSO_MAXSIZE + sizeof(struct ether_vlan_header),
318 .isc_tso_maxsegsize = MGB_TX_MAXSEGSIZE,
319 #endif /* UNUSED_CTX */
320 };
321
322 /*********************************************************************/
323
324 static void *
325 mgb_register(device_t dev)
326 {
327
328 return (&mgb_sctx_init);
329 }
330
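/*
 * iflib pre-attach: map the register BAR, sanity-check the chip ID,
 * reset and initialize the hardware, attach the PHY through miibus and
 * fill in the iflib softc context (queue sizes, MSI-X BARs, MAC address).
 */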
331 static int
332 mgb_attach_pre(if_ctx_t ctx)
333 {
334 struct mgb_softc *sc;
335 if_softc_ctx_t scctx;
336 int error, phyaddr, rid;
337 struct ether_addr hwaddr;
338 struct mii_data *miid;
339
340 sc = iflib_get_softc(ctx);
341 sc->ctx = ctx;
342 sc->dev = iflib_get_dev(ctx);
343 scctx = iflib_get_softc_ctx(ctx);
344
345 /* IFLIB required setup */
346 scctx->isc_txrx = &mgb_txrx;
347 scctx->isc_tx_nsegments = MGB_DMA_MAXSEGS;
348 /* Ring desc queues */
349 scctx->isc_txqsizes[0] = sizeof(struct mgb_ring_desc) *
350 scctx->isc_ntxd[0];
351 scctx->isc_rxqsizes[0] = sizeof(struct mgb_ring_desc) *
352 scctx->isc_nrxd[0];
353
354 /* Head WB queues */
355 scctx->isc_txqsizes[1] = sizeof(uint32_t) * scctx->isc_ntxd[1];
356 scctx->isc_rxqsizes[1] = sizeof(uint32_t) * scctx->isc_nrxd[1];
357
358 /* XXX: Must have 1 txqset, but can have up to 4 rxqsets */
359 scctx->isc_nrxqsets = 1;
360 scctx->isc_ntxqsets = 1;
361
362 /* scctx->isc_tx_csum_flags = (CSUM_TCP | CSUM_UDP) |
363 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TSO */
364 scctx->isc_tx_csum_flags = 0;
365 scctx->isc_capabilities = scctx->isc_capenable = 0;
366 #if 0
367 /*
368 * CSUM, TSO and VLAN support are TBD
369 */
370 IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
371 IFCAP_TSO4 | IFCAP_TSO6 |
372 IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
373 IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
374 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO |
375 IFCAP_JUMBO_MTU;
376 scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
377 #endif
378
379 /* get the BAR */
380 error = mgb_alloc_regs(sc);
381 if (error != 0) {
382 device_printf(sc->dev,
383 "Unable to allocate bus resource: registers.\n");
384 goto fail;
385 }
386
387 error = mgb_test_bar(sc);
388 if (error != 0)
389 goto fail;
390
391 error = mgb_hw_init(sc);
392 if (error != 0) {
393 device_printf(sc->dev,
394 "MGB device init failed. (err: %d)\n", error);
395 goto fail;
396 }
397
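/*
 * The LAN7430 uses its integrated PHY at a fixed address; the LAN7431
 * drives an external PHY over RGMII, so probe any address.
 */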
398 switch (pci_get_device(sc->dev)) {
399 case MGB_LAN7430_DEVICE_ID:
400 phyaddr = 1;
401 break;
402 case MGB_LAN7431_DEVICE_ID:
403 default:
404 phyaddr = MII_PHY_ANY;
405 break;
406 }
407
408 /* XXX: Would be nice(r) if locked methods were here */
409 error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(ctx),
410 mgb_media_change, mgb_media_status,
411 BMSR_DEFCAPMASK, phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE);
412 if (error != 0) {
413 device_printf(sc->dev, "Failed to attach MII interface\n");
414 goto fail;
415 }
416
417 miid = device_get_softc(sc->miibus);
418 scctx->isc_media = &miid->mii_media;
419
420 scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
421 /** Setup PBA BAR **/
422 rid = pci_msix_pba_bar(sc->dev);
423 if (rid != scctx->isc_msix_bar) {
424 sc->pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
425 &rid, RF_ACTIVE);
426 if (sc->pba == NULL) {
427 error = ENXIO;
428 device_printf(sc->dev, "Failed to setup PBA BAR\n");
429 goto fail;
430 }
431 }
432
433 mgb_get_ethaddr(sc, &hwaddr);
434 if (ETHER_IS_BROADCAST(hwaddr.octet) ||
435 ETHER_IS_MULTICAST(hwaddr.octet) ||
436 ETHER_IS_ZERO(hwaddr.octet))
437 ether_gen_addr(iflib_get_ifp(ctx), &hwaddr);
438
439 /*
440 * XXX: if the MAC address was generated the linux driver
441 * writes it back to the device.
442 */
443 iflib_set_mac(ctx, hwaddr.octet);
444
445 /* Map all vectors to vector 0 (admin interrupts) by default. */
446 CSR_WRITE_REG(sc, MGB_INTR_VEC_RX_MAP, 0);
447 CSR_WRITE_REG(sc, MGB_INTR_VEC_TX_MAP, 0);
448 CSR_WRITE_REG(sc, MGB_INTR_VEC_OTHER_MAP, 0);
449
450 return (0);
451
452 fail:
453 mgb_detach(ctx);
454 return (error);
455 }
456
457 static int
458 mgb_attach_post(if_ctx_t ctx)
459 {
460 struct mgb_softc *sc;
461
462 sc = iflib_get_softc(ctx);
463
464 device_printf(sc->dev, "Interrupt test: %s\n",
465 (mgb_intr_test(sc) ? "PASS" : "FAIL"));
466
467 return (0);
468 }
469
470 static int
471 mgb_detach(if_ctx_t ctx)
472 {
473 struct mgb_softc *sc;
474 int error;
475
476 sc = iflib_get_softc(ctx);
477
478 /* XXX: Should report errors but still detach everything. */
479 error = mgb_hw_teardown(sc);
480
481 /* Release IRQs */
482 iflib_irq_free(ctx, &sc->rx_irq);
483 iflib_irq_free(ctx, &sc->admin_irq);
484
485 bus_generic_detach(sc->dev);
486
487 if (sc->pba != NULL)
488 error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
489 rman_get_rid(sc->pba), sc->pba);
490 sc->pba = NULL;
491
492 error = mgb_release_regs(sc);
493
494 return (error);
495 }
496
497 static int
498 mgb_media_change(if_t ifp)
499 {
500 struct mii_data *miid;
501 struct mii_softc *miisc;
502 struct mgb_softc *sc;
503 if_ctx_t ctx;
504 int needs_reset;
505
506 ctx = if_getsoftc(ifp);
507 sc = iflib_get_softc(ctx);
508 miid = device_get_softc(sc->miibus);
509 LIST_FOREACH(miisc, &miid->mii_phys, mii_list)
510 PHY_RESET(miisc);
511
512 needs_reset = mii_mediachg(miid);
513 if (needs_reset != 0)
514 if_init(ifp, ctx);
515 return (needs_reset);
516 }
517
518 static void
519 mgb_media_status(if_t ifp, struct ifmediareq *ifmr)
520 {
521 struct mgb_softc *sc;
522 struct mii_data *miid;
523
524 sc = iflib_get_softc(if_getsoftc(ifp));
525 miid = device_get_softc(sc->miibus);
526 if ((if_getflags(ifp) & IFF_UP) == 0)
527 return;
528
529 mii_pollstat(miid);
530 ifmr->ifm_active = miid->mii_media_active;
531 ifmr->ifm_status = miid->mii_media_status;
532 }
533
534 static int
535 mgb_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs,
536 int ntxqsets)
537 {
538 struct mgb_softc *sc;
539 struct mgb_ring_data *rdata;
540 int q;
541
542 sc = iflib_get_softc(ctx);
543 KASSERT(ntxqsets == 1, ("ntxqsets = %d", ntxqsets));
544 rdata = &sc->tx_ring_data;
545 for (q = 0; q < ntxqsets; q++) {
546 KASSERT(ntxqs == 2, ("ntxqs = %d", ntxqs));
547 /* Ring */
548 rdata->ring = (struct mgb_ring_desc *) vaddrs[q * ntxqs + 0];
549 rdata->ring_bus_addr = paddrs[q * ntxqs + 0];
550
551 /* Head WB */
552 rdata->head_wb = (uint32_t *) vaddrs[q * ntxqs + 1];
553 rdata->head_wb_bus_addr = paddrs[q * ntxqs + 1];
554 }
555 return (0);
556 }
557
558 static int
559 mgb_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs,
560 int nrxqsets)
561 {
562 struct mgb_softc *sc;
563 struct mgb_ring_data *rdata;
564 int q;
565
566 sc = iflib_get_softc(ctx);
567 KASSERT(nrxqsets == 1, ("nrxqsets = %d", nrxqsets));
568 rdata = &sc->rx_ring_data;
569 for (q = 0; q < nrxqsets; q++) {
570 KASSERT(nrxqs == 2, ("nrxqs = %d", nrxqs));
571 /* Ring */
572 rdata->ring = (struct mgb_ring_desc *) vaddrs[q * nrxqs + 0];
573 rdata->ring_bus_addr = paddrs[q * nrxqs + 0];
574
575 /* Head WB */
576 rdata->head_wb = (uint32_t *) vaddrs[q * nrxqs + 1];
577 rdata->head_wb_bus_addr = paddrs[q * nrxqs + 1];
578 }
579 return (0);
580 }
581
582 static void
583 mgb_queues_free(if_ctx_t ctx)
584 {
585 struct mgb_softc *sc;
586
587 sc = iflib_get_softc(ctx);
588
589 memset(&sc->rx_ring_data, 0, sizeof(struct mgb_ring_data));
590 memset(&sc->tx_ring_data, 0, sizeof(struct mgb_ring_data));
591 }
592
593 static void
594 mgb_init(if_ctx_t ctx)
595 {
596 struct mgb_softc *sc;
597 struct mii_data *miid;
598 int error;
599
600 sc = iflib_get_softc(ctx);
601 miid = device_get_softc(sc->miibus);
602 device_printf(sc->dev, "running init ...\n");
603
604 mgb_dma_init(sc);
605
606 /* XXX: Turn off perfect filtering, turn on (broad|multi|uni)cast rx */
607 CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PERFECT_FILTER);
608 CSR_UPDATE_REG(sc, MGB_RFE_CTL,
609 MGB_RFE_ALLOW_BROADCAST |
610 MGB_RFE_ALLOW_MULTICAST |
611 MGB_RFE_ALLOW_UNICAST);
612
613 error = mii_mediachg(miid);
614 /* Not much we can do if this fails. */
615 if (error)
616 device_printf(sc->dev, "%s: mii_mediachg returned %d\n", __func__,
617 error);
618 }
619
620 #if 0
621 static void
622 mgb_dump_some_stats(struct mgb_softc *sc)
623 {
624 int i;
625 int first_stat = 0x1200;
626 int last_stat = 0x12FC;
627
628 for (i = first_stat; i <= last_stat; i += 4)
629 if (CSR_READ_REG(sc, i) != 0)
630 device_printf(sc->dev, "0x%04x: 0x%08x\n", i,
631 CSR_READ_REG(sc, i));
632 char *stat_names[] = {
633 "MAC_ERR_STS ",
634 "FCT_INT_STS ",
635 "DMAC_CFG ",
636 "DMAC_CMD ",
637 "DMAC_INT_STS ",
638 "DMAC_INT_EN ",
639 "DMAC_RX_ERR_STS0 ",
640 "DMAC_RX_ERR_STS1 ",
641 "DMAC_RX_ERR_STS2 ",
642 "DMAC_RX_ERR_STS3 ",
643 "INT_STS ",
644 "INT_EN ",
645 "INT_VEC_EN ",
646 "INT_VEC_MAP0 ",
647 "INT_VEC_MAP1 ",
648 "INT_VEC_MAP2 ",
649 "TX_HEAD0",
650 "TX_TAIL0",
651 "DMAC_TX_ERR_STS0 ",
652 NULL
653 };
654 int stats[] = {
655 0x114,
656 0xA0,
657 0xC00,
658 0xC0C,
659 0xC10,
660 0xC14,
661 0xC60,
662 0xCA0,
663 0xCE0,
664 0xD20,
665 0x780,
666 0x788,
667 0x794,
668 0x7A0,
669 0x7A4,
670 0x780,
671 0xD58,
672 0xD5C,
673 0xD60,
674 0x0
675 };
676 i = 0;
677 printf("==============================\n");
678 while (stats[i++])
679 device_printf(sc->dev, "%s at offset 0x%04x = 0x%08x\n",
680 stat_names[i - 1], stats[i - 1],
681 CSR_READ_REG(sc, stats[i - 1]));
682 printf("==== TX RING DESCS ====\n");
683 for (i = 0; i < MGB_DMA_RING_SIZE; i++)
684 device_printf(sc->dev, "ring[%d].data0=0x%08x\n"
685 "ring[%d].data1=0x%08x\n"
686 "ring[%d].data2=0x%08x\n"
687 "ring[%d].data3=0x%08x\n",
688 i, sc->tx_ring_data.ring[i].ctl,
689 i, sc->tx_ring_data.ring[i].addr.low,
690 i, sc->tx_ring_data.ring[i].addr.high,
691 i, sc->tx_ring_data.ring[i].sts);
692 device_printf(sc->dev, "==== DUMP_TX_DMA_RAM ====\n");
693 CSR_WRITE_REG(sc, 0x24, 0xF); // DP_SEL & TX_RAM_0
694 for (i = 0; i < 128; i++) {
695 CSR_WRITE_REG(sc, 0x2C, i); // DP_ADDR
696
697 CSR_WRITE_REG(sc, 0x28, 0); // DP_CMD
698
699 while ((CSR_READ_REG(sc, 0x24) & 0x80000000) == 0) // DP_SEL & READY
700 DELAY(1000);
701
702 device_printf(sc->dev, "DMAC_TX_RAM_0[%u]=%08x\n", i,
703 CSR_READ_REG(sc, 0x30)); // DP_DATA
704 }
705 }
706 #endif
707
708 static void
709 mgb_stop(if_ctx_t ctx)
710 {
711 struct mgb_softc *sc;
712 if_softc_ctx_t scctx;
713 int i;
714
715 sc = iflib_get_softc(ctx);
716 scctx = iflib_get_softc_ctx(ctx);
717
718 /* XXX: Could potentially timeout */
719 for (i = 0; i < scctx->isc_nrxqsets; i++) {
720 mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_STOP);
721 mgb_fct_control(sc, MGB_FCT_RX_CTL, 0, FCT_DISABLE);
722 }
723 for (i = 0; i < scctx->isc_ntxqsets; i++) {
724 mgb_dmac_control(sc, MGB_DMAC_TX_START, 0, DMAC_STOP);
725 mgb_fct_control(sc, MGB_FCT_TX_CTL, 0, FCT_DISABLE);
726 }
727 }
728
729 static int
730 mgb_legacy_intr(void *xsc)
731 {
732 struct mgb_softc *sc;
733
734 sc = xsc;
735 iflib_admin_intr_deferred(sc->ctx);
736 return (FILTER_HANDLED);
737 }
738
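/*
 * Per-queue MSI-X RX interrupt filter: mask and acknowledge the RX
 * interrupt for this queue, then let iflib schedule the RX task.
 */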
739 static int
740 mgb_rxq_intr(void *xsc)
741 {
742 struct mgb_softc *sc;
743 if_softc_ctx_t scctx;
744 uint32_t intr_sts, intr_en;
745 int qidx;
746
747 sc = xsc;
748 scctx = iflib_get_softc_ctx(sc->ctx);
749
750 intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
751 intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
752 intr_sts &= intr_en;
753
754 for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
755 if ((intr_sts & MGB_INTR_STS_RX(qidx)) != 0) {
756 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
757 MGB_INTR_STS_RX(qidx));
758 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_RX(qidx));
759 }
760 }
761 return (FILTER_SCHEDULE_THREAD);
762 }
763
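/*
 * Admin (vector 0) interrupt filter: handles the software-triggered test
 * interrupt and, while all vectors are still mapped to vector 0,
 * dispatches RX/TX work to iflib.
 */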
764 static int
765 mgb_admin_intr(void *xsc)
766 {
767 struct mgb_softc *sc;
768 if_softc_ctx_t scctx;
769 uint32_t intr_sts, intr_en;
770 int qidx;
771
772 sc = xsc;
773 scctx = iflib_get_softc_ctx(sc->ctx);
774
775 intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
776 intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
777 intr_sts &= intr_en;
778
779 /* TODO: shouldn't continue if suspended */
780 if ((intr_sts & MGB_INTR_STS_ANY) == 0)
781 return (FILTER_STRAY);
782 if ((intr_sts & MGB_INTR_STS_TEST) != 0) {
783 sc->isr_test_flag = true;
784 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
785 return (FILTER_HANDLED);
786 }
787 if ((intr_sts & MGB_INTR_STS_RX_ANY) != 0) {
788 for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
789 if ((intr_sts & MGB_INTR_STS_RX(qidx)) != 0) {
790 iflib_rx_intr_deferred(sc->ctx, qidx);
791 }
792 }
793 return (FILTER_HANDLED);
794 }
795 /* XXX: TX interrupts should not occur */
796 if ((intr_sts & MGB_INTR_STS_TX_ANY) != 0) {
797 for (qidx = 0; qidx < scctx->isc_ntxqsets; qidx++) {
798 if ((intr_sts & MGB_INTR_STS_TX(qidx)) != 0) {
799 /* clear the interrupt sts and run handler */
800 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
801 MGB_INTR_STS_TX(qidx));
802 CSR_WRITE_REG(sc, MGB_INTR_STS,
803 MGB_INTR_STS_TX(qidx));
804 iflib_tx_intr_deferred(sc->ctx, qidx);
805 }
806 }
807 return (FILTER_HANDLED);
808 }
809
810 return (FILTER_SCHEDULE_THREAD);
811 }
812
813 static int
814 mgb_msix_intr_assign(if_ctx_t ctx, int msix)
815 {
816 struct mgb_softc *sc;
817 if_softc_ctx_t scctx;
818 int error, i, vectorid;
819 char irq_name[16];
820
821 sc = iflib_get_softc(ctx);
822 scctx = iflib_get_softc_ctx(ctx);
823
824 KASSERT(scctx->isc_nrxqsets == 1 && scctx->isc_ntxqsets == 1,
825 ("num rxqsets/txqsets != 1 "));
826
827 /*
828 * First vector should be admin interrupts, others vectors are TX/RX
829 *
830 * RIDs start at 1, and vector ids start at 0.
831 */
832 vectorid = 0;
833 error = iflib_irq_alloc_generic(ctx, &sc->admin_irq, vectorid + 1,
834 IFLIB_INTR_ADMIN, mgb_admin_intr, sc, 0, "admin");
835 if (error) {
836 device_printf(sc->dev,
837 "Failed to register admin interrupt handler\n");
838 return (error);
839 }
840
841 for (i = 0; i < scctx->isc_nrxqsets; i++) {
842 vectorid++;
843 snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
844 error = iflib_irq_alloc_generic(ctx, &sc->rx_irq, vectorid + 1,
845 IFLIB_INTR_RXTX, mgb_rxq_intr, sc, i, irq_name);
846 if (error) {
847 device_printf(sc->dev,
848 "Failed to register rxq %d interrupt handler\n", i);
849 return (error);
850 }
851 CSR_UPDATE_REG(sc, MGB_INTR_VEC_RX_MAP,
852 MGB_INTR_VEC_MAP(vectorid, i));
853 }
854
855 /* Not actually mapping hw TX interrupts ... */
856 for (i = 0; i < scctx->isc_ntxqsets; i++) {
857 snprintf(irq_name, sizeof(irq_name), "txq%d", i);
858 iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
859 irq_name);
860 }
861
862 return (0);
863 }
864
865 static void
866 mgb_intr_enable_all(if_ctx_t ctx)
867 {
868 struct mgb_softc *sc;
869 if_softc_ctx_t scctx;
870 int i, dmac_enable = 0, intr_sts = 0, vec_en = 0;
871
872 sc = iflib_get_softc(ctx);
873 scctx = iflib_get_softc_ctx(ctx);
874 intr_sts |= MGB_INTR_STS_ANY;
875 vec_en |= MGB_INTR_STS_ANY;
876
877 for (i = 0; i < scctx->isc_nrxqsets; i++) {
878 intr_sts |= MGB_INTR_STS_RX(i);
879 dmac_enable |= MGB_DMAC_RX_INTR_ENBL(i);
880 vec_en |= MGB_INTR_RX_VEC_STS(i);
881 }
882
883 /* TX interrupts aren't needed ... */
884
885 CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, intr_sts);
886 CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, vec_en);
887 CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, dmac_enable);
888 CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, dmac_enable);
889 }
890
891 static void
892 mgb_intr_disable_all(if_ctx_t ctx)
893 {
894 struct mgb_softc *sc;
895
896 sc = iflib_get_softc(ctx);
897 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, UINT32_MAX);
898 CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_CLR, UINT32_MAX);
899 CSR_WRITE_REG(sc, MGB_INTR_STS, UINT32_MAX);
900
901 CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_CLR, UINT32_MAX);
902 CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, UINT32_MAX);
903 }
904
905 static int
906 mgb_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
907 {
908 /* called after successful rx isr */
909 struct mgb_softc *sc;
910
911 sc = iflib_get_softc(ctx);
912 CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_RX_VEC_STS(qid));
913 CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_RX(qid));
914
915 CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_RX_INTR_ENBL(qid));
916 CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_RX_INTR_ENBL(qid));
917 return (0);
918 }
919
920 static int
921 mgb_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
922 {
923 /* XXX: not called (since tx interrupts not used) */
924 struct mgb_softc *sc;
925
926 sc = iflib_get_softc(ctx);
927
928 CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_TX(qid));
929
930 CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_TX_INTR_ENBL(qid));
931 CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_TX_INTR_ENBL(qid));
932 return (0);
933 }
934
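/*
 * Interrupt self-test used at attach time: raise the software test
 * interrupt and poll for the admin ISR to set isr_test_flag.
 */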
935 static bool
936 mgb_intr_test(struct mgb_softc *sc)
937 {
938 int i;
939
940 sc->isr_test_flag = false;
941 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
942 CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_STS_ANY);
943 CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET,
944 MGB_INTR_STS_ANY | MGB_INTR_STS_TEST);
945 CSR_WRITE_REG(sc, MGB_INTR_SET, MGB_INTR_STS_TEST);
946 if (sc->isr_test_flag)
947 return (true);
948 for (i = 0; i < MGB_TIMEOUT; i++) {
949 DELAY(10);
950 if (sc->isr_test_flag)
951 break;
952 }
953 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TEST);
954 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
955 return (sc->isr_test_flag);
956 }
957
958 static int
959 mgb_isc_txd_encap(void *xsc, if_pkt_info_t ipi)
960 {
961 struct mgb_softc *sc;
962 struct mgb_ring_data *rdata;
963 struct mgb_ring_desc *txd;
964 bus_dma_segment_t *segs;
965 qidx_t pidx, nsegs;
966 int i;
967
968 KASSERT(ipi->ipi_qsidx == 0,
969 ("tried to refill TX Channel %d.\n", ipi->ipi_qsidx));
970 sc = xsc;
971 rdata = &sc->tx_ring_data;
972
973 pidx = ipi->ipi_pidx;
974 segs = ipi->ipi_segs;
975 nsegs = ipi->ipi_nsegs;
976
977 /* For each seg, create a descriptor */
978 for (i = 0; i < nsegs; ++i) {
979 KASSERT(nsegs == 1, ("multi-segment packets are not yet supported\n"));
980 txd = &rdata->ring[pidx];
981 txd->ctl = htole32(
982 (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK) |
983 /*
984 * XXX: This will be wrong in the multi-segment case:
985 * FS should only be set on the first segment and
986 * LS only on the last segment; see the sketch below.
987 */
988 MGB_TX_DESC_CTL_FS | MGB_TX_DESC_CTL_LS |
989 MGB_DESC_CTL_FCS);
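#if 0
/*
 * Illustrative sketch only (not compiled): if multi-segment packets were
 * supported, FS would be set on the first segment and LS on the last,
 * roughly as follows.
 */
txd->ctl = htole32(
    (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK) |
    (i == 0 ? MGB_TX_DESC_CTL_FS : 0) |
    (i == nsegs - 1 ? MGB_TX_DESC_CTL_LS : 0) |
    MGB_DESC_CTL_FCS);
#endif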
990 txd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32(
991 segs[i].ds_addr));
992 txd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32(
993 segs[i].ds_addr));
994 txd->sts = htole32(
995 (segs[i].ds_len << 16) & MGB_DESC_FRAME_LEN_MASK);
996 pidx = MGB_NEXT_RING_IDX(pidx);
997 }
998 ipi->ipi_new_pidx = pidx;
999 return (0);
1000 }
1001
1002 static void
1003 mgb_isc_txd_flush(void *xsc, uint16_t txqid, qidx_t pidx)
1004 {
1005 struct mgb_softc *sc;
1006 struct mgb_ring_data *rdata;
1007
1008 KASSERT(txqid == 0, ("tried to flush TX Channel %d.\n", txqid));
1009 sc = xsc;
1010 rdata = &sc->tx_ring_data;
1011
1012 if (rdata->last_tail != pidx) {
1013 rdata->last_tail = pidx;
1014 CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(txqid), rdata->last_tail);
1015 }
1016 }
1017
1018 static int
1019 mgb_isc_txd_credits_update(void *xsc, uint16_t txqid, bool clear)
1020 {
1021 struct mgb_softc *sc;
1022 struct mgb_ring_desc *txd;
1023 struct mgb_ring_data *rdata;
1024 int processed = 0;
1025
1026 /*
1027 * > If clear is true, we need to report the number of TX command ring
1028 * > descriptors that have been processed by the device. If clear is
1029 * > false, we just need to report whether or not at least one TX
1030 * > command ring descriptor has been processed by the device.
1031 * - vmx driver
1032 */
1033 KASSERT(txqid == 0, ("tried to credits_update TX Channel %d.\n",
1034 txqid));
1035 sc = xsc;
1036 rdata = &sc->tx_ring_data;
1037
1038 while (*(rdata->head_wb) != rdata->last_head) {
1039 if (!clear)
1040 return (1);
1041
1042 txd = &rdata->ring[rdata->last_head];
1043 memset(txd, 0, sizeof(struct mgb_ring_desc));
1044 rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1045 processed++;
1046 }
1047
1048 return (processed);
1049 }
1050
1051 static int
1052 mgb_isc_rxd_available(void *xsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
1053 {
1054 struct mgb_softc *sc;
1055 struct mgb_ring_data *rdata;
1056 int avail = 0;
1057
1058 sc = xsc;
1059 KASSERT(rxqid == 0, ("tried to check availability in RX Channel %d.\n",
1060 rxqid));
1061
1062 rdata = &sc->rx_ring_data;
1063 for (; idx != *(rdata->head_wb); idx = MGB_NEXT_RING_IDX(idx)) {
1064 avail++;
1065 /* XXX: Could verify desc is device owned here */
1066 if (avail == budget)
1067 break;
1068 }
1069 return (avail);
1070 }
1071
1072 static int
1073 mgb_isc_rxd_pkt_get(void *xsc, if_rxd_info_t ri)
1074 {
1075 struct mgb_softc *sc;
1076 struct mgb_ring_data *rdata;
1077 struct mgb_ring_desc rxd;
1078 int total_len;
1079
1080 KASSERT(ri->iri_qsidx == 0,
1081 ("tried to check availability in RX Channel %d\n", ri->iri_qsidx));
1082 sc = xsc;
1083 total_len = 0;
1084 rdata = &sc->rx_ring_data;
1085
1086 while (*(rdata->head_wb) != rdata->last_head) {
1087 /* copy ring desc and do swapping */
1088 rxd = rdata->ring[rdata->last_head];
1089 rxd.ctl = le32toh(rxd.ctl);
1090 rxd.addr.low = le32toh(rxd.addr.low);
1091 rxd.addr.high = le32toh(rxd.addr.high);
1092 rxd.sts = le32toh(rxd.sts);
1093
1094 if ((rxd.ctl & MGB_DESC_CTL_OWN) != 0) {
1095 device_printf(sc->dev,
1096 "Tried to read descriptor ... "
1097 "found that it's owned by the driver\n");
1098 return (EINVAL);
1099 }
1100 if ((rxd.ctl & MGB_RX_DESC_CTL_FS) == 0) {
1101 device_printf(sc->dev,
1102 "Tried to read descriptor ... "
1103 "found that FS is not set.\n");
1104 device_printf(sc->dev, "Tried to read descriptor ... that it FS is not set.\n");
1105 return (EINVAL);
1106 }
1107 /* XXX: Multi-packet support */
1108 if ((rxd.ctl & MGB_RX_DESC_CTL_LS) == 0) {
1109 device_printf(sc->dev,
1110 "Tried to read descriptor ... "
1111 "found that LS is not set. (Multi-buffer packets not yet supported)\n");
1112 return (EINVAL);
1113 }
1114 ri->iri_frags[0].irf_flid = 0;
1115 ri->iri_frags[0].irf_idx = rdata->last_head;
1116 ri->iri_frags[0].irf_len = MGB_DESC_GET_FRAME_LEN(&rxd);
1117 total_len += ri->iri_frags[0].irf_len;
1118
1119 rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1120 break;
1121 }
1122 ri->iri_nfrags = 1;
1123 ri->iri_len = total_len;
1124
1125 return (0);
1126 }
1127
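/*
 * Return receive buffers to the hardware: program each descriptor's
 * buffer address and length and set the OWN bit so the device may use it.
 */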
1128 static void
1129 mgb_isc_rxd_refill(void *xsc, if_rxd_update_t iru)
1130 {
1131 struct mgb_softc *sc;
1132 struct mgb_ring_data *rdata;
1133 struct mgb_ring_desc *rxd;
1134 uint64_t *paddrs;
1135 qidx_t *idxs;
1136 qidx_t idx;
1137 int count, len;
1138
1139 count = iru->iru_count;
1140 len = iru->iru_buf_size;
1141 idxs = iru->iru_idxs;
1142 paddrs = iru->iru_paddrs;
1143 KASSERT(iru->iru_qsidx == 0,
1144 ("tried to refill RX Channel %d.\n", iru->iru_qsidx));
1145
1146 sc = xsc;
1147 rdata = &sc->rx_ring_data;
1148
1149 while (count > 0) {
1150 idx = idxs[--count];
1151 rxd = &rdata->ring[idx];
1152
1153 rxd->sts = 0;
1154 rxd->addr.low =
1155 htole32(CSR_TRANSLATE_ADDR_LOW32(paddrs[count]));
1156 rxd->addr.high =
1157 htole32(CSR_TRANSLATE_ADDR_HIGH32(paddrs[count]));
1158 rxd->ctl = htole32(MGB_DESC_CTL_OWN |
1159 (len & MGB_DESC_CTL_BUFLEN_MASK));
1160 }
1161 return;
1162 }
1163
1164 static void
1165 mgb_isc_rxd_flush(void *xsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
1166 {
1167 struct mgb_softc *sc;
1168
1169 sc = xsc;
1170
1171 KASSERT(rxqid == 0, ("tried to flush RX Channel %d.\n", rxqid));
1172 /*
1173 * According to the programming guide, last_tail must be set to
1174 * the last valid RX descriptor, rather than to the one past that.
1175 * Note that this is not true for the TX ring!
1176 */
1177 sc->rx_ring_data.last_tail = MGB_PREV_RING_IDX(pidx);
1178 CSR_WRITE_REG(sc, MGB_DMA_RX_TAIL(rxqid), sc->rx_ring_data.last_tail);
1179 return;
1180 }
1181
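/*
 * Verify that the mapped BAR belongs to a supported chip by reading the
 * ID/revision register and checking the device ID field.
 */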
1182 static int
1183 mgb_test_bar(struct mgb_softc *sc)
1184 {
1185 uint32_t id_rev, dev_id;
1186
1187 id_rev = CSR_READ_REG(sc, 0);
1188 dev_id = id_rev >> 16;
1189 if (dev_id == MGB_LAN7430_DEVICE_ID ||
1190 dev_id == MGB_LAN7431_DEVICE_ID) {
1191 return (0);
1192 } else {
1193 device_printf(sc->dev, "ID check failed.\n");
1194 return (ENXIO);
1195 }
1196 }
1197
1198 static int
1199 mgb_alloc_regs(struct mgb_softc *sc)
1200 {
1201 int rid;
1202
1203 rid = PCIR_BAR(MGB_BAR);
1204 pci_enable_busmaster(sc->dev);
1205 sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1206 &rid, RF_ACTIVE);
1207 if (sc->regs == NULL)
1208 return (ENXIO);
1209
1210 return (0);
1211 }
1212
1213 static int
1214 mgb_release_regs(struct mgb_softc *sc)
1215 {
1216 int error = 0;
1217
1218 if (sc->regs != NULL)
1219 error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
1220 rman_get_rid(sc->regs), sc->regs);
1221 sc->regs = NULL;
1222 pci_disable_busmaster(sc->dev);
1223 return (error);
1224 }
1225
1226 static int
1227 mgb_dma_init(struct mgb_softc *sc)
1228 {
1229 if_softc_ctx_t scctx;
1230 int ch, error = 0;
1231
1232 scctx = iflib_get_softc_ctx(sc->ctx);
1233
1234 for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
1235 if ((error = mgb_dma_rx_ring_init(sc, ch)))
1236 goto fail;
1237
1238 for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
1239 if ((error = mgb_dma_tx_ring_init(sc, ch)))
1240 goto fail;
1241
1242 fail:
1243 return (error);
1244 }
1245
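/*
 * Program one RX DMA channel: write the descriptor ring base and head
 * write-back addresses, set ring length and padding, then reset/enable
 * the RX FIFO controller and start the DMA channel.
 */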
1246 static int
1247 mgb_dma_rx_ring_init(struct mgb_softc *sc, int channel)
1248 {
1249 struct mgb_ring_data *rdata;
1250 int ring_config, error = 0;
1251
1252 rdata = &sc->rx_ring_data;
1253 mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_RESET);
1254 KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_RX_START, channel),
1255 ("Trying to init channels when not in init state\n"));
1256
1257 /* write ring address */
1258 if (rdata->ring_bus_addr == 0) {
1259 device_printf(sc->dev, "Invalid ring bus addr.\n");
1260 goto fail;
1261 }
1262
1263 CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_H(channel),
1264 CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1265 CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_L(channel),
1266 CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1267
1268 /* write head pointer writeback address */
1269 if (rdata->head_wb_bus_addr == 0) {
1270 device_printf(sc->dev, "Invalid head wb bus addr.\n");
1271 goto fail;
1272 }
1273 CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_H(channel),
1274 CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1275 CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_L(channel),
1276 CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1277
1278 /* Enable head pointer writeback */
1279 CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG0(channel), MGB_DMA_HEAD_WB_ENBL);
1280
1281 ring_config = CSR_READ_REG(sc, MGB_DMA_RX_CONFIG1(channel));
1282 /* ring size */
1283 ring_config &= ~MGB_DMA_RING_LEN_MASK;
1284 ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1285 /* packet padding (PAD_2 is better for IP header alignment ...) */
1286 ring_config &= ~MGB_DMA_RING_PAD_MASK;
1287 ring_config |= (MGB_DMA_RING_PAD_0 & MGB_DMA_RING_PAD_MASK);
1288
1289 CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG1(channel), ring_config);
1290
1291 rdata->last_head = CSR_READ_REG(sc, MGB_DMA_RX_HEAD(channel));
1292
1293 error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_RESET);
1294 if (error != 0) {
1295 device_printf(sc->dev, "Failed to reset RX FCT.\n");
1296 goto fail;
1297 }
1298 error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_ENABLE);
1299 if (error != 0) {
1300 device_printf(sc->dev, "Failed to enable RX FCT.\n");
1301 goto fail;
1302 }
1303 error = mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_START);
1304 if (error != 0)
1305 device_printf(sc->dev, "Failed to start RX DMAC.\n");
1306 fail:
1307 return (error);
1308 }
1309
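/*
 * Program one TX DMA channel: reset and enable the TX FIFO controller,
 * write the ring base, length and head write-back addresses, then start
 * the DMA channel with the tail pointer at zero.
 */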
1310 static int
1311 mgb_dma_tx_ring_init(struct mgb_softc *sc, int channel)
1312 {
1313 struct mgb_ring_data *rdata;
1314 int ring_config, error = 0;
1315
1316 rdata = &sc->tx_ring_data;
1317 if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel, FCT_RESET))) {
1318 device_printf(sc->dev, "Failed to reset TX FCT.\n");
1319 goto fail;
1320 }
1321 if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel,
1322 FCT_ENABLE))) {
1323 device_printf(sc->dev, "Failed to enable TX FCT.\n");
1324 goto fail;
1325 }
1326 if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1327 DMAC_RESET))) {
1328 device_printf(sc->dev, "Failed to reset TX DMAC.\n");
1329 goto fail;
1330 }
1331 KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_TX_START, channel),
1332 ("Trying to init channels in not init state\n"));
1333
1334 /* write ring address */
1335 if (rdata->ring_bus_addr == 0) {
1336 device_printf(sc->dev, "Invalid ring bus addr.\n");
1337 goto fail;
1338 }
1339 CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_H(channel),
1340 CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1341 CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_L(channel),
1342 CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1343
1344 /* write ring size */
1345 ring_config = CSR_READ_REG(sc, MGB_DMA_TX_CONFIG1(channel));
1346 ring_config &= ~MGB_DMA_RING_LEN_MASK;
1347 ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1348 CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG1(channel), ring_config);
1349
1350 /* Enable interrupt on completion and head pointer writeback */
1351 ring_config = (MGB_DMA_HEAD_WB_LS_ENBL | MGB_DMA_HEAD_WB_ENBL);
1352 CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG0(channel), ring_config);
1353
1354 /* write head pointer writeback address */
1355 if (rdata->head_wb_bus_addr == 0) {
1356 device_printf(sc->dev, "Invalid head wb bus addr.\n");
1357 goto fail;
1358 }
1359 CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_H(channel),
1360 CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1361 CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_L(channel),
1362 CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1363
1364 rdata->last_head = CSR_READ_REG(sc, MGB_DMA_TX_HEAD(channel));
1365 KASSERT(rdata->last_head == 0, ("MGB_DMA_TX_HEAD was not reset.\n"));
1366 rdata->last_tail = 0;
1367 CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(channel), rdata->last_tail);
1368
1369 if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1370 DMAC_START)))
1371 device_printf(sc->dev, "Failed to start TX DMAC.\n");
1372 fail:
1373 return (error);
1374 }
1375
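/*
 * Issue a reset/start/stop command to a DMA channel and, where the
 * hardware reports completion, wait for the command to take effect.
 */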
1376 static int
1377 mgb_dmac_control(struct mgb_softc *sc, int start, int channel,
1378 enum mgb_dmac_cmd cmd)
1379 {
1380 int error = 0;
1381
1382 switch (cmd) {
1383 case DMAC_RESET:
1384 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1385 MGB_DMAC_CMD_RESET(start, channel));
1386 error = mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0,
1387 MGB_DMAC_CMD_RESET(start, channel));
1388 break;
1389
1390 case DMAC_START:
1391 /*
1392 * NOTE: this simplifies the logic, since it will never
1393 * try to start in STOP_PENDING, but it also increases work.
1394 */
1395 error = mgb_dmac_control(sc, start, channel, DMAC_STOP);
1396 if (error != 0)
1397 return (error);
1398 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1399 MGB_DMAC_CMD_START(start, channel));
1400 break;
1401
1402 case DMAC_STOP:
1403 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1404 MGB_DMAC_CMD_STOP(start, channel));
1405 error = mgb_wait_for_bits(sc, MGB_DMAC_CMD,
1406 MGB_DMAC_CMD_STOP(start, channel),
1407 MGB_DMAC_CMD_START(start, channel));
1408 break;
1409 }
1410 return (error);
1411 }
1412
1413 static int
1414 mgb_fct_control(struct mgb_softc *sc, int reg, int channel,
1415 enum mgb_fct_cmd cmd)
1416 {
1417
1418 switch (cmd) {
1419 case FCT_RESET:
1420 CSR_WRITE_REG(sc, reg, MGB_FCT_RESET(channel));
1421 return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_RESET(channel)));
1422 case FCT_ENABLE:
1423 CSR_WRITE_REG(sc, reg, MGB_FCT_ENBL(channel));
1424 return (0);
1425 case FCT_DISABLE:
1426 CSR_WRITE_REG(sc, reg, MGB_FCT_DSBL(channel));
1427 return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_ENBL(channel)));
1428 }
1429 }
1430
1431 static int
1432 mgb_hw_teardown(struct mgb_softc *sc)
1433 {
1434 int err = 0;
1435
1436 /* Stop MAC */
1437 CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1438 CSR_CLEAR_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1439 if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0)))
1440 return (err);
1441 if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0)))
1442 return (err);
1443 return (err);
1444 }
1445
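/*
 * Bring the controller to a known state: chip-level (lite) reset, MAC
 * automatic speed/duplex setup, PHY reset and DMA controller reset.
 */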
1446 static int
1447 mgb_hw_init(struct mgb_softc *sc)
1448 {
1449 int error = 0;
1450
1451 error = mgb_hw_reset(sc);
1452 if (error != 0)
1453 goto fail;
1454
1455 mgb_mac_init(sc);
1456
1457 error = mgb_phy_reset(sc);
1458 if (error != 0)
1459 goto fail;
1460
1461 error = mgb_dmac_reset(sc);
1462 if (error != 0)
1463 goto fail;
1464
1465 fail:
1466 return (error);
1467 }
1468
1469 static int
1470 mgb_hw_reset(struct mgb_softc *sc)
1471 {
1472
1473 CSR_UPDATE_REG(sc, MGB_HW_CFG, MGB_LITE_RESET);
1474 return (mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET));
1475 }
1476
1477 static int
1478 mgb_mac_init(struct mgb_softc *sc)
1479 {
1480
1481 /**
1482 * enable automatic duplex detection and
1483 * automatic speed detection
1484 */
1485 CSR_UPDATE_REG(sc, MGB_MAC_CR, MGB_MAC_ADD_ENBL | MGB_MAC_ASD_ENBL);
1486 CSR_UPDATE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1487 CSR_UPDATE_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1488
1489 return (MGB_STS_OK);
1490 }
1491
1492 static int
1493 mgb_phy_reset(struct mgb_softc *sc)
1494 {
1495
1496 CSR_UPDATE_BYTE(sc, MGB_PMT_CTL, MGB_PHY_RESET);
1497 if (mgb_wait_for_bits(sc, MGB_PMT_CTL, 0, MGB_PHY_RESET) ==
1498 MGB_STS_TIMEOUT)
1499 return (MGB_STS_TIMEOUT);
1500 return (mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0));
1501 }
1502
1503 static int
1504 mgb_dmac_reset(struct mgb_softc *sc)
1505 {
1506
1507 CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_RESET);
1508 return (mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_RESET));
1509 }
1510
1511 static int
1512 mgb_wait_for_bits(struct mgb_softc *sc, int reg, int set_bits, int clear_bits)
1513 {
1514 int i, val;
1515
1516 i = 0;
1517 do {
1518 /*
1519 * XXX: Datasheets states delay should be > 5 microseconds
1520 * for device reset.
1521 */
1522 DELAY(100);
1523 val = CSR_READ_REG(sc, reg);
1524 if ((val & set_bits) == set_bits && (val & clear_bits) == 0)
1525 return (MGB_STS_OK);
1526 } while (i++ < MGB_TIMEOUT);
1527
1528 return (MGB_STS_TIMEOUT);
1529 }
1530
1531 static void
1532 mgb_get_ethaddr(struct mgb_softc *sc, struct ether_addr *dest)
1533 {
1534
1535 CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_L, &dest->octet[0], 4);
1536 CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_H, &dest->octet[4], 2);
1537 }
1538
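/*
 * MII register access goes through the MII_ACCESS/MII_DATA register pair:
 * wait for BUSY to clear, post the PHY and register addresses together
 * with the READ or WRITE bit, then wait for BUSY to clear again.
 */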
1539 static int
1540 mgb_miibus_readreg(device_t dev, int phy, int reg)
1541 {
1542 struct mgb_softc *sc;
1543 int mii_access;
1544
1545 sc = iflib_get_softc(device_get_softc(dev));
1546
1547 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1548 MGB_STS_TIMEOUT)
1549 return (EIO);
1550 mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1551 mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1552 mii_access |= MGB_MII_BUSY | MGB_MII_READ;
1553 CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1554 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1555 MGB_STS_TIMEOUT)
1556 return (EIO);
1557 return (CSR_READ_2_BYTES(sc, MGB_MII_DATA));
1558 }
1559
1560 static int
1561 mgb_miibus_writereg(device_t dev, int phy, int reg, int data)
1562 {
1563 struct mgb_softc *sc;
1564 int mii_access;
1565
1566 sc = iflib_get_softc(device_get_softc(dev));
1567
1568 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1569 MGB_STS_TIMEOUT)
1570 return (EIO);
1571 mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1572 mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1573 mii_access |= MGB_MII_BUSY | MGB_MII_WRITE;
1574 CSR_WRITE_REG(sc, MGB_MII_DATA, data);
1575 CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1576 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1577 MGB_STS_TIMEOUT)
1578 return (EIO);
1579 return (0);
1580 }
1581
1582 /* XXX: May need to lock these up */
1583 static void
1584 mgb_miibus_statchg(device_t dev)
1585 {
1586 struct mgb_softc *sc;
1587 struct mii_data *miid;
1588
1589 sc = iflib_get_softc(device_get_softc(dev));
1590 miid = device_get_softc(sc->miibus);
1591 /* Update baudrate in iflib */
1592 sc->baudrate = ifmedia_baudrate(miid->mii_media_active);
1593 iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1594 }
1595
1596 static void
1597 mgb_miibus_linkchg(device_t dev)
1598 {
1599 struct mgb_softc *sc;
1600 struct mii_data *miid;
1601 int link_state;
1602
1603 sc = iflib_get_softc(device_get_softc(dev));
1604 miid = device_get_softc(sc->miibus);
1605 /* XXX: copied from miibus_linkchg **/
1606 if (miid->mii_media_status & IFM_AVALID) {
1607 if (miid->mii_media_status & IFM_ACTIVE)
1608 link_state = LINK_STATE_UP;
1609 else
1610 link_state = LINK_STATE_DOWN;
1611 } else
1612 link_state = LINK_STATE_UNKNOWN;
1613 sc->link_state = link_state;
1614 iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1615 }
1616