xref: /freebsd/sys/dev/enetc/if_enetc.c (revision d7d962ead0b6e5e8a39202d0590022082bf5bfb6)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2021 Alstom Group.
5  * Copyright (c) 2021 Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/rman.h>
37 #include <sys/socket.h>
38 #include <sys/sockio.h>
39 
40 #include <machine/bus.h>
41 #include <machine/resource.h>
42 
43 #include <net/ethernet.h>
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_var.h>
47 #include <net/if_types.h>
48 #include <net/if_media.h>
49 #include <net/iflib.h>
50 
51 #include <dev/enetc/enetc_hw.h>
52 #include <dev/enetc/enetc.h>
53 #include <dev/enetc/enetc_mdio.h>
54 #include <dev/mii/mii.h>
55 #include <dev/mii/miivar.h>
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
58 
59 #include <dev/ofw/ofw_bus.h>
60 #include <dev/ofw/ofw_bus_subr.h>
61 
62 #include "ifdi_if.h"
63 #include "miibus_if.h"
64 
65 static device_register_t		enetc_register;
66 
67 static ifdi_attach_pre_t		enetc_attach_pre;
68 static ifdi_attach_post_t		enetc_attach_post;
69 static ifdi_detach_t			enetc_detach;
70 
71 static ifdi_tx_queues_alloc_t		enetc_tx_queues_alloc;
72 static ifdi_rx_queues_alloc_t		enetc_rx_queues_alloc;
73 static ifdi_queues_free_t		enetc_queues_free;
74 
75 static ifdi_init_t			enetc_init;
76 static ifdi_stop_t			enetc_stop;
77 
78 static ifdi_msix_intr_assign_t		enetc_msix_intr_assign;
79 static ifdi_tx_queue_intr_enable_t	enetc_tx_queue_intr_enable;
80 static ifdi_rx_queue_intr_enable_t	enetc_rx_queue_intr_enable;
81 static ifdi_intr_enable_t		enetc_intr_enable;
82 static ifdi_intr_disable_t		enetc_intr_disable;
83 
84 static int	enetc_isc_txd_encap(void*, if_pkt_info_t);
85 static void	enetc_isc_txd_flush(void*, uint16_t, qidx_t);
86 static int	enetc_isc_txd_credits_update(void*, uint16_t, bool);
87 static int	enetc_isc_rxd_available(void*, uint16_t, qidx_t, qidx_t);
88 static int	enetc_isc_rxd_pkt_get(void*, if_rxd_info_t);
89 static void	enetc_isc_rxd_refill(void*, if_rxd_update_t);
90 static void	enetc_isc_rxd_flush(void*, uint16_t, uint8_t, qidx_t);
91 
92 static void	enetc_vlan_register(if_ctx_t, uint16_t);
93 static void	enetc_vlan_unregister(if_ctx_t, uint16_t);
94 
95 static uint64_t	enetc_get_counter(if_ctx_t, ift_counter);
96 static int	enetc_promisc_set(if_ctx_t, int);
97 static int	enetc_mtu_set(if_ctx_t, uint32_t);
98 static void	enetc_setup_multicast(if_ctx_t);
99 static void	enetc_timer(if_ctx_t, uint16_t);
100 static void	enetc_update_admin_status(if_ctx_t);
101 
102 static miibus_readreg_t		enetc_miibus_readreg;
103 static miibus_writereg_t	enetc_miibus_writereg;
104 static miibus_linkchg_t		enetc_miibus_linkchg;
105 static miibus_statchg_t		enetc_miibus_statchg;
106 
107 static int			enetc_media_change(if_t);
108 static void			enetc_media_status(if_t, struct ifmediareq*);
109 
110 static int			enetc_fixed_media_change(if_t);
111 static void			enetc_fixed_media_status(if_t, struct ifmediareq*);
112 
113 static void			enetc_max_nqueues(struct enetc_softc*, int*, int*);
114 static int			enetc_setup_phy(struct enetc_softc*);
115 
116 static void			enetc_get_hwaddr(struct enetc_softc*);
117 static void			enetc_set_hwaddr(struct enetc_softc*);
118 static int			enetc_setup_rss(struct enetc_softc*);
119 
120 static void			enetc_init_hw(struct enetc_softc*);
121 static void			enetc_init_ctrl(struct enetc_softc*);
122 static void			enetc_init_tx(struct enetc_softc*);
123 static void			enetc_init_rx(struct enetc_softc*);
124 
125 static int			enetc_ctrl_send(struct enetc_softc*,
126 				    uint16_t, uint16_t, iflib_dma_info_t);
127 
128 static const char enetc_driver_version[] = "1.0.0";
129 
130 static pci_vendor_info_t enetc_vendor_info_array[] = {
131 	PVID(PCI_VENDOR_FREESCALE, ENETC_DEV_ID_PF,
132 	    "Freescale ENETC PCIe Gigabit Ethernet Controller"),
133 	PVID_END
134 };
135 
136 #define ENETC_IFCAPS (IFCAP_VLAN_MTU | IFCAP_RXCSUM | IFCAP_JUMBO_MTU | \
137 	IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)
138 
139 static device_method_t enetc_methods[] = {
140 	DEVMETHOD(device_register,	enetc_register),
141 	DEVMETHOD(device_probe,		iflib_device_probe),
142 	DEVMETHOD(device_attach,	iflib_device_attach),
143 	DEVMETHOD(device_detach,	iflib_device_detach),
144 	DEVMETHOD(device_shutdown,	iflib_device_shutdown),
145 	DEVMETHOD(device_suspend,	iflib_device_suspend),
146 	DEVMETHOD(device_resume,	iflib_device_resume),
147 
148 	DEVMETHOD(miibus_readreg,	enetc_miibus_readreg),
149 	DEVMETHOD(miibus_writereg,	enetc_miibus_writereg),
150 	DEVMETHOD(miibus_linkchg,	enetc_miibus_linkchg),
151 	DEVMETHOD(miibus_statchg,	enetc_miibus_statchg),
152 
153 	DEVMETHOD_END
154 };
155 
156 static driver_t enetc_driver = {
157 	"enetc", enetc_methods, sizeof(struct enetc_softc)
158 };
159 
160 static devclass_t enetc_devclass;
161 DRIVER_MODULE(miibus, enetc, miibus_driver, miibus_devclass, NULL, NULL);
162 /* Make sure miibus gets procesed first. */
163 DRIVER_MODULE_ORDERED(enetc, pci, enetc_driver, enetc_devclass, NULL, NULL,
164     SI_ORDER_ANY);
165 MODULE_VERSION(enetc, 1);
166 
167 IFLIB_PNP_INFO(pci, enetc, enetc_vendor_info_array);
168 
169 MODULE_DEPEND(enetc, ether, 1, 1, 1);
170 MODULE_DEPEND(enetc, iflib, 1, 1, 1);
171 MODULE_DEPEND(enetc, miibus, 1, 1, 1);
172 
173 static device_method_t enetc_iflib_methods[] = {
174 	DEVMETHOD(ifdi_attach_pre,		enetc_attach_pre),
175 	DEVMETHOD(ifdi_attach_post,		enetc_attach_post),
176 	DEVMETHOD(ifdi_detach,			enetc_detach),
177 
178 	DEVMETHOD(ifdi_init,			enetc_init),
179 	DEVMETHOD(ifdi_stop,			enetc_stop),
180 
181 	DEVMETHOD(ifdi_tx_queues_alloc,		enetc_tx_queues_alloc),
182 	DEVMETHOD(ifdi_rx_queues_alloc,		enetc_rx_queues_alloc),
183 	DEVMETHOD(ifdi_queues_free,		enetc_queues_free),
184 
185 	DEVMETHOD(ifdi_msix_intr_assign,	enetc_msix_intr_assign),
186 	DEVMETHOD(ifdi_tx_queue_intr_enable,	enetc_tx_queue_intr_enable),
187 	DEVMETHOD(ifdi_rx_queue_intr_enable,	enetc_rx_queue_intr_enable),
188 	DEVMETHOD(ifdi_intr_enable,		enetc_intr_enable),
189 	DEVMETHOD(ifdi_intr_disable,		enetc_intr_disable),
190 
191 	DEVMETHOD(ifdi_vlan_register,		enetc_vlan_register),
192 	DEVMETHOD(ifdi_vlan_unregister,		enetc_vlan_unregister),
193 
194 	DEVMETHOD(ifdi_get_counter,		enetc_get_counter),
195 	DEVMETHOD(ifdi_mtu_set,			enetc_mtu_set),
196 	DEVMETHOD(ifdi_multi_set,		enetc_setup_multicast),
197 	DEVMETHOD(ifdi_promisc_set,		enetc_promisc_set),
198 	DEVMETHOD(ifdi_timer,			enetc_timer),
199 	DEVMETHOD(ifdi_update_admin_status,	enetc_update_admin_status),
200 
201 	DEVMETHOD_END
202 };
203 
204 static driver_t enetc_iflib_driver = {
205 	"enetc", enetc_iflib_methods, sizeof(struct enetc_softc)
206 };
207 
208 static struct if_txrx enetc_txrx = {
209 	.ift_txd_encap = enetc_isc_txd_encap,
210 	.ift_txd_flush = enetc_isc_txd_flush,
211 	.ift_txd_credits_update = enetc_isc_txd_credits_update,
212 	.ift_rxd_available = enetc_isc_rxd_available,
213 	.ift_rxd_pkt_get = enetc_isc_rxd_pkt_get,
214 	.ift_rxd_refill = enetc_isc_rxd_refill,
215 	.ift_rxd_flush = enetc_isc_rxd_flush
216 };
217 
218 static struct if_shared_ctx enetc_sctx_init = {
219 	.isc_magic = IFLIB_MAGIC,
220 
221 	.isc_q_align = ENETC_RING_ALIGN,
222 
223 	.isc_tx_maxsize = ENETC_MAX_FRAME_LEN,
224 	.isc_tx_maxsegsize = PAGE_SIZE,
225 
226 	.isc_rx_maxsize = ENETC_MAX_FRAME_LEN,
227 	.isc_rx_maxsegsize = ENETC_MAX_FRAME_LEN,
228 	.isc_rx_nsegments = ENETC_MAX_SCATTER,
229 
230 	.isc_admin_intrcnt = 0,
231 
232 	.isc_nfl = 1,
233 	.isc_nrxqs = 1,
234 	.isc_ntxqs = 1,
235 
236 	.isc_vendor_info = enetc_vendor_info_array,
237 	.isc_driver_version = enetc_driver_version,
238 	.isc_driver = &enetc_iflib_driver,
239 
240 	.isc_flags = IFLIB_DRIVER_MEDIA | IFLIB_PRESERVE_TX_INDICES,
241 	.isc_ntxd_min = {ENETC_MIN_DESC},
242 	.isc_ntxd_max = {ENETC_MAX_DESC},
243 	.isc_ntxd_default = {ENETC_DEFAULT_DESC},
244 	.isc_nrxd_min = {ENETC_MIN_DESC},
245 	.isc_nrxd_max = {ENETC_MAX_DESC},
246 	.isc_nrxd_default = {ENETC_DEFAULT_DESC}
247 };
248 
249 static void*
250 enetc_register(device_t dev)
251 {
252 
253 	if (!ofw_bus_status_okay(dev))
254 		return (NULL);
255 
256 	return (&enetc_sctx_init);
257 }
258 
259 static void
260 enetc_max_nqueues(struct enetc_softc *sc, int *max_tx_nqueues,
261     int *max_rx_nqueues)
262 {
263 	uint32_t val;
264 
265 	val = ENETC_PORT_RD4(sc, ENETC_PCAPR0);
266 	*max_tx_nqueues = MIN(ENETC_PCAPR0_TXBDR(val), ENETC_MAX_QUEUES);
267 	*max_rx_nqueues = MIN(ENETC_PCAPR0_RXBDR(val), ENETC_MAX_QUEUES);
268 }
269 
270 static int
271 enetc_setup_fixed(struct enetc_softc *sc, phandle_t node)
272 {
273 	ssize_t size;
274 	int speed;
275 
276 	size = OF_getencprop(node, "speed", &speed, sizeof(speed));
277 	if (size <= 0) {
278 		device_printf(sc->dev,
279 		    "Device has fixed-link node without link speed specified\n");
280 		return (ENXIO);
281 	}
282 	switch (speed) {
283 	case 10:
284 		speed = IFM_10_T;
285 		break;
286 	case 100:
287 		speed = IFM_100_TX;
288 		break;
289 	case 1000:
290 		speed = IFM_1000_T;
291 		break;
292 	case 2500:
293 		speed = IFM_2500_T;
294 		break;
295 	default:
296 		device_printf(sc->dev, "Unsupported link speed value of %d\n",
297 		    speed);
298 		return (ENXIO);
299 	}
300 	speed |= IFM_ETHER;
301 
302 	if (OF_hasprop(node, "full-duplex"))
303 		speed |= IFM_FDX;
304 	else
305 		speed |= IFM_HDX;
306 
307 	sc->fixed_link = true;
308 
309 	ifmedia_init(&sc->fixed_ifmedia, 0, enetc_fixed_media_change,
310 	    enetc_fixed_media_status);
311 	ifmedia_add(&sc->fixed_ifmedia, speed, 0, NULL);
312 	ifmedia_set(&sc->fixed_ifmedia, speed);
313 	sc->shared->isc_media = &sc->fixed_ifmedia;
314 
315 	return (0);
316 }
317 
318 static int
319 enetc_setup_phy(struct enetc_softc *sc)
320 {
321 	phandle_t node, fixed_link, phy_handle;
322 	struct mii_data *miid;
323 	int phy_addr, error;
324 	ssize_t size;
325 
326 	node = ofw_bus_get_node(sc->dev);
327 	fixed_link = ofw_bus_find_child(node, "fixed-link");
328 	if (fixed_link != 0)
329 		return (enetc_setup_fixed(sc, fixed_link));
330 
331 	size = OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle));
332 	if (size <= 0) {
333 		device_printf(sc->dev,
334 		    "Failed to acquire PHY handle from FDT.\n");
335 		return (ENXIO);
336 	}
337 	phy_handle = OF_node_from_xref(phy_handle);
338 	size = OF_getencprop(phy_handle, "reg", &phy_addr, sizeof(phy_addr));
339 	if (size <= 0) {
340 		device_printf(sc->dev, "Failed to obtain PHY address\n");
341 		return (ENXIO);
342 	}
343 	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(sc->ctx),
344 	    enetc_media_change, enetc_media_status,
345 	    BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
346 	if (error != 0) {
347 		device_printf(sc->dev, "mii_attach failed\n");
348 		return (error);
349 	}
350 	miid = device_get_softc(sc->miibus);
351 	sc->shared->isc_media = &miid->mii_media;
352 
353 	return (0);
354 }
355 
356 static int
357 enetc_attach_pre(if_ctx_t ctx)
358 {
359 	struct ifnet *ifp;
360 	if_softc_ctx_t scctx;
361 	struct enetc_softc *sc;
362 	int error, rid;
363 
364 	sc = iflib_get_softc(ctx);
365 	scctx = iflib_get_softc_ctx(ctx);
366 	sc->ctx = ctx;
367 	sc->dev = iflib_get_dev(ctx);
368 	sc->shared = scctx;
369 	ifp = iflib_get_ifp(ctx);
370 
371 	pci_save_state(sc->dev);
372 	pcie_flr(sc->dev, 1000, false);
373 	pci_restore_state(sc->dev);
374 
375 	rid = PCIR_BAR(ENETC_BAR_REGS);
376 	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
377 	if (sc->regs == NULL) {
378 		device_printf(sc->dev,
379 		    "Failed to allocate BAR %d\n", ENETC_BAR_REGS);
380 		return (ENXIO);
381 	}
382 
383 	error = iflib_dma_alloc_align(ctx,
384 	    ENETC_MIN_DESC * sizeof(struct enetc_cbd),
385 	    ENETC_RING_ALIGN,
386 	    &sc->ctrl_queue.dma,
387 	    0);
388 	if (error != 0) {
389 		device_printf(sc->dev, "Failed to allocate control ring\n");
390 		goto fail;
391 	}
392 	sc->ctrl_queue.ring = (struct enetc_cbd*)sc->ctrl_queue.dma.idi_vaddr;
393 
394 	scctx->isc_txrx = &enetc_txrx;
395 	scctx->isc_tx_nsegments = ENETC_MAX_SCATTER;
396 	enetc_max_nqueues(sc, &scctx->isc_nrxqsets_max, &scctx->isc_ntxqsets_max);
397 
398 	if (scctx->isc_ntxd[0] % ENETC_DESC_ALIGN != 0) {
399 		device_printf(sc->dev,
400 		    "The number of TX descriptors has to be a multiple of %d\n",
401 		    ENETC_DESC_ALIGN);
402 		error = EINVAL;
403 		goto fail;
404 	}
405 	if (scctx->isc_nrxd[0] % ENETC_DESC_ALIGN != 0) {
406 		device_printf(sc->dev,
407 		    "The number of RX descriptors has to be a multiple of %d\n",
408 		    ENETC_DESC_ALIGN);
409 		error = EINVAL;
410 		goto fail;
411 	}
412 	scctx->isc_txqsizes[0] = scctx->isc_ntxd[0] * sizeof(union enetc_tx_bd);
413 	scctx->isc_rxqsizes[0] = scctx->isc_nrxd[0] * sizeof(union enetc_rx_bd);
414 	scctx->isc_txd_size[0] = sizeof(union enetc_tx_bd);
415 	scctx->isc_rxd_size[0] = sizeof(union enetc_rx_bd);
416 	scctx->isc_tx_csum_flags = 0;
417 	scctx->isc_capabilities = scctx->isc_capenable = ENETC_IFCAPS;
418 
419 	error = enetc_mtu_set(ctx, ETHERMTU);
420 	if (error != 0)
421 		goto fail;
422 
423 	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
424 
425 	error = enetc_setup_phy(sc);
426 	if (error != 0)
427 		goto fail;
428 
429 	enetc_get_hwaddr(sc);
430 
431 	return (0);
432 fail:
433 	enetc_detach(ctx);
434 	return (error);
435 }
436 
437 static int
438 enetc_attach_post(if_ctx_t ctx)
439 {
440 
441 	enetc_init_hw(iflib_get_softc(ctx));
442 	return (0);
443 }
444 
445 static int
446 enetc_detach(if_ctx_t ctx)
447 {
448 	struct enetc_softc *sc;
449 	int error = 0, i;
450 
451 	sc = iflib_get_softc(ctx);
452 
453 	for (i = 0; i < sc->rx_num_queues; i++)
454 		iflib_irq_free(ctx, &sc->rx_queues[i].irq);
455 
456 	if (sc->miibus != NULL)
457 		device_delete_child(sc->dev, sc->miibus);
458 
459 	if (sc->regs != NULL)
460 		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
461 		    rman_get_rid(sc->regs), sc->regs);
462 
463 	if (sc->ctrl_queue.dma.idi_size != 0)
464 		iflib_dma_free(&sc->ctrl_queue.dma);
465 
466 	return (error);
467 }
468 
469 static int
470 enetc_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
471     int ntxqs, int ntxqsets)
472 {
473 	struct enetc_softc *sc;
474 	struct enetc_tx_queue *queue;
475 	int i;
476 
477 	sc = iflib_get_softc(ctx);
478 
479 	MPASS(ntxqs == 1);
480 
481 	sc->tx_queues = mallocarray(sc->tx_num_queues,
482 	    sizeof(struct enetc_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
483 	if (sc->tx_queues == NULL) {
484 		device_printf(sc->dev,
485 		    "Failed to allocate memory for TX queues.\n");
486 		return (ENOMEM);
487 	}
488 
489 	for (i = 0; i < sc->tx_num_queues; i++) {
490 		queue = &sc->tx_queues[i];
491 		queue->sc = sc;
492 		queue->ring = (union enetc_tx_bd*)(vaddrs[i]);
493 		queue->ring_paddr = paddrs[i];
494 		queue->next_to_clean = 0;
495 		queue->ring_full = false;
496 	}
497 
498 	return (0);
499 }
500 
501 static int
502 enetc_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
503     int nrxqs, int nrxqsets)
504 {
505 	struct enetc_softc *sc;
506 	struct enetc_rx_queue *queue;
507 	int i;
508 
509 	sc = iflib_get_softc(ctx);
510 	MPASS(nrxqs == 1);
511 
512 	sc->rx_queues = mallocarray(sc->rx_num_queues,
513 	    sizeof(struct enetc_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
514 	if (sc->rx_queues == NULL) {
515 		device_printf(sc->dev,
516 		    "Failed to allocate memory for RX queues.\n");
517 		return (ENOMEM);
518 	}
519 
520 	for (i = 0; i < sc->rx_num_queues; i++) {
521 		queue = &sc->rx_queues[i];
522 		queue->sc = sc;
523 		queue->qid = i;
524 		queue->ring = (union enetc_rx_bd*)(vaddrs[i]);
525 		queue->ring_paddr = paddrs[i];
526 	}
527 
528 	return (0);
529 }
530 
531 static void
532 enetc_queues_free(if_ctx_t ctx)
533 {
534 	struct enetc_softc *sc;
535 
536 	sc = iflib_get_softc(ctx);
537 
538 	if (sc->tx_queues != NULL) {
539 		free(sc->tx_queues, M_DEVBUF);
540 		sc->tx_queues = NULL;
541 	}
542 	if (sc->rx_queues != NULL) {
543 		free(sc->rx_queues, M_DEVBUF);
544 		sc->rx_queues = NULL;
545 	}
546 }
547 
548 static void
549 enetc_get_hwaddr(struct enetc_softc *sc)
550 {
551 	struct ether_addr hwaddr;
552 	uint16_t high;
553 	uint32_t low;
554 
555 	low = ENETC_PORT_RD4(sc, ENETC_PSIPMAR0(0));
556 	high = ENETC_PORT_RD2(sc, ENETC_PSIPMAR1(0));
557 
558 	memcpy(&hwaddr.octet[0], &low, 4);
559 	memcpy(&hwaddr.octet[4], &high, 2);
560 
561 	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
562 	    ETHER_IS_MULTICAST(hwaddr.octet) ||
563 	    ETHER_IS_ZERO(hwaddr.octet)) {
564 		ether_gen_addr(iflib_get_ifp(sc->ctx), &hwaddr);
565 		device_printf(sc->dev,
566 		    "Failed to obtain MAC address, using a random one\n");
567 		memcpy(&low, &hwaddr.octet[0], 4);
568 		memcpy(&high, &hwaddr.octet[4], 2);
569 	}
570 
571 	iflib_set_mac(sc->ctx, hwaddr.octet);
572 }
573 
574 static void
575 enetc_set_hwaddr(struct enetc_softc *sc)
576 {
577 	struct ifnet *ifp;
578 	uint16_t high;
579 	uint32_t low;
580 	uint8_t *hwaddr;
581 
582 	ifp = iflib_get_ifp(sc->ctx);
583 	hwaddr = (uint8_t*)if_getlladdr(ifp);
584 	low = *((uint32_t*)hwaddr);
585 	high = *((uint16_t*)(hwaddr+4));
586 
587 	ENETC_PORT_WR4(sc, ENETC_PSIPMAR0(0), low);
588 	ENETC_PORT_WR2(sc, ENETC_PSIPMAR1(0), high);
589 }
590 
591 static int
592 enetc_setup_rss(struct enetc_softc *sc)
593 {
594 	struct iflib_dma_info dma;
595 	int error, i, buckets_num = 0;
596 	uint8_t *rss_table;
597 	uint32_t reg;
598 
599 	reg = ENETC_RD4(sc, ENETC_SIPCAPR0);
600 	if (reg & ENETC_SIPCAPR0_RSS) {
601 		reg = ENETC_RD4(sc, ENETC_SIRSSCAPR);
602 		buckets_num = ENETC_SIRSSCAPR_GET_NUM_RSS(reg);
603         }
604 	if (buckets_num == 0)
605 		return (ENOTSUP);
606 
607 	for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / sizeof(uint32_t); i++) {
608 		arc4rand((uint8_t *)&reg, sizeof(reg), 0);
609 		ENETC_PORT_WR4(sc, ENETC_PRSSK(i), reg);
610 	}
611 
612 	ENETC_WR4(sc, ENETC_SIRBGCR, sc->rx_num_queues);
613 
614 	error = iflib_dma_alloc_align(sc->ctx,
615 	    buckets_num * sizeof(*rss_table),
616 	    ENETC_RING_ALIGN,
617 	    &dma,
618 	    0);
619 	if (error != 0) {
620 		device_printf(sc->dev, "Failed to allocate DMA buffer for RSS\n");
621 		return (error);
622 	}
623 	rss_table = (uint8_t *)dma.idi_vaddr;
624 
625 	for (i = 0; i < buckets_num; i++)
626 		rss_table[i] = i % sc->rx_num_queues;
627 
628 	error = enetc_ctrl_send(sc, (BDCR_CMD_RSS << 8) | BDCR_CMD_RSS_WRITE,
629 	    buckets_num * sizeof(*rss_table), &dma);
630 	if (error != 0)
631 		device_printf(sc->dev, "Failed to setup RSS table\n");
632 
633 	iflib_dma_free(&dma);
634 
635 	return (error);
636 }
637 
638 static int
639 enetc_ctrl_send(struct enetc_softc *sc, uint16_t cmd, uint16_t size,
640     iflib_dma_info_t dma)
641 {
642 	struct enetc_ctrl_queue *queue;
643 	struct enetc_cbd *desc;
644 	int timeout = 1000;
645 
646 	queue = &sc->ctrl_queue;
647 	desc = &queue->ring[queue->pidx];
648 
649 	if (++queue->pidx == ENETC_MIN_DESC)
650 		queue->pidx = 0;
651 
652 	desc->addr[0] = (uint32_t)dma->idi_paddr;
653 	desc->addr[1] = (uint32_t)(dma->idi_paddr >> 32);
654 	desc->index = 0;
655 	desc->length = (uint16_t)size;
656 	desc->cmd = (uint8_t)cmd;
657 	desc->cls = (uint8_t)(cmd >> 8);
658 	desc->status_flags = 0;
659 
660 	/* Sync command packet, */
661 	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_PREWRITE);
662 	/* and the control ring. */
663 	bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE);
664 	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
665 
666 	while (--timeout != 0) {
667 		DELAY(20);
668 		if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx)
669 			break;
670 	}
671 
672 	if (timeout == 0)
673 		return (ETIMEDOUT);
674 
675 	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD);
676 	return (0);
677 }
678 
679 static void
680 enetc_init_hw(struct enetc_softc *sc)
681 {
682 	uint32_t val;
683 	int error;
684 
685 	ENETC_PORT_WR4(sc, ENETC_PM0_CMD_CFG,
686 	    ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
687 	    ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
688 	ENETC_PORT_WR4(sc, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
689 	val = ENETC_PSICFGR0_SET_TXBDR(sc->tx_num_queues);
690 	val |= ENETC_PSICFGR0_SET_RXBDR(sc->rx_num_queues);
691 	val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
692 	ENETC_PORT_WR4(sc, ENETC_PSICFGR0(0), val);
693 	ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1));
694 	ENETC_PORT_WR4(sc, ENETC_PVCLCTR,  ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
695 	ENETC_PORT_WR4(sc, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
696 	ENETC_PORT_WR4(sc, ENETC_PAR_PORT_CFG, ENETC_PAR_PORT_L4CD);
697 	ENETC_PORT_WR4(sc, ENETC_PMR, ENETC_PMR_SI0EN | ENETC_PMR_PSPEED_1000M);
698 
699 	ENETC_WR4(sc, ENETC_SICAR0,
700 	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
701 	ENETC_WR4(sc, ENETC_SICAR1, ENETC_SICAR_MSI);
702 	ENETC_WR4(sc, ENETC_SICAR2,
703 	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
704 
705 	enetc_init_ctrl(sc);
706 	error = enetc_setup_rss(sc);
707 	if (error != 0)
708 		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN);
709 	else
710 		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN | ENETC_SIMR_RSSE);
711 
712 }
713 
714 static void
715 enetc_init_ctrl(struct enetc_softc *sc)
716 {
717 	struct enetc_ctrl_queue *queue = &sc->ctrl_queue;
718 
719 	ENETC_WR4(sc, ENETC_SICBDRBAR0,
720 	    (uint32_t)queue->dma.idi_paddr);
721 	ENETC_WR4(sc, ENETC_SICBDRBAR1,
722 	    (uint32_t)(queue->dma.idi_paddr >> 32));
723 	ENETC_WR4(sc, ENETC_SICBDRLENR,
724 	    queue->dma.idi_size / sizeof(struct enetc_cbd));
725 
726 	queue->pidx = 0;
727 	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
728 	ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx);
729 	ENETC_WR4(sc, ENETC_SICBDRMR, ENETC_SICBDRMR_EN);
730 }
731 
732 static void
733 enetc_init_tx(struct enetc_softc *sc)
734 {
735 	struct enetc_tx_queue *queue;
736 	int i;
737 
738 	for (i = 0; i < sc->tx_num_queues; i++) {
739 		queue = &sc->tx_queues[i];
740 
741 		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR0,
742 		    (uint32_t)queue->ring_paddr);
743 		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR1,
744 		    (uint32_t)(queue->ring_paddr >> 32));
745 		ENETC_TXQ_WR4(sc, i, ENETC_TBLENR, sc->tx_queue_size);
746 
747 		/*
748 		 * Even though it is undoccumented resetting the TX ring
749 		 * indices results in TX hang.
750 		 * Do the same as Linux and simply keep those unchanged
751 		 * for the drivers lifetime.
752 		 */
753 #if 0
754 		ENETC_TXQ_WR4(sc, i, ENETC_TBPIR, 0);
755 		ENETC_TXQ_WR4(sc, i, ENETC_TBCIR, 0);
756 #endif
757 		ENETC_TXQ_WR4(sc, i, ENETC_TBMR, ENETC_TBMR_EN);
758 	}
759 
760 }
761 
762 static void
763 enetc_init_rx(struct enetc_softc *sc)
764 {
765 	struct enetc_rx_queue *queue;
766 	uint32_t rx_buf_size;
767 	int i;
768 
769 	rx_buf_size = iflib_get_rx_mbuf_sz(sc->ctx);
770 
771 	for (i = 0; i < sc->rx_num_queues; i++) {
772 		queue = &sc->rx_queues[i];
773 
774 		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR0,
775 		    (uint32_t)queue->ring_paddr);
776 		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR1,
777 		    (uint32_t)(queue->ring_paddr >> 32));
778 		ENETC_RXQ_WR4(sc, i, ENETC_RBLENR, sc->rx_queue_size);
779 		ENETC_RXQ_WR4(sc, i, ENETC_RBBSR, rx_buf_size);
780 		ENETC_RXQ_WR4(sc, i, ENETC_RBPIR, 0);
781 		ENETC_RXQ_WR4(sc, i, ENETC_RBCIR, 0);
782 		queue->enabled = false;
783 	}
784 }
785 
786 static u_int
787 enetc_hash_mac(void *arg, struct sockaddr_dl *sdl, u_int cnt)
788 {
789 	uint64_t *bitmap = arg;
790 	uint64_t address = 0;
791 	uint8_t hash = 0;
792 	bool bit;
793 	int i, j;
794 
795 	bcopy(LLADDR(sdl), &address, ETHER_ADDR_LEN);
796 
797 	/*
798 	 * The six bit hash is calculated by xoring every
799 	 * 6th bit of the address.
800 	 * It is then used as an index in a bitmap that is
801 	 * written to the device.
802 	 */
803 	for (i = 0; i < 6; i++) {
804 		bit = 0;
805 		for (j = 0; j < 8; j++)
806 			bit ^= address & BIT(i + j*6);
807 
808 		hash |= bit << i;
809 	}
810 
811 	*bitmap |= (1 << hash);
812 	return (1);
813 }
814 
815 static void
816 enetc_setup_multicast(if_ctx_t ctx)
817 {
818 	struct enetc_softc *sc;
819 	struct ifnet *ifp;
820 	uint64_t bitmap = 0;
821 	uint8_t revid;
822 
823 	sc = iflib_get_softc(ctx);
824 	ifp = iflib_get_ifp(ctx);
825 	revid = pci_get_revid(sc->dev);
826 
827 	if_foreach_llmaddr(ifp, enetc_hash_mac, &bitmap);
828 
829 	/*
830 	 * In revid 1 of this chip the positions multicast and unicast
831 	 * hash filter registers are flipped.
832 	 */
833 	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR0(0, revid == 1), bitmap & UINT32_MAX);
834 	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR1(0), bitmap >> 32);
835 
836 }
837 
838 static uint8_t
839 enetc_hash_vid(uint16_t vid)
840 {
841 	uint8_t hash = 0;
842 	bool bit;
843 	int i;
844 
845 	for (i = 0;i < 6;i++) {
846 		bit = vid & BIT(i);
847 		bit ^= vid & BIT(i + 6);
848 		hash |= bit << i;
849 	}
850 
851 	return (hash);
852 }
853 
854 static void
855 enetc_vlan_register(if_ctx_t ctx, uint16_t vid)
856 {
857 	struct enetc_softc *sc;
858 	uint8_t hash;
859 	uint64_t bitmap;
860 
861 	sc = iflib_get_softc(ctx);
862 	hash = enetc_hash_vid(vid);
863 
864 	/* Check if hash is alredy present in the bitmap. */
865 	if (++sc->vlan_bitmap[hash] != 1)
866 		return;
867 
868 	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
869 	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
870 	bitmap |= BIT(hash);
871 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
872 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
873 }
874 
875 static void
876 enetc_vlan_unregister(if_ctx_t ctx, uint16_t vid)
877 {
878 	struct enetc_softc *sc;
879 	uint8_t hash;
880 	uint64_t bitmap;
881 
882 	sc = iflib_get_softc(ctx);
883 	hash = enetc_hash_vid(vid);
884 
885 	MPASS(sc->vlan_bitmap[hash] > 0);
886 	if (--sc->vlan_bitmap[hash] != 0)
887 		return;
888 
889 	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
890 	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
891 	bitmap &= ~BIT(hash);
892 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
893 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
894 }
895 
896 static void
897 enetc_init(if_ctx_t ctx)
898 {
899 	struct enetc_softc *sc;
900 	struct mii_data *miid;
901 	struct ifnet *ifp;
902 	uint16_t max_frame_length;
903 	int baudrate;
904 
905 	sc = iflib_get_softc(ctx);
906 	ifp = iflib_get_ifp(ctx);
907 
908 	max_frame_length = sc->shared->isc_max_frame_size;
909 	MPASS(max_frame_length < ENETC_MAX_FRAME_LEN);
910 
911 	/* Set max RX and TX frame lengths. */
912 	ENETC_PORT_WR4(sc, ENETC_PM0_MAXFRM, max_frame_length);
913 	ENETC_PORT_WR4(sc, ENETC_PTCMSDUR(0), max_frame_length);
914 	ENETC_PORT_WR4(sc, ENETC_PTXMBAR, 2 * max_frame_length);
915 
916 	/* Set "VLAN promiscious" mode if filtering is disabled. */
917 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
918 		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
919 		    ENETC_PSIPVMR_SET_VUTA(1) | ENETC_PSIPVMR_SET_VP(1));
920 	else
921 		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
922 		    ENETC_PSIPVMR_SET_VUTA(1));
923 
924 	sc->rbmr = ENETC_RBMR_EN | ENETC_RBMR_AL;
925 
926 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
927 		sc->rbmr |= ENETC_RBMR_VTE;
928 
929 	/* Write MAC address to hardware. */
930 	enetc_set_hwaddr(sc);
931 
932 	enetc_init_tx(sc);
933 	enetc_init_rx(sc);
934 
935 	if (sc->fixed_link) {
936 		baudrate = ifmedia_baudrate(sc->fixed_ifmedia.ifm_cur->ifm_media);
937 		iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
938 	} else {
939 		/*
940 		 * Can't return an error from this function, there is not much
941 		 * we can do if this fails.
942 		 */
943 		miid = device_get_softc(sc->miibus);
944 		(void)mii_mediachg(miid);
945 	}
946 
947 	enetc_promisc_set(ctx, if_getflags(ifp));
948 }
949 
950 static void
951 enetc_stop(if_ctx_t ctx)
952 {
953 	struct enetc_softc *sc;
954 	int i;
955 
956 	sc = iflib_get_softc(ctx);
957 
958 	for (i = 0; i < sc->tx_num_queues; i++)
959 		ENETC_TXQ_WR4(sc, i, ENETC_TBMR, 0);
960 
961 	for (i = 0; i < sc->rx_num_queues; i++)
962 		ENETC_RXQ_WR4(sc, i, ENETC_RBMR, 0);
963 }
964 
965 static int
966 enetc_msix_intr_assign(if_ctx_t ctx, int msix)
967 {
968 	struct enetc_softc *sc;
969 	struct enetc_rx_queue *rx_queue;
970 	struct enetc_tx_queue *tx_queue;
971 	int vector = 0, i, error;
972 	char irq_name[16];
973 
974 	sc = iflib_get_softc(ctx);
975 
976 	MPASS(sc->rx_num_queues + 1 <= ENETC_MSIX_COUNT);
977 	MPASS(sc->rx_num_queues == sc->tx_num_queues);
978 
979 	for (i = 0; i < sc->rx_num_queues; i++, vector++) {
980 		rx_queue = &sc->rx_queues[i];
981 		snprintf(irq_name, sizeof(irq_name), "rxtxq%d", i);
982 		error = iflib_irq_alloc_generic(ctx,
983 		    &rx_queue->irq, vector + 1, IFLIB_INTR_RXTX,
984 		    NULL, rx_queue, i, irq_name);
985 		if (error != 0)
986 			goto fail;
987 
988 		ENETC_WR4(sc, ENETC_SIMSIRRV(i), vector);
989 		ENETC_RXQ_WR4(sc, i, ENETC_RBICR1, ENETC_RX_INTR_TIME_THR);
990 		ENETC_RXQ_WR4(sc, i, ENETC_RBICR0,
991 		    ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR));
992 	}
993 	vector = 0;
994 	for (i = 0;i < sc->tx_num_queues; i++, vector++) {
995 		tx_queue = &sc->tx_queues[i];
996 		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
997 		iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
998 		    IFLIB_INTR_TX, tx_queue, i, irq_name);
999 
1000 		ENETC_WR4(sc, ENETC_SIMSITRV(i), vector);
1001 	}
1002 
1003 	return (0);
1004 fail:
1005 	for (i = 0; i < sc->rx_num_queues; i++) {
1006 		rx_queue = &sc->rx_queues[i];
1007 		iflib_irq_free(ctx, &rx_queue->irq);
1008 	}
1009 	return (error);
1010 }
1011 
1012 static int
1013 enetc_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1014 {
1015 	struct enetc_softc *sc;
1016 
1017 	sc = iflib_get_softc(ctx);
1018 	ENETC_TXQ_RD4(sc, qid, ENETC_TBIDR);
1019 	return (0);
1020 }
1021 
1022 static int
1023 enetc_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1024 {
1025 	struct enetc_softc *sc;
1026 
1027 	sc = iflib_get_softc(ctx);
1028 	ENETC_RXQ_RD4(sc, qid, ENETC_RBIDR);
1029 	return (0);
1030 }
1031 static void
1032 enetc_intr_enable(if_ctx_t ctx)
1033 {
1034 	struct enetc_softc *sc;
1035 	int i;
1036 
1037 	sc = iflib_get_softc(ctx);
1038 
1039 	for (i = 0; i < sc->rx_num_queues; i++)
1040 		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
1041 
1042 	for (i = 0; i < sc->tx_num_queues; i++)
1043 		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, ENETC_TBIER_TXF);
1044 }
1045 
1046 static void
1047 enetc_intr_disable(if_ctx_t ctx)
1048 {
1049 	struct enetc_softc *sc;
1050 	int i;
1051 
1052 	sc = iflib_get_softc(ctx);
1053 
1054 	for (i = 0; i < sc->rx_num_queues; i++)
1055 		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, 0);
1056 
1057 	for (i = 0; i < sc->tx_num_queues; i++)
1058 		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, 0);
1059 }
1060 
1061 static int
1062 enetc_isc_txd_encap(void *data, if_pkt_info_t ipi)
1063 {
1064 	struct enetc_softc *sc = data;
1065 	struct enetc_tx_queue *queue;
1066 	union enetc_tx_bd *desc;
1067 	bus_dma_segment_t *segs;
1068 	qidx_t pidx, queue_len;
1069 	qidx_t i = 0;
1070 
1071 	queue = &sc->tx_queues[ipi->ipi_qsidx];
1072 	segs = ipi->ipi_segs;
1073 	pidx = ipi->ipi_pidx;
1074 	queue_len = sc->tx_queue_size;
1075 
1076 	/*
1077 	 * First descriptor is special. We use it to set frame
1078 	 * related information and offloads, e.g. VLAN tag.
1079 	 */
1080 	desc = &queue->ring[pidx];
1081 	bzero(desc, sizeof(*desc));
1082 	desc->frm_len = ipi->ipi_len;
1083 	desc->addr = segs[i].ds_addr;
1084 	desc->buf_len = segs[i].ds_len;
1085 	if (ipi->ipi_flags & IPI_TX_INTR)
1086 		desc->flags = ENETC_TXBD_FLAGS_FI;
1087 
1088 	i++;
1089 	if (++pidx == queue_len)
1090 		pidx = 0;
1091 
1092 	if (ipi->ipi_mflags & M_VLANTAG) {
1093 		/* VLAN tag is inserted in a separate descriptor. */
1094 		desc->flags |= ENETC_TXBD_FLAGS_EX;
1095 		desc = &queue->ring[pidx];
1096 		bzero(desc, sizeof(*desc));
1097 		desc->ext.vid = ipi->ipi_vtag;
1098 		desc->ext.e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
1099 		if (++pidx == queue_len)
1100 			pidx = 0;
1101 	}
1102 
1103 	/* Now add remaining descriptors. */
1104 	for (;i < ipi->ipi_nsegs; i++) {
1105 		desc = &queue->ring[pidx];
1106 		bzero(desc, sizeof(*desc));
1107 		desc->addr = segs[i].ds_addr;
1108 		desc->buf_len = segs[i].ds_len;
1109 
1110 		if (++pidx == queue_len)
1111 			pidx = 0;
1112 	}
1113 
1114 	desc->flags |= ENETC_TXBD_FLAGS_F;
1115 	ipi->ipi_new_pidx = pidx;
1116 	if (pidx == queue->next_to_clean)
1117 		queue->ring_full = true;
1118 
1119 	return (0);
1120 }
1121 
1122 static void
1123 enetc_isc_txd_flush(void *data, uint16_t qid, qidx_t pidx)
1124 {
1125 	struct enetc_softc *sc = data;
1126 
1127 	ENETC_TXQ_WR4(sc, qid, ENETC_TBPIR, pidx);
1128 }
1129 
1130 static int
1131 enetc_isc_txd_credits_update(void *data, uint16_t qid, bool clear)
1132 {
1133 	struct enetc_softc *sc = data;
1134 	struct enetc_tx_queue *queue;
1135 	qidx_t next_to_clean, next_to_process;
1136 	int clean_count;
1137 
1138 	queue = &sc->tx_queues[qid];
1139 	next_to_process =
1140 	    ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR) & ENETC_TBCIR_IDX_MASK;
1141 	next_to_clean = queue->next_to_clean;
1142 
1143 	if (next_to_clean == next_to_process && !queue->ring_full)
1144 		return (0);
1145 
1146 	if (!clear)
1147 		return (1);
1148 
1149 	clean_count = next_to_process - next_to_clean;
1150 	if (clean_count <= 0)
1151 		clean_count += sc->tx_queue_size;
1152 
1153 	queue->next_to_clean = next_to_process;
1154 	queue->ring_full = false;
1155 
1156 	return (clean_count);
1157 }
1158 
1159 static int
1160 enetc_isc_rxd_available(void *data, uint16_t qid, qidx_t pidx, qidx_t budget)
1161 {
1162 	struct enetc_softc *sc = data;
1163 	struct enetc_rx_queue *queue;
1164 	qidx_t hw_pidx, queue_len;
1165 	union enetc_rx_bd *desc;
1166 	int count = 0;
1167 
1168 	queue = &sc->rx_queues[qid];
1169 	desc = &queue->ring[pidx];
1170 	queue_len = sc->rx_queue_size;
1171 
1172 	if (desc->r.lstatus == 0)
1173 		return (0);
1174 
1175 	if (budget == 1)
1176 		return (1);
1177 
1178 	hw_pidx = ENETC_RXQ_RD4(sc, qid, ENETC_RBPIR);
1179 	while (pidx != hw_pidx && count < budget) {
1180 		desc = &queue->ring[pidx];
1181 		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
1182 			count++;
1183 
1184 		if (++pidx == queue_len)
1185 			pidx = 0;
1186 	}
1187 
1188 	return (count);
1189 }
1190 
1191 static int
1192 enetc_isc_rxd_pkt_get(void *data, if_rxd_info_t ri)
1193 {
1194 	struct enetc_softc *sc = data;
1195 	struct enetc_rx_queue *queue;
1196 	union enetc_rx_bd *desc;
1197 	uint16_t buf_len, pkt_size = 0;
1198 	qidx_t cidx, queue_len;
1199 	uint32_t status;
1200 	int i;
1201 
1202 	cidx = ri->iri_cidx;
1203 	queue = &sc->rx_queues[ri->iri_qsidx];
1204 	desc = &queue->ring[cidx];
1205 	status = desc->r.lstatus;
1206 	queue_len = sc->rx_queue_size;
1207 
1208 	/*
1209 	 * Ready bit will be set only when all descriptors
1210 	 * in the chain have been processed.
1211 	 */
1212 	if ((status & ENETC_RXBD_LSTATUS_R) == 0)
1213 		return (EAGAIN);
1214 
1215 	/* Pass RSS hash. */
1216 	if (status & ENETC_RXBD_FLAG_RSSV) {
1217 		ri->iri_flowid = desc->r.rss_hash;
1218 		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
1219 	}
1220 
1221 	/* Pass IP checksum status. */
1222 	ri->iri_csum_flags = CSUM_IP_CHECKED;
1223 	if ((desc->r.parse_summary & ENETC_RXBD_PARSER_ERROR) == 0)
1224 		ri->iri_csum_flags |= CSUM_IP_VALID;
1225 
1226 	/* Pass extracted VLAN tag. */
1227 	if (status & ENETC_RXBD_FLAG_VLAN) {
1228 		ri->iri_vtag = desc->r.vlan_opt;
1229 		ri->iri_flags = M_VLANTAG;
1230 	}
1231 
1232 	for (i = 0; i < ENETC_MAX_SCATTER; i++) {
1233 		buf_len = desc->r.buf_len;
1234 		ri->iri_frags[i].irf_idx = cidx;
1235 		ri->iri_frags[i].irf_len = buf_len;
1236 		pkt_size += buf_len;
1237 		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
1238 			break;
1239 
1240 		if (++cidx == queue_len)
1241 			cidx = 0;
1242 
1243 		desc = &queue->ring[cidx];
1244 	}
1245 	ri->iri_nfrags = i + 1;
1246 	ri->iri_len = pkt_size + ENETC_RX_IP_ALIGN;
1247 	ri->iri_pad = ENETC_RX_IP_ALIGN;
1248 
1249 	MPASS(desc->r.lstatus & ENETC_RXBD_LSTATUS_F);
1250 	if (status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))
1251 		return (EBADMSG);
1252 
1253 	return (0);
1254 }
1255 
1256 static void
1257 enetc_isc_rxd_refill(void *data, if_rxd_update_t iru)
1258 {
1259 	struct enetc_softc *sc = data;
1260 	struct enetc_rx_queue *queue;
1261 	union enetc_rx_bd *desc;
1262 	qidx_t pidx, queue_len;
1263 	uint64_t *paddrs;
1264 	int i, count;
1265 
1266 	queue = &sc->rx_queues[iru->iru_qsidx];
1267 	paddrs = iru->iru_paddrs;
1268 	pidx = iru->iru_pidx;
1269 	count = iru->iru_count;
1270 	queue_len = sc->rx_queue_size;
1271 
1272 	for (i = 0; i < count; i++) {
1273 		desc = &queue->ring[pidx];
1274 		bzero(desc, sizeof(*desc));
1275 
1276 		desc->w.addr = paddrs[i];
1277 		if (++pidx == queue_len)
1278 			pidx = 0;
1279 	}
1280 	/*
1281 	 * After enabling the queue NIC will prefetch the first
1282 	 * 8 descriptors. It probably assumes that the RX is fully
1283 	 * refilled when cidx == pidx.
1284 	 * Enable it only if we have enough decriptors ready on the ring.
1285 	 */
1286 	if (!queue->enabled && pidx >= 8) {
1287 		ENETC_RXQ_WR4(sc, iru->iru_qsidx, ENETC_RBMR, sc->rbmr);
1288 		queue->enabled = true;
1289 	}
1290 }
1291 
1292 static void
1293 enetc_isc_rxd_flush(void *data, uint16_t qid, uint8_t flid, qidx_t pidx)
1294 {
1295 	struct enetc_softc *sc = data;
1296 
1297 	ENETC_RXQ_WR4(sc, qid, ENETC_RBCIR, pidx);
1298 }
1299 
1300 static uint64_t
1301 enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
1302 {
1303 	struct enetc_softc *sc;
1304 	struct ifnet *ifp;
1305 
1306 	sc = iflib_get_softc(ctx);
1307 	ifp = iflib_get_ifp(ctx);
1308 
1309 	switch (cnt) {
1310 	case IFCOUNTER_IERRORS:
1311 		return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
1312 	case IFCOUNTER_OERRORS:
1313 		return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
1314 	default:
1315 		return (if_get_counter_default(ifp, cnt));
1316 	}
1317 }
1318 
1319 static int
1320 enetc_mtu_set(if_ctx_t ctx, uint32_t mtu)
1321 {
1322 	struct enetc_softc *sc = iflib_get_softc(ctx);
1323 	uint32_t max_frame_size;
1324 
1325 	max_frame_size = mtu +
1326 	    ETHER_HDR_LEN +
1327 	    ETHER_CRC_LEN +
1328 	    sizeof(struct ether_vlan_header);
1329 
1330 	if (max_frame_size > ENETC_MAX_FRAME_LEN)
1331 		return (EINVAL);
1332 
1333 	sc->shared->isc_max_frame_size = max_frame_size;
1334 
1335 	return (0);
1336 }
1337 
1338 static int
1339 enetc_promisc_set(if_ctx_t ctx, int flags)
1340 {
1341 	struct enetc_softc *sc;
1342 	uint32_t reg = 0;
1343 
1344 	sc = iflib_get_softc(ctx);
1345 
1346 	if (flags & IFF_PROMISC)
1347 		reg = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
1348 	else if (flags & IFF_ALLMULTI)
1349 		reg = ENETC_PSIPMR_SET_MP(0);
1350 
1351 	ENETC_PORT_WR4(sc, ENETC_PSIPMR, reg);
1352 
1353 	return (0);
1354 }
1355 
1356 static void
1357 enetc_timer(if_ctx_t ctx, uint16_t qid)
1358 {
1359 	/*
1360 	 * Poll PHY status. Do this only for qid 0 to save
1361 	 * some cycles.
1362 	 */
1363 	if (qid == 0)
1364 		iflib_admin_intr_deferred(ctx);
1365 }
1366 
1367 static void
1368 enetc_update_admin_status(if_ctx_t ctx)
1369 {
1370 	struct enetc_softc *sc;
1371 	struct mii_data *miid;
1372 
1373 	sc = iflib_get_softc(ctx);
1374 
1375 	if (!sc->fixed_link) {
1376 		miid = device_get_softc(sc->miibus);
1377 		mii_tick(miid);
1378 	}
1379 }
1380 
1381 static int
1382 enetc_miibus_readreg(device_t dev, int phy, int reg)
1383 {
1384 	struct enetc_softc *sc;
1385 
1386 	sc = iflib_get_softc(device_get_softc(dev));
1387 	return (enetc_mdio_read(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
1388 	    phy, reg));
1389 }
1390 
1391 static int
1392 enetc_miibus_writereg(device_t dev, int phy, int reg, int data)
1393 {
1394 	struct enetc_softc *sc;
1395 
1396 	sc = iflib_get_softc(device_get_softc(dev));
1397 	return (enetc_mdio_write(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
1398 	    phy, reg, data));
1399 }
1400 
1401 static void
1402 enetc_miibus_linkchg(device_t dev)
1403 {
1404 
1405 	enetc_miibus_statchg(dev);
1406 }
1407 
1408 static void
1409 enetc_miibus_statchg(device_t dev)
1410 {
1411 	struct enetc_softc *sc;
1412 	struct mii_data *miid;
1413 	int link_state, baudrate;
1414 
1415 	sc = iflib_get_softc(device_get_softc(dev));
1416 	miid = device_get_softc(sc->miibus);
1417 
1418 	baudrate = ifmedia_baudrate(miid->mii_media_active);
1419 	if (miid->mii_media_status & IFM_AVALID) {
1420 		if (miid->mii_media_status & IFM_ACTIVE)
1421 			link_state = LINK_STATE_UP;
1422 		else
1423 			link_state = LINK_STATE_DOWN;
1424 	} else {
1425 		link_state = LINK_STATE_UNKNOWN;
1426 	}
1427 
1428 	iflib_link_state_change(sc->ctx, link_state, baudrate);
1429 
1430 }
1431 
1432 static int
1433 enetc_media_change(if_t ifp)
1434 {
1435 	struct enetc_softc *sc;
1436 	struct mii_data *miid;
1437 
1438 	sc = iflib_get_softc(ifp->if_softc);
1439 	miid = device_get_softc(sc->miibus);
1440 
1441 	mii_mediachg(miid);
1442 	return (0);
1443 }
1444 
1445 static void
1446 enetc_media_status(if_t ifp, struct ifmediareq* ifmr)
1447 {
1448 	struct enetc_softc *sc;
1449 	struct mii_data *miid;
1450 
1451 	sc = iflib_get_softc(ifp->if_softc);
1452 	miid = device_get_softc(sc->miibus);
1453 
1454 	mii_pollstat(miid);
1455 
1456 	ifmr->ifm_active = miid->mii_media_active;
1457 	ifmr->ifm_status = miid->mii_media_status;
1458 }
1459 
1460 static int
1461 enetc_fixed_media_change(if_t ifp)
1462 {
1463 
1464 	if_printf(ifp, "Can't change media in fixed-link mode.\n");
1465 	return (0);
1466 }
1467 static void
1468 enetc_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
1469 {
1470 	struct enetc_softc *sc;
1471 
1472 	sc = iflib_get_softc(ifp->if_softc);
1473 
1474 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1475 	ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media;
1476 	return;
1477 }
1478