xref: /freebsd/sys/dev/enetc/if_enetc.c (revision 5def4c47d4bd90b209b9b4a4ba9faec15846d8fd)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2021 Alstom Group.
5  * Copyright (c) 2021 Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/rman.h>
37 #include <sys/socket.h>
38 #include <sys/sockio.h>
39 
40 #include <machine/bus.h>
41 #include <machine/resource.h>
42 
43 #include <net/ethernet.h>
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_var.h>
47 #include <net/if_types.h>
48 #include <net/if_media.h>
49 #include <net/iflib.h>
50 
51 #include <dev/enetc/enetc_hw.h>
52 #include <dev/enetc/enetc.h>
53 #include <dev/enetc/enetc_mdio.h>
54 #include <dev/mii/mii.h>
55 #include <dev/mii/miivar.h>
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
58 
59 #include <dev/ofw/ofw_bus.h>
60 #include <dev/ofw/ofw_bus_subr.h>
61 
62 #include "ifdi_if.h"
63 #include "miibus_if.h"
64 
65 static device_register_t		enetc_register;
66 
67 static ifdi_attach_pre_t		enetc_attach_pre;
68 static ifdi_attach_post_t		enetc_attach_post;
69 static ifdi_detach_t			enetc_detach;
70 
71 static ifdi_tx_queues_alloc_t		enetc_tx_queues_alloc;
72 static ifdi_rx_queues_alloc_t		enetc_rx_queues_alloc;
73 static ifdi_queues_free_t		enetc_queues_free;
74 
75 static ifdi_init_t			enetc_init;
76 static ifdi_stop_t			enetc_stop;
77 
78 static ifdi_msix_intr_assign_t		enetc_msix_intr_assign;
79 static ifdi_tx_queue_intr_enable_t	enetc_tx_queue_intr_enable;
80 static ifdi_rx_queue_intr_enable_t	enetc_rx_queue_intr_enable;
81 static ifdi_intr_enable_t		enetc_intr_enable;
82 static ifdi_intr_disable_t		enetc_intr_disable;
83 
84 static int	enetc_isc_txd_encap(void*, if_pkt_info_t);
85 static void	enetc_isc_txd_flush(void*, uint16_t, qidx_t);
86 static int	enetc_isc_txd_credits_update(void*, uint16_t, bool);
87 static int	enetc_isc_rxd_available(void*, uint16_t, qidx_t, qidx_t);
88 static int	enetc_isc_rxd_pkt_get(void*, if_rxd_info_t);
89 static void	enetc_isc_rxd_refill(void*, if_rxd_update_t);
90 static void	enetc_isc_rxd_flush(void*, uint16_t, uint8_t, qidx_t);
91 
92 static void	enetc_vlan_register(if_ctx_t, uint16_t);
93 static void	enetc_vlan_unregister(if_ctx_t, uint16_t);
94 
95 static uint64_t	enetc_get_counter(if_ctx_t, ift_counter);
96 static int	enetc_promisc_set(if_ctx_t, int);
97 static int	enetc_mtu_set(if_ctx_t, uint32_t);
98 static void	enetc_setup_multicast(if_ctx_t);
99 static void	enetc_timer(if_ctx_t, uint16_t);
100 static void	enetc_update_admin_status(if_ctx_t);
101 
102 static miibus_readreg_t		enetc_miibus_readreg;
103 static miibus_writereg_t	enetc_miibus_writereg;
104 static miibus_linkchg_t		enetc_miibus_linkchg;
105 static miibus_statchg_t		enetc_miibus_statchg;
106 
107 static int			enetc_media_change(if_t);
108 static void			enetc_media_status(if_t, struct ifmediareq*);
109 
110 static int			enetc_fixed_media_change(if_t);
111 static void			enetc_fixed_media_status(if_t, struct ifmediareq*);
112 
113 static void			enetc_max_nqueues(struct enetc_softc*, int*, int*);
114 static int			enetc_setup_phy(struct enetc_softc*);
115 
116 static void			enetc_get_hwaddr(struct enetc_softc*);
117 static void			enetc_set_hwaddr(struct enetc_softc*);
118 static int			enetc_setup_rss(struct enetc_softc*);
119 
120 static void			enetc_init_hw(struct enetc_softc*);
121 static void			enetc_init_ctrl(struct enetc_softc*);
122 static void			enetc_init_tx(struct enetc_softc*);
123 static void			enetc_init_rx(struct enetc_softc*);
124 
125 static int			enetc_ctrl_send(struct enetc_softc*,
126 				    uint16_t, uint16_t, iflib_dma_info_t);
127 
128 static const char enetc_driver_version[] = "1.0.0";
129 
130 static pci_vendor_info_t enetc_vendor_info_array[] = {
131 	PVID(PCI_VENDOR_FREESCALE, ENETC_DEV_ID_PF,
132 	    "Freescale ENETC PCIe Gigabit Ethernet Controller"),
133 	PVID_END
134 };
135 
136 #define ENETC_IFCAPS (IFCAP_VLAN_MTU | IFCAP_RXCSUM | IFCAP_JUMBO_MTU | \
137 	IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)
138 
139 static device_method_t enetc_methods[] = {
140 	DEVMETHOD(device_register,	enetc_register),
141 	DEVMETHOD(device_probe,		iflib_device_probe),
142 	DEVMETHOD(device_attach,	iflib_device_attach),
143 	DEVMETHOD(device_detach,	iflib_device_detach),
144 	DEVMETHOD(device_shutdown,	iflib_device_shutdown),
145 	DEVMETHOD(device_suspend,	iflib_device_suspend),
146 	DEVMETHOD(device_resume,	iflib_device_resume),
147 
148 	DEVMETHOD(miibus_readreg,	enetc_miibus_readreg),
149 	DEVMETHOD(miibus_writereg,	enetc_miibus_writereg),
150 	DEVMETHOD(miibus_linkchg,	enetc_miibus_linkchg),
151 	DEVMETHOD(miibus_statchg,	enetc_miibus_statchg),
152 
153 	DEVMETHOD_END
154 };
155 
156 static driver_t enetc_driver = {
157 	"enetc", enetc_methods, sizeof(struct enetc_softc)
158 };
159 
160 static devclass_t enetc_devclass;
161 DRIVER_MODULE(enetc, pci, enetc_driver, enetc_devclass, NULL, NULL);
162 DRIVER_MODULE(miibus, enetc, miibus_driver, miibus_devclass, NULL, NULL);
163 MODULE_VERSION(enetc, 1);
164 
165 IFLIB_PNP_INFO(pci, enetc, enetc_vendor_info_array);
166 
167 MODULE_DEPEND(enetc, ether, 1, 1, 1);
168 MODULE_DEPEND(enetc, iflib, 1, 1, 1);
169 MODULE_DEPEND(enetc, miibus, 1, 1, 1);
170 
171 static device_method_t enetc_iflib_methods[] = {
172 	DEVMETHOD(ifdi_attach_pre,		enetc_attach_pre),
173 	DEVMETHOD(ifdi_attach_post,		enetc_attach_post),
174 	DEVMETHOD(ifdi_detach,			enetc_detach),
175 
176 	DEVMETHOD(ifdi_init,			enetc_init),
177 	DEVMETHOD(ifdi_stop,			enetc_stop),
178 
179 	DEVMETHOD(ifdi_tx_queues_alloc,		enetc_tx_queues_alloc),
180 	DEVMETHOD(ifdi_rx_queues_alloc,		enetc_rx_queues_alloc),
181 	DEVMETHOD(ifdi_queues_free,		enetc_queues_free),
182 
183 	DEVMETHOD(ifdi_msix_intr_assign,	enetc_msix_intr_assign),
184 	DEVMETHOD(ifdi_tx_queue_intr_enable,	enetc_tx_queue_intr_enable),
185 	DEVMETHOD(ifdi_rx_queue_intr_enable,	enetc_rx_queue_intr_enable),
186 	DEVMETHOD(ifdi_intr_enable,		enetc_intr_enable),
187 	DEVMETHOD(ifdi_intr_disable,		enetc_intr_disable),
188 
189 	DEVMETHOD(ifdi_vlan_register,		enetc_vlan_register),
190 	DEVMETHOD(ifdi_vlan_unregister,		enetc_vlan_unregister),
191 
192 	DEVMETHOD(ifdi_get_counter,		enetc_get_counter),
193 	DEVMETHOD(ifdi_mtu_set,			enetc_mtu_set),
194 	DEVMETHOD(ifdi_multi_set,		enetc_setup_multicast),
195 	DEVMETHOD(ifdi_promisc_set,		enetc_promisc_set),
196 	DEVMETHOD(ifdi_timer,			enetc_timer),
197 	DEVMETHOD(ifdi_update_admin_status,	enetc_update_admin_status),
198 
199 	DEVMETHOD_END
200 };
201 
202 static driver_t enetc_iflib_driver = {
203 	"enetc", enetc_iflib_methods, sizeof(struct enetc_softc)
204 };
205 
206 static struct if_txrx enetc_txrx = {
207 	.ift_txd_encap = enetc_isc_txd_encap,
208 	.ift_txd_flush = enetc_isc_txd_flush,
209 	.ift_txd_credits_update = enetc_isc_txd_credits_update,
210 	.ift_rxd_available = enetc_isc_rxd_available,
211 	.ift_rxd_pkt_get = enetc_isc_rxd_pkt_get,
212 	.ift_rxd_refill = enetc_isc_rxd_refill,
213 	.ift_rxd_flush = enetc_isc_rxd_flush
214 };
215 
216 static struct if_shared_ctx enetc_sctx_init = {
217 	.isc_magic = IFLIB_MAGIC,
218 
219 	.isc_q_align = ENETC_RING_ALIGN,
220 
221 	.isc_tx_maxsize = ENETC_MAX_FRAME_LEN,
222 	.isc_tx_maxsegsize = PAGE_SIZE,
223 
224 	.isc_rx_maxsize = ENETC_MAX_FRAME_LEN,
225 	.isc_rx_maxsegsize = ENETC_MAX_FRAME_LEN,
226 	.isc_rx_nsegments = ENETC_MAX_SCATTER,
227 
228 	.isc_admin_intrcnt = 0,
229 
230 	.isc_nfl = 1,
231 	.isc_nrxqs = 1,
232 	.isc_ntxqs = 1,
233 
234 	.isc_vendor_info = enetc_vendor_info_array,
235 	.isc_driver_version = enetc_driver_version,
236 	.isc_driver = &enetc_iflib_driver,
237 
238 	.isc_flags = IFLIB_DRIVER_MEDIA | IFLIB_PRESERVE_TX_INDICES,
239 	.isc_ntxd_min = {ENETC_MIN_DESC},
240 	.isc_ntxd_max = {ENETC_MAX_DESC},
241 	.isc_ntxd_default = {ENETC_DEFAULT_DESC},
242 	.isc_nrxd_min = {ENETC_MIN_DESC},
243 	.isc_nrxd_max = {ENETC_MAX_DESC},
244 	.isc_nrxd_default = {ENETC_DEFAULT_DESC}
245 };
246 
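/*
 * iflib device_register method: return the shared context template,
 * but only if the device's FDT node status is "okay".
 */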
247 static void*
248 enetc_register(device_t dev)
249 {
250 
251 	if (!ofw_bus_status_okay(dev))
252 		return (NULL);
253 
254 	return (&enetc_sctx_init);
255 }
256 
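/*
 * Query the port capability register for the number of available TX/RX
 * buffer descriptor rings, capped at ENETC_MAX_QUEUES.
 */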
257 static void
258 enetc_max_nqueues(struct enetc_softc *sc, int *max_tx_nqueues,
259     int *max_rx_nqueues)
260 {
261 	uint32_t val;
262 
263 	val = ENETC_PORT_RD4(sc, ENETC_PCAPR0);
264 	*max_tx_nqueues = MIN(ENETC_PCAPR0_TXBDR(val), ENETC_MAX_QUEUES);
265 	*max_rx_nqueues = MIN(ENETC_PCAPR0_RXBDR(val), ENETC_MAX_QUEUES);
266 }
267 
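/*
 * Configure a PHY-less, fixed-link port: parse the "speed" and
 * "full-duplex" properties of the fixed-link FDT node and register a
 * single static ifmedia entry for it.
 */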
268 static int
269 enetc_setup_fixed(struct enetc_softc *sc, phandle_t node)
270 {
271 	ssize_t size;
272 	int speed;
273 
274 	size = OF_getencprop(node, "speed", &speed, sizeof(speed));
275 	if (size <= 0) {
276 		device_printf(sc->dev,
277 		    "Device has a fixed-link node with no link speed specified\n");
278 		return (ENXIO);
279 	}
280 	switch (speed) {
281 	case 10:
282 		speed = IFM_10_T;
283 		break;
284 	case 100:
285 		speed = IFM_100_TX;
286 		break;
287 	case 1000:
288 		speed = IFM_1000_T;
289 		break;
290 	default:
291 		device_printf(sc->dev, "Unsupported link speed value of %d\n",
292 		    speed);
293 		return (ENXIO);
294 	}
295 	speed |= IFM_ETHER;
296 
297 	if (OF_hasprop(node, "full-duplex"))
298 		speed |= IFM_FDX;
299 	else
300 		speed |= IFM_HDX;
301 
302 	sc->fixed_link = true;
303 
304 	ifmedia_init(&sc->fixed_ifmedia, 0, enetc_fixed_media_change,
305 	    enetc_fixed_media_status);
306 	ifmedia_add(&sc->fixed_ifmedia, speed, 0, NULL);
307 	ifmedia_set(&sc->fixed_ifmedia, speed);
308 	sc->shared->isc_media = &sc->fixed_ifmedia;
309 
310 	return (0);
311 }
312 
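/*
 * Set up link management: use a fixed-link FDT node if one is present,
 * otherwise resolve "phy-handle" to a PHY address and attach miibus.
 */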
313 static int
314 enetc_setup_phy(struct enetc_softc *sc)
315 {
316 	phandle_t node, fixed_link, phy_handle;
317 	struct mii_data *miid;
318 	int phy_addr, error;
319 	ssize_t size;
320 
321 	node = ofw_bus_get_node(sc->dev);
322 	fixed_link = ofw_bus_find_child(node, "fixed-link");
323 	if (fixed_link != 0)
324 		return (enetc_setup_fixed(sc, fixed_link));
325 
326 	size = OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle));
327 	if (size <= 0) {
328 		device_printf(sc->dev,
329 		    "Failed to acquire PHY handle from FDT.\n");
330 		return (ENXIO);
331 	}
332 	phy_handle = OF_node_from_xref(phy_handle);
333 	size = OF_getencprop(phy_handle, "reg", &phy_addr, sizeof(phy_addr));
334 	if (size <= 0) {
335 		device_printf(sc->dev, "Failed to obtain PHY address\n");
336 		return (ENXIO);
337 	}
338 	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(sc->ctx),
339 	    enetc_media_change, enetc_media_status,
340 	    BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
341 	if (error != 0) {
342 		device_printf(sc->dev, "mii_attach failed\n");
343 		return (error);
344 	}
345 	miid = device_get_softc(sc->miibus);
346 	sc->shared->isc_media = &miid->mii_media;
347 
348 	return (0);
349 }
350 
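/*
 * Early attach: map the register BAR, allocate the control BD ring and
 * fill in the iflib shared context (queue limits, descriptor sizes,
 * capabilities, MSI-X BAR) before any queues are created.
 */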
351 static int
352 enetc_attach_pre(if_ctx_t ctx)
353 {
354 	struct ifnet *ifp;
355 	if_softc_ctx_t scctx;
356 	struct enetc_softc *sc;
357 	int error, rid;
358 
359 	sc = iflib_get_softc(ctx);
360 	scctx = iflib_get_softc_ctx(ctx);
361 	sc->ctx = ctx;
362 	sc->dev = iflib_get_dev(ctx);
363 	sc->shared = scctx;
364 	ifp = iflib_get_ifp(ctx);
365 
366 	rid = PCIR_BAR(ENETC_BAR_REGS);
367 	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
368 	if (sc->regs == NULL) {
369 		device_printf(sc->dev,
370 		    "Failed to allocate BAR %d\n", ENETC_BAR_REGS);
371 		return (ENXIO);
372 	}
373 
374 	error = iflib_dma_alloc_align(ctx,
375 	    ENETC_MIN_DESC * sizeof(struct enetc_cbd),
376 	    ENETC_RING_ALIGN,
377 	    &sc->ctrl_queue.dma,
378 	    0);
379 	if (error != 0) {
380 		device_printf(sc->dev, "Failed to allocate control ring\n");
381 		goto fail;
382 	}
383 	sc->ctrl_queue.ring = (struct enetc_cbd*)sc->ctrl_queue.dma.idi_vaddr;
384 
385 	scctx->isc_txrx = &enetc_txrx;
386 	scctx->isc_tx_nsegments = ENETC_MAX_SCATTER;
387 	enetc_max_nqueues(sc, &scctx->isc_nrxqsets_max, &scctx->isc_ntxqsets_max);
388 
389 	if (scctx->isc_ntxd[0] % ENETC_DESC_ALIGN != 0) {
390 		device_printf(sc->dev,
391 		    "The number of TX descriptors has to be a multiple of %d\n",
392 		    ENETC_DESC_ALIGN);
393 		error = EINVAL;
394 		goto fail;
395 	}
396 	if (scctx->isc_nrxd[0] % ENETC_DESC_ALIGN != 0) {
397 		device_printf(sc->dev,
398 		    "The number of RX descriptors has to be a multiple of %d\n",
399 		    ENETC_DESC_ALIGN);
400 		error = EINVAL;
401 		goto fail;
402 	}
403 	scctx->isc_txqsizes[0] = scctx->isc_ntxd[0] * sizeof(union enetc_tx_bd);
404 	scctx->isc_rxqsizes[0] = scctx->isc_nrxd[0] * sizeof(union enetc_rx_bd);
405 	scctx->isc_txd_size[0] = sizeof(union enetc_tx_bd);
406 	scctx->isc_rxd_size[0] = sizeof(union enetc_rx_bd);
407 	scctx->isc_tx_csum_flags = 0;
408 	scctx->isc_capabilities = scctx->isc_capenable = ENETC_IFCAPS;
409 
410 	error = enetc_mtu_set(ctx, ETHERMTU);
411 	if (error != 0)
412 		goto fail;
413 
414 	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
415 
416 	error = enetc_setup_phy(sc);
417 	if (error != 0)
418 		goto fail;
419 
420 	enetc_get_hwaddr(sc);
421 
422 	return (0);
423 fail:
424 	enetc_detach(ctx);
425 	return (error);
426 }
427 
428 static int
429 enetc_attach_post(if_ctx_t ctx)
430 {
431 
432 	enetc_init_hw(iflib_get_softc(ctx));
433 	return (0);
434 }
435 
436 static int
437 enetc_detach(if_ctx_t ctx)
438 {
439 	struct enetc_softc *sc;
440 	int error = 0, i;
441 
442 	sc = iflib_get_softc(ctx);
443 
444 	for (i = 0; i < sc->rx_num_queues; i++)
445 		iflib_irq_free(ctx, &sc->rx_queues[i].irq);
446 
447 	if (sc->miibus != NULL)
448 		device_delete_child(sc->dev, sc->miibus);
449 
450 	if (sc->regs != NULL)
451 		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
452 		    rman_get_rid(sc->regs), sc->regs);
453 
454 	if (sc->ctrl_queue.dma.idi_size != 0)
455 		iflib_dma_free(&sc->ctrl_queue.dma);
456 
457 	return (error);
458 }
459 
460 static int
461 enetc_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
462     int ntxqs, int ntxqsets)
463 {
464 	struct enetc_softc *sc;
465 	struct enetc_tx_queue *queue;
466 	int i;
467 
468 	sc = iflib_get_softc(ctx);
469 
470 	MPASS(ntxqs == 1);
471 
472 	sc->tx_queues = mallocarray(sc->tx_num_queues,
473 	    sizeof(struct enetc_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
474 	if (sc->tx_queues == NULL) {
475 		device_printf(sc->dev,
476 		    "Failed to allocate memory for TX queues.\n");
477 		return (ENOMEM);
478 	}
479 
480 	for (i = 0; i < sc->tx_num_queues; i++) {
481 		queue = &sc->tx_queues[i];
482 		queue->sc = sc;
483 		queue->ring = (union enetc_tx_bd*)(vaddrs[i]);
484 		queue->ring_paddr = paddrs[i];
485 		queue->next_to_clean = 0;
486 		queue->ring_full = false;
487 	}
488 
489 	return (0);
490 }
491 
492 static int
493 enetc_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
494     int nrxqs, int nrxqsets)
495 {
496 	struct enetc_softc *sc;
497 	struct enetc_rx_queue *queue;
498 	int i;
499 
500 	sc = iflib_get_softc(ctx);
501 	MPASS(nrxqs == 1);
502 
503 	sc->rx_queues = mallocarray(sc->rx_num_queues,
504 	    sizeof(struct enetc_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
505 	if (sc->rx_queues == NULL) {
506 		device_printf(sc->dev,
507 		    "Failed to allocate memory for RX queues.\n");
508 		return (ENOMEM);
509 	}
510 
511 	for (i = 0; i < sc->rx_num_queues; i++) {
512 		queue = &sc->rx_queues[i];
513 		queue->sc = sc;
514 		queue->qid = i;
515 		queue->ring = (union enetc_rx_bd*)(vaddrs[i]);
516 		queue->ring_paddr = paddrs[i];
517 	}
518 
519 	return (0);
520 }
521 
522 static void
523 enetc_queues_free(if_ctx_t ctx)
524 {
525 	struct enetc_softc *sc;
526 
527 	sc = iflib_get_softc(ctx);
528 
529 	if (sc->tx_queues != NULL) {
530 		free(sc->tx_queues, M_DEVBUF);
531 		sc->tx_queues = NULL;
532 	}
533 	if (sc->rx_queues != NULL) {
534 		free(sc->rx_queues, M_DEVBUF);
535 		sc->rx_queues = NULL;
536 	}
537 }
538 
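/*
 * Read the MAC address currently programmed for SI 0 in the port
 * registers.  If it is invalid (zero, multicast or broadcast), generate
 * a random one instead.
 */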
539 static void
540 enetc_get_hwaddr(struct enetc_softc *sc)
541 {
542 	struct ether_addr hwaddr;
543 	uint16_t high;
544 	uint32_t low;
545 
546 	low = ENETC_PORT_RD4(sc, ENETC_PSIPMAR0(0));
547 	high = ENETC_PORT_RD2(sc, ENETC_PSIPMAR1(0));
548 
549 	memcpy(&hwaddr.octet[0], &low, 4);
550 	memcpy(&hwaddr.octet[4], &high, 2);
551 
552 	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
553 	    ETHER_IS_MULTICAST(hwaddr.octet) ||
554 	    ETHER_IS_ZERO(hwaddr.octet)) {
555 		ether_gen_addr(iflib_get_ifp(sc->ctx), &hwaddr);
556 		device_printf(sc->dev,
557 		    "Failed to obtain MAC address, using a random one\n");
558 		memcpy(&low, &hwaddr.octet[0], 4);
559 		memcpy(&high, &hwaddr.octet[4], 2);
560 	}
561 
562 	iflib_set_mac(sc->ctx, hwaddr.octet);
563 }
564 
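/* Write the interface link-level address back into the SI 0 MAC registers. */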
565 static void
566 enetc_set_hwaddr(struct enetc_softc *sc)
567 {
568 	struct ifnet *ifp;
569 	uint16_t high;
570 	uint32_t low;
571 	uint8_t *hwaddr;
572 
573 	ifp = iflib_get_ifp(sc->ctx);
574 	hwaddr = (uint8_t*)if_getlladdr(ifp);
575 	low = *((uint32_t*)hwaddr);
576 	high = *((uint16_t*)(hwaddr+4));
577 
578 	ENETC_PORT_WR4(sc, ENETC_PSIPMAR0(0), low);
579 	ENETC_PORT_WR2(sc, ENETC_PSIPMAR1(0), high);
580 }
581 
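/*
 * Enable receive-side scaling if the SI supports it: program a random
 * hash key and spread the indirection table buckets round-robin across
 * the RX queues.  The table is loaded through the control BD ring.
 */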
582 static int
583 enetc_setup_rss(struct enetc_softc *sc)
584 {
585 	struct iflib_dma_info dma;
586 	int error, i, buckets_num = 0;
587 	uint8_t *rss_table;
588 	uint32_t reg;
589 
590 	reg = ENETC_RD4(sc, ENETC_SIPCAPR0);
591 	if (reg & ENETC_SIPCAPR0_RSS) {
592 		reg = ENETC_RD4(sc, ENETC_SIRSSCAPR);
593 		buckets_num = ENETC_SIRSSCAPR_GET_NUM_RSS(reg);
594 	}
595 	if (buckets_num == 0)
596 		return (ENOTSUP);
597 
598 	for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / sizeof(uint32_t); i++) {
599 		arc4rand((uint8_t *)&reg, sizeof(reg), 0);
600 		ENETC_PORT_WR4(sc, ENETC_PRSSK(i), reg);
601 	}
602 
603 	ENETC_WR4(sc, ENETC_SIRBGCR, sc->rx_num_queues);
604 
605 	error = iflib_dma_alloc_align(sc->ctx,
606 	    buckets_num * sizeof(*rss_table),
607 	    ENETC_RING_ALIGN,
608 	    &dma,
609 	    0);
610 	if (error != 0) {
611 		device_printf(sc->dev, "Failed to allocate DMA buffer for RSS\n");
612 		return (error);
613 	}
614 	rss_table = (uint8_t *)dma.idi_vaddr;
615 
616 	for (i = 0; i < buckets_num; i++)
617 		rss_table[i] = i % sc->rx_num_queues;
618 
619 	error = enetc_ctrl_send(sc, (BDCR_CMD_RSS << 8) | BDCR_CMD_RSS_WRITE,
620 	    buckets_num * sizeof(*rss_table), &dma);
621 	if (error != 0)
622 		device_printf(sc->dev, "Failed to setup RSS table\n");
623 
624 	iflib_dma_free(&dma);
625 
626 	return (error);
627 }
628 
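/*
 * Post a single command descriptor on the control BD ring and busy-wait
 * for the hardware to consume it (1000 polls of 20us, i.e. roughly a
 * 20ms timeout).
 */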
629 static int
630 enetc_ctrl_send(struct enetc_softc *sc, uint16_t cmd, uint16_t size,
631     iflib_dma_info_t dma)
632 {
633 	struct enetc_ctrl_queue *queue;
634 	struct enetc_cbd *desc;
635 	int timeout = 1000;
636 
637 	queue = &sc->ctrl_queue;
638 	desc = &queue->ring[queue->pidx];
639 
640 	if (++queue->pidx == ENETC_MIN_DESC)
641 		queue->pidx = 0;
642 
643 	desc->addr[0] = (uint32_t)dma->idi_paddr;
644 	desc->addr[1] = (uint32_t)(dma->idi_paddr >> 32);
645 	desc->index = 0;
646 	desc->length = (uint16_t)size;
647 	desc->cmd = (uint8_t)cmd;
648 	desc->cls = (uint8_t)(cmd >> 8);
649 	desc->status_flags = 0;
650 
651 	/* Sync command packet, */
652 	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_PREWRITE);
653 	/* and the control ring. */
654 	bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE);
655 	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
656 
657 	while (--timeout != 0) {
658 		DELAY(20);
659 		if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx)
660 			break;
661 	}
662 
663 	if (timeout == 0)
664 		return (ETIMEDOUT);
665 
666 	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD);
667 	return (0);
668 }
669 
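/*
 * One-time hardware initialization: enable the MAC, configure SI 0
 * (ring counts, VLAN handling, coherency attributes), bring up the
 * control ring and, if supported, RSS.
 */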
670 static void
671 enetc_init_hw(struct enetc_softc *sc)
672 {
673 	uint32_t val;
674 	int error;
675 
676 	ENETC_PORT_WR4(sc, ENETC_PM0_CMD_CFG,
677 	    ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
678 	    ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
679 	ENETC_PORT_WR4(sc, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
680 	val = ENETC_PSICFGR0_SET_TXBDR(sc->tx_num_queues);
681 	val |= ENETC_PSICFGR0_SET_RXBDR(sc->rx_num_queues);
682 	val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
683 	ENETC_PORT_WR4(sc, ENETC_PSICFGR0(0), val);
684 	ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1));
685 	ENETC_PORT_WR4(sc, ENETC_PVCLCTR,  ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
686 	ENETC_PORT_WR4(sc, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
687 	ENETC_PORT_WR4(sc, ENETC_PAR_PORT_CFG, ENETC_PAR_PORT_L4CD);
688 	ENETC_PORT_WR4(sc, ENETC_PMR, ENETC_PMR_SI0EN | ENETC_PMR_PSPEED_1000M);
689 
690 	ENETC_WR4(sc, ENETC_SICAR0,
691 	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
692 	ENETC_WR4(sc, ENETC_SICAR1, ENETC_SICAR_MSI);
693 	ENETC_WR4(sc, ENETC_SICAR2,
694 	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
695 
696 	enetc_init_ctrl(sc);
697 	error = enetc_setup_rss(sc);
698 	if (error != 0)
699 		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN);
700 	else
701 		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN | ENETC_SIMR_RSSE);
702 
703 }
704 
705 static void
706 enetc_init_ctrl(struct enetc_softc *sc)
707 {
708 	struct enetc_ctrl_queue *queue = &sc->ctrl_queue;
709 
710 	ENETC_WR4(sc, ENETC_SICBDRBAR0,
711 	    (uint32_t)queue->dma.idi_paddr);
712 	ENETC_WR4(sc, ENETC_SICBDRBAR1,
713 	    (uint32_t)(queue->dma.idi_paddr >> 32));
714 	ENETC_WR4(sc, ENETC_SICBDRLENR,
715 	    queue->dma.idi_size / sizeof(struct enetc_cbd));
716 
717 	queue->pidx = 0;
718 	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
719 	ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx);
720 	ENETC_WR4(sc, ENETC_SICBDRMR, ENETC_SICBDRMR_EN);
721 }
722 
723 static void
724 enetc_init_tx(struct enetc_softc *sc)
725 {
726 	struct enetc_tx_queue *queue;
727 	int i;
728 
729 	for (i = 0; i < sc->tx_num_queues; i++) {
730 		queue = &sc->tx_queues[i];
731 
732 		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR0,
733 		    (uint32_t)queue->ring_paddr);
734 		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR1,
735 		    (uint32_t)(queue->ring_paddr >> 32));
736 		ENETC_TXQ_WR4(sc, i, ENETC_TBLENR, sc->tx_queue_size);
737 
738 		/*
739 		 * Even though it is undocumented, resetting the TX ring
740 		 * indices results in a TX hang.
741 		 * Do the same as Linux and simply keep them unchanged
742 		 * for the driver's lifetime.
743 		 */
744 #if 0
745 		ENETC_TXQ_WR4(sc, i, ENETC_TBPIR, 0);
746 		ENETC_TXQ_WR4(sc, i, ENETC_TBCIR, 0);
747 #endif
748 		ENETC_TXQ_WR4(sc, i, ENETC_TBMR, ENETC_TBMR_EN);
749 	}
750 
751 }
752 
753 static void
754 enetc_init_rx(struct enetc_softc *sc)
755 {
756 	struct enetc_rx_queue *queue;
757 	uint32_t rx_buf_size;
758 	int i;
759 
760 	rx_buf_size = iflib_get_rx_mbuf_sz(sc->ctx);
761 
762 	for (i = 0; i < sc->rx_num_queues; i++) {
763 		queue = &sc->rx_queues[i];
764 
765 		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR0,
766 		    (uint32_t)queue->ring_paddr);
767 		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR1,
768 		    (uint32_t)(queue->ring_paddr >> 32));
769 		ENETC_RXQ_WR4(sc, i, ENETC_RBLENR, sc->rx_queue_size);
770 		ENETC_RXQ_WR4(sc, i, ENETC_RBBSR, rx_buf_size);
771 		ENETC_RXQ_WR4(sc, i, ENETC_RBPIR, 0);
772 		ENETC_RXQ_WR4(sc, i, ENETC_RBCIR, 0);
773 		queue->enabled = false;
774 	}
775 }
776 
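/*
 * if_foreach_llmaddr() callback: fold one multicast MAC address into
 * the 64-bit hash-filter bitmap passed through 'arg'.  Each hash bit i
 * is the XOR of address bits i, i + 6, i + 12, ..., i + 42.
 */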
777 static u_int
778 enetc_hash_mac(void *arg, struct sockaddr_dl *sdl, u_int cnt)
779 {
780 	uint64_t *bitmap = arg;
781 	uint64_t address = 0;
782 	uint8_t hash = 0;
783 	bool bit;
784 	int i, j;
785 
786 	bcopy(LLADDR(sdl), &address, ETHER_ADDR_LEN);
787 
788 	/*
789 	 * The six-bit hash is calculated by XORing every
790 	 * 6th bit of the address.
791 	 * It is then used as an index into a bitmap that is
792 	 * written to the device.
793 	 */
794 	for (i = 0; i < 6; i++) {
795 		bit = 0;
796 		for (j = 0; j < 8; j++)
797 			bit ^= address & BIT(i + j*6);
798 
799 		hash |= bit << i;
800 	}
801 
802 	*bitmap |= (1 << hash);
803 	return (1);
804 }
805 
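/*
 * Rebuild the 64-bit multicast hash filter from the interface's
 * link-level multicast list and write it to the port registers.
 */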
806 static void
807 enetc_setup_multicast(if_ctx_t ctx)
808 {
809 	struct enetc_softc *sc;
810 	struct ifnet *ifp;
811 	uint64_t bitmap = 0;
812 	uint8_t revid;
813 
814 	sc = iflib_get_softc(ctx);
815 	ifp = iflib_get_ifp(ctx);
816 	revid = pci_get_revid(sc->dev);
817 
818 	if_foreach_llmaddr(ifp, enetc_hash_mac, &bitmap);
819 
820 	/*
821 	 * In revision 1 of this chip the positions of the multicast and
822 	 * unicast hash filter registers are swapped.
823 	 */
824 	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR0(0, revid == 1), bitmap & UINT32_MAX);
825 	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR1(0), bitmap >> 32);
826 
827 }
828 
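/*
 * Reduce a 12-bit VLAN ID to a 6-bit hash by XORing its upper six bits
 * onto its lower six bits.
 */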
829 static uint8_t
830 enetc_hash_vid(uint16_t vid)
831 {
832 	uint8_t hash = 0;
833 	bool bit;
834 	int i;
835 
836 	for (i = 0; i < 6; i++) {
837 		bit = vid & BIT(i);
838 		bit ^= vid & BIT(i + 6);
839 		hash |= bit << i;
840 	}
841 
842 	return (hash);
843 }
844 
845 static void
846 enetc_vlan_register(if_ctx_t ctx, uint16_t vid)
847 {
848 	struct enetc_softc *sc;
849 	uint8_t hash;
850 	uint64_t bitmap;
851 
852 	sc = iflib_get_softc(ctx);
853 	hash = enetc_hash_vid(vid);
854 
855 	/* Check if the hash is already present in the bitmap. */
856 	if (++sc->vlan_bitmap[hash] != 1)
857 		return;
858 
859 	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
860 	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
861 	bitmap |= BIT(hash);
862 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
863 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
864 }
865 
866 static void
867 enetc_vlan_unregister(if_ctx_t ctx, uint16_t vid)
868 {
869 	struct enetc_softc *sc;
870 	uint8_t hash;
871 	uint64_t bitmap;
872 
873 	sc = iflib_get_softc(ctx);
874 	hash = enetc_hash_vid(vid);
875 
876 	MPASS(sc->vlan_bitmap[hash] > 0);
877 	if (--sc->vlan_bitmap[hash] != 0)
878 		return;
879 
880 	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
881 	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
882 	bitmap &= ~BIT(hash);
883 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
884 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
885 }
886 
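/*
 * ifdi_init: program frame length limits, VLAN filtering mode and the
 * MAC address, bring up the TX/RX rings and kick off link negotiation
 * (or report the fixed link as up).
 */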
887 static void
888 enetc_init(if_ctx_t ctx)
889 {
890 	struct enetc_softc *sc;
891 	struct mii_data *miid;
892 	struct ifnet *ifp;
893 	uint16_t max_frame_length;
894 	int baudrate;
895 
896 	sc = iflib_get_softc(ctx);
897 	ifp = iflib_get_ifp(ctx);
898 
899 	max_frame_length = sc->shared->isc_max_frame_size;
900 	MPASS(max_frame_length < ENETC_MAX_FRAME_LEN);
901 
902 	/* Set max RX and TX frame lengths. */
903 	ENETC_PORT_WR4(sc, ENETC_PM0_MAXFRM, max_frame_length);
904 	ENETC_PORT_WR4(sc, ENETC_PTCMSDUR(0), max_frame_length);
905 	ENETC_PORT_WR4(sc, ENETC_PTXMBAR, 2 * max_frame_length);
906 
907 	/* Set "VLAN promiscuous" mode if filtering is disabled. */
908 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
909 		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
910 		    ENETC_PSIPVMR_SET_VUTA(1) | ENETC_PSIPVMR_SET_VP(1));
911 	else
912 		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
913 		    ENETC_PSIPVMR_SET_VUTA(1));
914 
915 	sc->rbmr = ENETC_RBMR_EN | ENETC_RBMR_AL;
916 
917 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
918 		sc->rbmr |= ENETC_RBMR_VTE;
919 
920 	/* Write MAC address to hardware. */
921 	enetc_set_hwaddr(sc);
922 
923 	enetc_init_tx(sc);
924 	enetc_init_rx(sc);
925 
926 	if (sc->fixed_link) {
927 		baudrate = ifmedia_baudrate(sc->fixed_ifmedia.ifm_cur->ifm_media);
928 		iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
929 	} else {
930 		/*
931 		 * We can't return an error from this function; there is not
932 		 * much we can do if this fails.
933 		 */
934 		miid = device_get_softc(sc->miibus);
935 		(void)mii_mediachg(miid);
936 	}
937 
938 	enetc_promisc_set(ctx, if_getflags(ifp));
939 }
940 
941 static void
942 enetc_stop(if_ctx_t ctx)
943 {
944 	struct enetc_softc *sc;
945 	int i;
946 
947 	sc = iflib_get_softc(ctx);
948 
949 	for (i = 0; i < sc->tx_num_queues; i++)
950 		ENETC_TXQ_WR4(sc, i, ENETC_TBMR, 0);
951 
952 	for (i = 0; i < sc->rx_num_queues; i++)
953 		ENETC_RXQ_WR4(sc, i, ENETC_RBMR, 0);
954 }
955 
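/*
 * Assign MSI-X vectors: RX queue i and TX queue i share vector i.
 * RX interrupts get a real handler plus coalescing thresholds, TX
 * completions are handled via an iflib softirq.
 */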
956 static int
957 enetc_msix_intr_assign(if_ctx_t ctx, int msix)
958 {
959 	struct enetc_softc *sc;
960 	struct enetc_rx_queue *rx_queue;
961 	struct enetc_tx_queue *tx_queue;
962 	int vector = 0, i, error;
963 	char irq_name[16];
964 
965 	sc = iflib_get_softc(ctx);
966 
967 	MPASS(sc->rx_num_queues + 1 <= ENETC_MSIX_COUNT);
968 	MPASS(sc->rx_num_queues == sc->tx_num_queues);
969 
970 	for (i = 0; i < sc->rx_num_queues; i++, vector++) {
971 		rx_queue = &sc->rx_queues[i];
972 		snprintf(irq_name, sizeof(irq_name), "rxtxq%d", i);
973 		error = iflib_irq_alloc_generic(ctx,
974 		    &rx_queue->irq, vector + 1, IFLIB_INTR_RXTX,
975 		    NULL, rx_queue, i, irq_name);
976 		if (error != 0)
977 			goto fail;
978 
979 		ENETC_WR4(sc, ENETC_SIMSIRRV(i), vector);
980 		ENETC_RXQ_WR4(sc, i, ENETC_RBICR1, ENETC_RX_INTR_TIME_THR);
981 		ENETC_RXQ_WR4(sc, i, ENETC_RBICR0,
982 		    ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR));
983 	}
984 	vector = 0;
985 	for (i = 0; i < sc->tx_num_queues; i++, vector++) {
986 		tx_queue = &sc->tx_queues[i];
987 		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
988 		iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
989 		    IFLIB_INTR_TX, tx_queue, i, irq_name);
990 
991 		ENETC_WR4(sc, ENETC_SIMSITRV(i), vector);
992 	}
993 
994 	return (0);
995 fail:
996 	for (i = 0; i < sc->rx_num_queues; i++) {
997 		rx_queue = &sc->rx_queues[i];
998 		iflib_irq_free(ctx, &rx_queue->irq);
999 	}
1000 	return (error);
1001 }
1002 
1003 static int
1004 enetc_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1005 {
1006 	struct enetc_softc *sc;
1007 
1008 	sc = iflib_get_softc(ctx);
1009 	ENETC_TXQ_RD4(sc, qid, ENETC_TBIDR);
1010 	return (0);
1011 }
1012 
1013 static int
1014 enetc_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1015 {
1016 	struct enetc_softc *sc;
1017 
1018 	sc = iflib_get_softc(ctx);
1019 	ENETC_RXQ_RD4(sc, qid, ENETC_RBIDR);
1020 	return (0);
1021 }

1022 static void
1023 enetc_intr_enable(if_ctx_t ctx)
1024 {
1025 	struct enetc_softc *sc;
1026 	int i;
1027 
1028 	sc = iflib_get_softc(ctx);
1029 
1030 	for (i = 0; i < sc->rx_num_queues; i++)
1031 		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
1032 
1033 	for (i = 0; i < sc->tx_num_queues; i++)
1034 		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, ENETC_TBIER_TXF);
1035 }
1036 
1037 static void
1038 enetc_intr_disable(if_ctx_t ctx)
1039 {
1040 	struct enetc_softc *sc;
1041 	int i;
1042 
1043 	sc = iflib_get_softc(ctx);
1044 
1045 	for (i = 0; i < sc->rx_num_queues; i++)
1046 		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, 0);
1047 
1048 	for (i = 0; i < sc->tx_num_queues; i++)
1049 		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, 0);
1050 }
1051 
1052 static int
1053 enetc_isc_txd_encap(void *data, if_pkt_info_t ipi)
1054 {
1055 	struct enetc_softc *sc = data;
1056 	struct enetc_tx_queue *queue;
1057 	union enetc_tx_bd *desc;
1058 	bus_dma_segment_t *segs;
1059 	qidx_t pidx, queue_len;
1060 	qidx_t i = 0;
1061 
1062 	queue = &sc->tx_queues[ipi->ipi_qsidx];
1063 	segs = ipi->ipi_segs;
1064 	pidx = ipi->ipi_pidx;
1065 	queue_len = sc->tx_queue_size;
1066 
1067 	/*
1068 	 * The first descriptor is special. We use it to set frame-related
1069 	 * information and offloads, e.g. the VLAN tag.
1070 	 */
1071 	desc = &queue->ring[pidx];
1072 	bzero(desc, sizeof(*desc));
1073 	desc->frm_len = ipi->ipi_len;
1074 	desc->addr = segs[i].ds_addr;
1075 	desc->buf_len = segs[i].ds_len;
1076 	if (ipi->ipi_flags & IPI_TX_INTR)
1077 		desc->flags = ENETC_TXBD_FLAGS_FI;
1078 
1079 	i++;
1080 	if (++pidx == queue_len)
1081 		pidx = 0;
1082 
1083 	if (ipi->ipi_mflags & M_VLANTAG) {
1084 		/* VLAN tag is inserted in a separate descriptor. */
1085 		desc->flags |= ENETC_TXBD_FLAGS_EX;
1086 		desc = &queue->ring[pidx];
1087 		bzero(desc, sizeof(*desc));
1088 		desc->ext.vid = ipi->ipi_vtag;
1089 		desc->ext.e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
1090 		if (++pidx == queue_len)
1091 			pidx = 0;
1092 	}
1093 
1094 	/* Now add remaining descriptors. */
1095 	for (; i < ipi->ipi_nsegs; i++) {
1096 		desc = &queue->ring[pidx];
1097 		bzero(desc, sizeof(*desc));
1098 		desc->addr = segs[i].ds_addr;
1099 		desc->buf_len = segs[i].ds_len;
1100 
1101 		if (++pidx == queue_len)
1102 			pidx = 0;
1103 	}
1104 
1105 	desc->flags |= ENETC_TXBD_FLAGS_F;
1106 	ipi->ipi_new_pidx = pidx;
1107 	if (pidx == queue->next_to_clean)
1108 		queue->ring_full = true;
1109 
1110 	return (0);
1111 }
1112 
1113 static void
1114 enetc_isc_txd_flush(void *data, uint16_t qid, qidx_t pidx)
1115 {
1116 	struct enetc_softc *sc = data;
1117 
1118 	ENETC_TXQ_WR4(sc, qid, ENETC_TBPIR, pidx);
1119 }
1120 
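/*
 * Report how many TX descriptors the hardware has consumed, based on
 * the ring's consumer index.  With clear == false this only indicates
 * whether there is anything to reclaim.
 */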
1121 static int
1122 enetc_isc_txd_credits_update(void *data, uint16_t qid, bool clear)
1123 {
1124 	struct enetc_softc *sc = data;
1125 	struct enetc_tx_queue *queue;
1126 	qidx_t next_to_clean, next_to_process;
1127 	int clean_count;
1128 
1129 	queue = &sc->tx_queues[qid];
1130 	next_to_process =
1131 	    ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR) & ENETC_TBCIR_IDX_MASK;
1132 	next_to_clean = queue->next_to_clean;
1133 
1134 	if (next_to_clean == next_to_process && !queue->ring_full)
1135 		return (0);
1136 
1137 	if (!clear)
1138 		return (1);
1139 
1140 	clean_count = next_to_process - next_to_clean;
1141 	if (clean_count <= 0)
1142 		clean_count += sc->tx_queue_size;
1143 
1144 	queue->next_to_clean = next_to_process;
1145 	queue->ring_full = false;
1146 
1147 	return (clean_count);
1148 }
1149 
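/*
 * Count completed frames (descriptors with the final bit set) between
 * pidx and the hardware producer index, up to 'budget'.
 */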
1150 static int
1151 enetc_isc_rxd_available(void *data, uint16_t qid, qidx_t pidx, qidx_t budget)
1152 {
1153 	struct enetc_softc *sc = data;
1154 	struct enetc_rx_queue *queue;
1155 	qidx_t hw_pidx, queue_len;
1156 	union enetc_rx_bd *desc;
1157 	int count = 0;
1158 
1159 	queue = &sc->rx_queues[qid];
1160 	desc = &queue->ring[pidx];
1161 	queue_len = sc->rx_queue_size;
1162 
1163 	if (desc->r.lstatus == 0)
1164 		return (0);
1165 
1166 	if (budget == 1)
1167 		return (1);
1168 
1169 	hw_pidx = ENETC_RXQ_RD4(sc, qid, ENETC_RBPIR);
1170 	while (pidx != hw_pidx && count < budget) {
1171 		desc = &queue->ring[pidx];
1172 		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
1173 			count++;
1174 
1175 		if (++pidx == queue_len)
1176 			pidx = 0;
1177 	}
1178 
1179 	return (count);
1180 }
1181 
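/*
 * Convert one completed RX descriptor chain into an iflib if_rxd_info:
 * fragment list, total length, RSS hash, IP checksum status and VLAN tag.
 */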
1182 static int
1183 enetc_isc_rxd_pkt_get(void *data, if_rxd_info_t ri)
1184 {
1185 	struct enetc_softc *sc = data;
1186 	struct enetc_rx_queue *queue;
1187 	union enetc_rx_bd *desc;
1188 	uint16_t buf_len, pkt_size = 0;
1189 	qidx_t cidx, queue_len;
1190 	uint32_t status;
1191 	int i;
1192 
1193 	cidx = ri->iri_cidx;
1194 	queue = &sc->rx_queues[ri->iri_qsidx];
1195 	desc = &queue->ring[cidx];
1196 	status = desc->r.lstatus;
1197 	queue_len = sc->rx_queue_size;
1198 
1199 	/*
1200 	 * The ready bit will be set only when all descriptors
1201 	 * in the chain have been processed.
1202 	 */
1203 	if ((status & ENETC_RXBD_LSTATUS_R) == 0)
1204 		return (EAGAIN);
1205 
1206 	/* Pass RSS hash. */
1207 	if (status & ENETC_RXBD_FLAG_RSSV) {
1208 		ri->iri_flowid = desc->r.rss_hash;
1209 		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
1210 	}
1211 
1212 	/* Pass IP checksum status. */
1213 	ri->iri_csum_flags = CSUM_IP_CHECKED;
1214 	if ((desc->r.parse_summary & ENETC_RXBD_PARSER_ERROR) == 0)
1215 		ri->iri_csum_flags |= CSUM_IP_VALID;
1216 
1217 	/* Pass extracted VLAN tag. */
1218 	if (status & ENETC_RXBD_FLAG_VLAN) {
1219 		ri->iri_vtag = desc->r.vlan_opt;
1220 		ri->iri_flags = M_VLANTAG;
1221 	}
1222 
1223 	for (i = 0; i < ENETC_MAX_SCATTER; i++) {
1224 		buf_len = desc->r.buf_len;
1225 		ri->iri_frags[i].irf_idx = cidx;
1226 		ri->iri_frags[i].irf_len = buf_len;
1227 		pkt_size += buf_len;
1228 		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
1229 			break;
1230 
1231 		if (++cidx == queue_len)
1232 			cidx = 0;
1233 
1234 		desc = &queue->ring[cidx];
1235 	}
1236 	ri->iri_nfrags = i + 1;
1237 	ri->iri_len = pkt_size + ENETC_RX_IP_ALIGN;
1238 	ri->iri_pad = ENETC_RX_IP_ALIGN;
1239 
1240 	MPASS(desc->r.lstatus & ENETC_RXBD_LSTATUS_F);
1241 	if (status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))
1242 		return (EBADMSG);
1243 
1244 	return (0);
1245 }
1246 
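/*
 * Post 'count' fresh receive buffer addresses starting at pidx.  The
 * ring itself is enabled lazily; see the prefetch note further down.
 */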
1247 static void
1248 enetc_isc_rxd_refill(void *data, if_rxd_update_t iru)
1249 {
1250 	struct enetc_softc *sc = data;
1251 	struct enetc_rx_queue *queue;
1252 	union enetc_rx_bd *desc;
1253 	qidx_t pidx, queue_len;
1254 	uint64_t *paddrs;
1255 	int i, count;
1256 
1257 	queue = &sc->rx_queues[iru->iru_qsidx];
1258 	paddrs = iru->iru_paddrs;
1259 	pidx = iru->iru_pidx;
1260 	count = iru->iru_count;
1261 	queue_len = sc->rx_queue_size;
1262 
1263 	for (i = 0; i < count; i++) {
1264 		desc = &queue->ring[pidx];
1265 		bzero(desc, sizeof(*desc));
1266 
1267 		desc->w.addr = paddrs[i];
1268 		if (++pidx == queue_len)
1269 			pidx = 0;
1270 	}
1271 	/*
1272 	 * After the queue is enabled the NIC will prefetch the first
1273 	 * 8 descriptors. It probably assumes that the RX ring is fully
1274 	 * refilled when cidx == pidx.
1275 	 * Enable the queue only if we have enough descriptors ready on the ring.
1276 	 */
1277 	if (!queue->enabled && pidx >= 8) {
1278 		ENETC_RXQ_WR4(sc, iru->iru_qsidx, ENETC_RBMR, sc->rbmr);
1279 		queue->enabled = true;
1280 	}
1281 }
1282 
1283 static void
1284 enetc_isc_rxd_flush(void *data, uint16_t qid, uint8_t flid, qidx_t pidx)
1285 {
1286 	struct enetc_softc *sc = data;
1287 
1288 	ENETC_RXQ_WR4(sc, qid, ENETC_RBCIR, pidx);
1289 }
1290 
1291 static uint64_t
1292 enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
1293 {
1294 	struct enetc_softc *sc;
1295 	struct ifnet *ifp;
1296 
1297 	sc = iflib_get_softc(ctx);
1298 	ifp = iflib_get_ifp(ctx);
1299 
1300 	switch (cnt) {
1301 	case IFCOUNTER_IERRORS:
1302 		return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
1303 	case IFCOUNTER_OERRORS:
1304 		return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
1305 	default:
1306 		return (if_get_counter_default(ifp, cnt));
1307 	}
1308 }
1309 
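/*
 * Recompute the maximum frame size for a new MTU, e.g. the default
 * 1500-byte MTU gives 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) +
 * 18 (sizeof(struct ether_vlan_header)) = 1536 bytes.
 */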
1310 static int
1311 enetc_mtu_set(if_ctx_t ctx, uint32_t mtu)
1312 {
1313 	struct enetc_softc *sc = iflib_get_softc(ctx);
1314 	uint32_t max_frame_size;
1315 
1316 	max_frame_size = mtu +
1317 	    ETHER_HDR_LEN +
1318 	    ETHER_CRC_LEN +
1319 	    sizeof(struct ether_vlan_header);
1320 
1321 	if (max_frame_size > ENETC_MAX_FRAME_LEN)
1322 		return (EINVAL);
1323 
1324 	sc->shared->isc_max_frame_size = max_frame_size;
1325 
1326 	return (0);
1327 }
1328 
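/*
 * Translate the IFF_PROMISC/IFF_ALLMULTI interface flags into the SI 0
 * unicast/multicast promiscuous bits of the PSIPMR register.
 */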
1329 static int
1330 enetc_promisc_set(if_ctx_t ctx, int flags)
1331 {
1332 	struct enetc_softc *sc;
1333 	uint32_t reg = 0;
1334 
1335 	sc = iflib_get_softc(ctx);
1336 
1337 	if (flags & IFF_PROMISC)
1338 		reg = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
1339 	else if (flags & IFF_ALLMULTI)
1340 		reg = ENETC_PSIPMR_SET_MP(0);
1341 
1342 	ENETC_PORT_WR4(sc, ENETC_PSIPMR, reg);
1343 
1344 	return (0);
1345 }
1346 
1347 static void
1348 enetc_timer(if_ctx_t ctx, uint16_t qid)
1349 {
1350 	/*
1351 	 * Poll PHY status. Do this only for qid 0 to save
1352 	 * some cycles.
1353 	 */
1354 	if (qid == 0)
1355 		iflib_admin_intr_deferred(ctx);
1356 }
1357 
1358 static void
1359 enetc_update_admin_status(if_ctx_t ctx)
1360 {
1361 	struct enetc_softc *sc;
1362 	struct mii_data *miid;
1363 
1364 	sc = iflib_get_softc(ctx);
1365 
1366 	if (!sc->fixed_link) {
1367 		miid = device_get_softc(sc->miibus);
1368 		mii_tick(miid);
1369 	}
1370 }
1371 
1372 static int
1373 enetc_miibus_readreg(device_t dev, int phy, int reg)
1374 {
1375 	struct enetc_softc *sc;
1376 
1377 	sc = iflib_get_softc(device_get_softc(dev));
1378 	return (enetc_mdio_read(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
1379 	    phy, reg));
1380 }
1381 
1382 static int
1383 enetc_miibus_writereg(device_t dev, int phy, int reg, int data)
1384 {
1385 	struct enetc_softc *sc;
1386 
1387 	sc = iflib_get_softc(device_get_softc(dev));
1388 	return (enetc_mdio_write(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
1389 	    phy, reg, data));
1390 }
1391 
1392 static void
1393 enetc_miibus_linkchg(device_t dev)
1394 {
1395 
1396 	enetc_miibus_statchg(dev);
1397 }
1398 
1399 static void
1400 enetc_miibus_statchg(device_t dev)
1401 {
1402 	struct enetc_softc *sc;
1403 	struct mii_data *miid;
1404 	int link_state, baudrate;
1405 
1406 	sc = iflib_get_softc(device_get_softc(dev));
1407 	miid = device_get_softc(sc->miibus);
1408 
1409 	baudrate = ifmedia_baudrate(miid->mii_media_active);
1410 	if (miid->mii_media_status & IFM_AVALID) {
1411 		if (miid->mii_media_status & IFM_ACTIVE)
1412 			link_state = LINK_STATE_UP;
1413 		else
1414 			link_state = LINK_STATE_DOWN;
1415 	} else {
1416 		link_state = LINK_STATE_UNKNOWN;
1417 	}
1418 
1419 	iflib_link_state_change(sc->ctx, link_state, baudrate);
1420 
1421 }
1422 
1423 static int
1424 enetc_media_change(if_t ifp)
1425 {
1426 	struct enetc_softc *sc;
1427 	struct mii_data *miid;
1428 
1429 	sc = iflib_get_softc(ifp->if_softc);
1430 	miid = device_get_softc(sc->miibus);
1431 
1432 	mii_mediachg(miid);
1433 	return (0);
1434 }
1435 
1436 static void
1437 enetc_media_status(if_t ifp, struct ifmediareq* ifmr)
1438 {
1439 	struct enetc_softc *sc;
1440 	struct mii_data *miid;
1441 
1442 	sc = iflib_get_softc(ifp->if_softc);
1443 	miid = device_get_softc(sc->miibus);
1444 
1445 	mii_pollstat(miid);
1446 
1447 	ifmr->ifm_active = miid->mii_media_active;
1448 	ifmr->ifm_status = miid->mii_media_status;
1449 }
1450 
1451 static int
1452 enetc_fixed_media_change(if_t ifp)
1453 {
1454 
1455 	if_printf(ifp, "Can't change media in fixed-link mode.\n");
1456 	return (0);
1457 }
1458 static void
1459 enetc_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
1460 {
1461 	struct enetc_softc *sc;
1462 
1463 	sc = iflib_get_softc(ifp->if_softc);
1464 
1465 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1466 	ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media;
1467 	return;
1468 }
1469