/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Alstom Group.
 * Copyright (c) 2021 Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/iflib.h>

#include <dev/enetc/enetc_hw.h>
#include <dev/enetc/enetc.h>
#include <dev/enetc/enetc_mdio.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "ifdi_if.h"
#include "miibus_if.h"

static device_register_t		enetc_register;

static ifdi_attach_pre_t		enetc_attach_pre;
static ifdi_attach_post_t		enetc_attach_post;
static ifdi_detach_t			enetc_detach;

static ifdi_tx_queues_alloc_t		enetc_tx_queues_alloc;
static ifdi_rx_queues_alloc_t		enetc_rx_queues_alloc;
static ifdi_queues_free_t		enetc_queues_free;

static ifdi_init_t			enetc_init;
static ifdi_stop_t			enetc_stop;

static ifdi_msix_intr_assign_t		enetc_msix_intr_assign;
static ifdi_tx_queue_intr_enable_t	enetc_tx_queue_intr_enable;
static ifdi_rx_queue_intr_enable_t	enetc_rx_queue_intr_enable;
static ifdi_intr_enable_t		enetc_intr_enable;
static ifdi_intr_disable_t		enetc_intr_disable;

static int	enetc_isc_txd_encap(void*, if_pkt_info_t);
static void	enetc_isc_txd_flush(void*, uint16_t, qidx_t);
static int	enetc_isc_txd_credits_update(void*, uint16_t, bool);
static int	enetc_isc_rxd_available(void*, uint16_t, qidx_t, qidx_t);
static int	enetc_isc_rxd_pkt_get(void*, if_rxd_info_t);
static void	enetc_isc_rxd_refill(void*, if_rxd_update_t);
static void	enetc_isc_rxd_flush(void*, uint16_t, uint8_t, qidx_t);

static void	enetc_vlan_register(if_ctx_t, uint16_t);
static void	enetc_vlan_unregister(if_ctx_t, uint16_t);

static uint64_t	enetc_get_counter(if_ctx_t, ift_counter);
static int	enetc_promisc_set(if_ctx_t, int);
static int	enetc_mtu_set(if_ctx_t, uint32_t);
static void	enetc_setup_multicast(if_ctx_t);
static void	enetc_timer(if_ctx_t, uint16_t);
static void	enetc_update_admin_status(if_ctx_t);
static bool	enetc_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static miibus_readreg_t		enetc_miibus_readreg;
static miibus_writereg_t	enetc_miibus_writereg;
static miibus_linkchg_t		enetc_miibus_linkchg;
static miibus_statchg_t		enetc_miibus_statchg;

static int			enetc_media_change(if_t);
static void			enetc_media_status(if_t, struct ifmediareq*);

static int			enetc_fixed_media_change(if_t);
static void			enetc_fixed_media_status(if_t, struct ifmediareq*);

static void			enetc_max_nqueues(struct enetc_softc*, int*, int*);
static int			enetc_setup_phy(struct enetc_softc*);

static void			enetc_get_hwaddr(struct enetc_softc*);
static void			enetc_set_hwaddr(struct enetc_softc*);
static int			enetc_setup_rss(struct enetc_softc*);

static void			enetc_init_hw(struct enetc_softc*);
static void			enetc_init_ctrl(struct enetc_softc*);
static void			enetc_init_tx(struct enetc_softc*);
static void			enetc_init_rx(struct enetc_softc*);

static int			enetc_ctrl_send(struct enetc_softc*,
				    uint16_t, uint16_t, iflib_dma_info_t);

static const char enetc_driver_version[] = "1.0.0";

static const pci_vendor_info_t enetc_vendor_info_array[] = {
	PVID(PCI_VENDOR_FREESCALE, ENETC_DEV_ID_PF,
	    "Freescale ENETC PCIe Gigabit Ethernet Controller"),
	PVID_END
};

#define ENETC_IFCAPS (IFCAP_VLAN_MTU | IFCAP_RXCSUM | IFCAP_JUMBO_MTU | \
	IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)

static device_method_t enetc_methods[] = {
	DEVMETHOD(device_register,	enetc_register),
	DEVMETHOD(device_probe,		iflib_device_probe),
	DEVMETHOD(device_attach,	iflib_device_attach),
	DEVMETHOD(device_detach,	iflib_device_detach),
	DEVMETHOD(device_shutdown,	iflib_device_shutdown),
	DEVMETHOD(device_suspend,	iflib_device_suspend),
	DEVMETHOD(device_resume,	iflib_device_resume),

	DEVMETHOD(miibus_readreg,	enetc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	enetc_miibus_writereg),
	DEVMETHOD(miibus_linkchg,	enetc_miibus_linkchg),
	DEVMETHOD(miibus_statchg,	enetc_miibus_statchg),

	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
	DEVMETHOD(bus_release_resource,		bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_adjust_resource,		bus_generic_adjust_resource),
	DEVMETHOD(bus_alloc_resource,		bus_generic_alloc_resource),

	DEVMETHOD_END
};

static driver_t enetc_driver = {
	"enetc", enetc_methods, sizeof(struct enetc_softc)
};

DRIVER_MODULE(miibus, enetc, miibus_fdt_driver, NULL, NULL);
/* Make sure miibus gets processed first. */
DRIVER_MODULE_ORDERED(enetc, pci, enetc_driver, NULL, NULL, SI_ORDER_ANY);
MODULE_VERSION(enetc, 1);

IFLIB_PNP_INFO(pci, enetc, enetc_vendor_info_array);

MODULE_DEPEND(enetc, ether, 1, 1, 1);
MODULE_DEPEND(enetc, iflib, 1, 1, 1);
MODULE_DEPEND(enetc, miibus, 1, 1, 1);

static device_method_t enetc_iflib_methods[] = {
	DEVMETHOD(ifdi_attach_pre,		enetc_attach_pre),
	DEVMETHOD(ifdi_attach_post,		enetc_attach_post),
	DEVMETHOD(ifdi_detach,			enetc_detach),

	DEVMETHOD(ifdi_init,			enetc_init),
	DEVMETHOD(ifdi_stop,			enetc_stop),

	DEVMETHOD(ifdi_tx_queues_alloc,		enetc_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc,		enetc_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free,		enetc_queues_free),

	DEVMETHOD(ifdi_msix_intr_assign,	enetc_msix_intr_assign),
	DEVMETHOD(ifdi_tx_queue_intr_enable,	enetc_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable,	enetc_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable,		enetc_intr_enable),
	DEVMETHOD(ifdi_intr_disable,		enetc_intr_disable),

	DEVMETHOD(ifdi_vlan_register,		enetc_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister,		enetc_vlan_unregister),

	DEVMETHOD(ifdi_get_counter,		enetc_get_counter),
	DEVMETHOD(ifdi_mtu_set,			enetc_mtu_set),
	DEVMETHOD(ifdi_multi_set,		enetc_setup_multicast),
	DEVMETHOD(ifdi_promisc_set,		enetc_promisc_set),
	DEVMETHOD(ifdi_timer,			enetc_timer),
	DEVMETHOD(ifdi_update_admin_status,	enetc_update_admin_status),

	DEVMETHOD(ifdi_needs_restart,		enetc_if_needs_restart),

	DEVMETHOD_END
};

static driver_t enetc_iflib_driver = {
	"enetc", enetc_iflib_methods, sizeof(struct enetc_softc)
};

static struct if_txrx enetc_txrx = {
	.ift_txd_encap = enetc_isc_txd_encap,
	.ift_txd_flush = enetc_isc_txd_flush,
	.ift_txd_credits_update = enetc_isc_txd_credits_update,
	.ift_rxd_available = enetc_isc_rxd_available,
	.ift_rxd_pkt_get = enetc_isc_rxd_pkt_get,
	.ift_rxd_refill = enetc_isc_rxd_refill,
	.ift_rxd_flush = enetc_isc_rxd_flush
};

static struct if_shared_ctx enetc_sctx_init = {
	.isc_magic = IFLIB_MAGIC,

	.isc_q_align = ENETC_RING_ALIGN,

	.isc_tx_maxsize = ENETC_MAX_FRAME_LEN,
	.isc_tx_maxsegsize = PAGE_SIZE,

	.isc_rx_maxsize = ENETC_MAX_FRAME_LEN,
	.isc_rx_maxsegsize = ENETC_MAX_FRAME_LEN,
	.isc_rx_nsegments = ENETC_MAX_SCATTER,

	.isc_admin_intrcnt = 0,

	.isc_nfl = 1,
	.isc_nrxqs = 1,
	.isc_ntxqs = 1,

	.isc_vendor_info = enetc_vendor_info_array,
	.isc_driver_version = enetc_driver_version,
	.isc_driver = &enetc_iflib_driver,

	.isc_flags = IFLIB_DRIVER_MEDIA | IFLIB_PRESERVE_TX_INDICES,
	.isc_ntxd_min = {ENETC_MIN_DESC},
	.isc_ntxd_max = {ENETC_MAX_DESC},
	.isc_ntxd_default = {ENETC_DEFAULT_DESC},
	.isc_nrxd_min = {ENETC_MIN_DESC},
	.isc_nrxd_max = {ENETC_MAX_DESC},
	.isc_nrxd_default = {ENETC_DEFAULT_DESC}
};

static void*
enetc_register(device_t dev)
{

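	/*
	 * iflib consumes the shared context template returned here; bail
	 * out early if the FDT node is disabled.
	 */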
	if (!ofw_bus_status_okay(dev))
		return (NULL);

	return (&enetc_sctx_init);
}

static void
enetc_max_nqueues(struct enetc_softc *sc, int *max_tx_nqueues,
    int *max_rx_nqueues)
{
	uint32_t val;

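	/* PCAPR0 reports how many TX and RX BD rings the port implements. */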
	val = ENETC_PORT_RD4(sc, ENETC_PCAPR0);
	*max_tx_nqueues = MIN(ENETC_PCAPR0_TXBDR(val), ENETC_MAX_QUEUES);
	*max_rx_nqueues = MIN(ENETC_PCAPR0_RXBDR(val), ENETC_MAX_QUEUES);
}

static int
enetc_setup_fixed(struct enetc_softc *sc, phandle_t node)
{
	ssize_t size;
	int speed;

	size = OF_getencprop(node, "speed", &speed, sizeof(speed));
	if (size <= 0) {
		device_printf(sc->dev,
		    "Device has fixed-link node without link speed specified\n");
		return (ENXIO);
	}
	switch (speed) {
	case 10:
		speed = IFM_10_T;
		break;
	case 100:
		speed = IFM_100_TX;
		break;
	case 1000:
		speed = IFM_1000_T;
		break;
	case 2500:
		speed = IFM_2500_T;
		break;
	default:
		device_printf(sc->dev, "Unsupported link speed value of %d\n",
		    speed);
		return (ENXIO);
	}
	speed |= IFM_ETHER;

	if (OF_hasprop(node, "full-duplex"))
		speed |= IFM_FDX;
	else
		speed |= IFM_HDX;

	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, enetc_fixed_media_change,
	    enetc_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, speed, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, speed);
	sc->shared->isc_media = &sc->fixed_ifmedia;

	return (0);
}

static int
enetc_setup_phy(struct enetc_softc *sc)
{
	phandle_t node, fixed_link, phy_handle;
	struct mii_data *miid;
	int phy_addr, error;
	ssize_t size;

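	/* Prefer a fixed-link FDT subnode over a PHY handle when present. */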
	node = ofw_bus_get_node(sc->dev);
	fixed_link = ofw_bus_find_child(node, "fixed-link");
	if (fixed_link != 0)
		return (enetc_setup_fixed(sc, fixed_link));

	size = OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle));
	if (size <= 0) {
		device_printf(sc->dev,
		    "Failed to acquire PHY handle from FDT.\n");
		return (ENXIO);
	}
	phy_handle = OF_node_from_xref(phy_handle);
	size = OF_getencprop(phy_handle, "reg", &phy_addr, sizeof(phy_addr));
	if (size <= 0) {
		device_printf(sc->dev, "Failed to obtain PHY address\n");
		return (ENXIO);
	}
	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(sc->ctx),
	    enetc_media_change, enetc_media_status,
	    BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(sc->dev, "mii_attach failed\n");
		return (error);
	}
	miid = device_get_softc(sc->miibus);
	sc->shared->isc_media = &miid->mii_media;

	return (0);
}

static int
enetc_attach_pre(if_ctx_t ctx)
{
	if_softc_ctx_t scctx;
	struct enetc_softc *sc;
	int error, rid;

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);
	sc->ctx = ctx;
	sc->dev = iflib_get_dev(ctx);
	sc->shared = scctx;

	mtx_init(&sc->mii_lock, "enetc_mdio", NULL, MTX_DEF);

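	/*
	 * Issue a Function Level Reset to bring the device into a known
	 * state, preserving the PCI config space across the reset.
	 */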
	pci_save_state(sc->dev);
	pcie_flr(sc->dev, 1000, false);
	pci_restore_state(sc->dev);

	rid = PCIR_BAR(ENETC_BAR_REGS);
	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->regs == NULL) {
		device_printf(sc->dev,
		    "Failed to allocate BAR %d\n", ENETC_BAR_REGS);
		return (ENXIO);
	}

	error = iflib_dma_alloc_align(ctx,
	    ENETC_MIN_DESC * sizeof(struct enetc_cbd),
	    ENETC_RING_ALIGN,
	    &sc->ctrl_queue.dma,
	    0);
	if (error != 0) {
		device_printf(sc->dev, "Failed to allocate control ring\n");
		goto fail;
	}
	sc->ctrl_queue.ring = (struct enetc_cbd*)sc->ctrl_queue.dma.idi_vaddr;

	scctx->isc_txrx = &enetc_txrx;
	scctx->isc_tx_nsegments = ENETC_MAX_SCATTER;
	enetc_max_nqueues(sc, &scctx->isc_nrxqsets_max, &scctx->isc_ntxqsets_max);

	if (scctx->isc_ntxd[0] % ENETC_DESC_ALIGN != 0) {
		device_printf(sc->dev,
		    "The number of TX descriptors has to be a multiple of %d\n",
		    ENETC_DESC_ALIGN);
		error = EINVAL;
		goto fail;
	}
	if (scctx->isc_nrxd[0] % ENETC_DESC_ALIGN != 0) {
		device_printf(sc->dev,
		    "The number of RX descriptors has to be a multiple of %d\n",
		    ENETC_DESC_ALIGN);
		error = EINVAL;
		goto fail;
	}
	scctx->isc_txqsizes[0] = scctx->isc_ntxd[0] * sizeof(union enetc_tx_bd);
	scctx->isc_rxqsizes[0] = scctx->isc_nrxd[0] * sizeof(union enetc_rx_bd);
	scctx->isc_txd_size[0] = sizeof(union enetc_tx_bd);
	scctx->isc_rxd_size[0] = sizeof(union enetc_rx_bd);
	scctx->isc_tx_csum_flags = 0;
	scctx->isc_capabilities = scctx->isc_capenable = ENETC_IFCAPS;

	error = enetc_mtu_set(ctx, ETHERMTU);
	if (error != 0)
		goto fail;

	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);

	error = enetc_setup_phy(sc);
	if (error != 0)
		goto fail;

	enetc_get_hwaddr(sc);

	return (0);
fail:
	enetc_detach(ctx);
	return (error);
}

static int
enetc_attach_post(if_ctx_t ctx)
{

	enetc_init_hw(iflib_get_softc(ctx));
	return (0);
}

static int
enetc_detach(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int error = 0, i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		iflib_irq_free(ctx, &sc->rx_queues[i].irq);

	if (sc->miibus != NULL)
		device_delete_child(sc->dev, sc->miibus);

	if (sc->regs != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->regs), sc->regs);

	if (sc->ctrl_queue.dma.idi_size != 0)
		iflib_dma_free(&sc->ctrl_queue.dma);

	mtx_destroy(&sc->mii_lock);

	return (error);
}

static int
enetc_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct enetc_softc *sc;
	struct enetc_tx_queue *queue;
	int i;

	sc = iflib_get_softc(ctx);

	MPASS(ntxqs == 1);

	sc->tx_queues = mallocarray(sc->tx_num_queues,
	    sizeof(struct enetc_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->tx_queues == NULL) {
		device_printf(sc->dev,
		    "Failed to allocate memory for TX queues.\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->tx_num_queues; i++) {
		queue = &sc->tx_queues[i];
		queue->sc = sc;
		queue->ring = (union enetc_tx_bd*)(vaddrs[i]);
		queue->ring_paddr = paddrs[i];
		queue->cidx = 0;
	}

	return (0);
}

static int
enetc_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct enetc_softc *sc;
	struct enetc_rx_queue *queue;
	int i;

	sc = iflib_get_softc(ctx);
	MPASS(nrxqs == 1);

	sc->rx_queues = mallocarray(sc->rx_num_queues,
	    sizeof(struct enetc_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->rx_queues == NULL) {
		device_printf(sc->dev,
		    "Failed to allocate memory for RX queues.\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->rx_num_queues; i++) {
		queue = &sc->rx_queues[i];
		queue->sc = sc;
		queue->qid = i;
		queue->ring = (union enetc_rx_bd*)(vaddrs[i]);
		queue->ring_paddr = paddrs[i];
	}

	return (0);
}

static void
enetc_queues_free(if_ctx_t ctx)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(ctx);

	if (sc->tx_queues != NULL) {
		free(sc->tx_queues, M_DEVBUF);
		sc->tx_queues = NULL;
	}
	if (sc->rx_queues != NULL) {
		free(sc->rx_queues, M_DEVBUF);
		sc->rx_queues = NULL;
	}
}

static void
enetc_get_hwaddr(struct enetc_softc *sc)
{
	struct ether_addr hwaddr;
	uint16_t high;
	uint32_t low;

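	/* The SI 0 MAC address lives in the PSIPMAR0/1 port registers. */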
	low = ENETC_PORT_RD4(sc, ENETC_PSIPMAR0(0));
	high = ENETC_PORT_RD2(sc, ENETC_PSIPMAR1(0));

	memcpy(&hwaddr.octet[0], &low, 4);
	memcpy(&hwaddr.octet[4], &high, 2);

	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
	    ETHER_IS_MULTICAST(hwaddr.octet) ||
	    ETHER_IS_ZERO(hwaddr.octet)) {
		ether_gen_addr(iflib_get_ifp(sc->ctx), &hwaddr);
		device_printf(sc->dev,
		    "Failed to obtain MAC address, using a random one\n");
		memcpy(&low, &hwaddr.octet[0], 4);
		memcpy(&high, &hwaddr.octet[4], 2);
	}

	iflib_set_mac(sc->ctx, hwaddr.octet);
}

static void
enetc_set_hwaddr(struct enetc_softc *sc)
{
	if_t ifp;
	uint16_t high;
	uint32_t low;
	uint8_t *hwaddr;

	ifp = iflib_get_ifp(sc->ctx);
	hwaddr = (uint8_t*)if_getlladdr(ifp);
	low = *((uint32_t*)hwaddr);
	high = *((uint16_t*)(hwaddr+4));

	ENETC_PORT_WR4(sc, ENETC_PSIPMAR0(0), low);
	ENETC_PORT_WR2(sc, ENETC_PSIPMAR1(0), high);
}

static int
enetc_setup_rss(struct enetc_softc *sc)
{
	struct iflib_dma_info dma;
	int error, i, buckets_num = 0;
	uint8_t *rss_table;
	uint32_t reg;

	reg = ENETC_RD4(sc, ENETC_SIPCAPR0);
	if (reg & ENETC_SIPCAPR0_RSS) {
		reg = ENETC_RD4(sc, ENETC_SIRSSCAPR);
		buckets_num = ENETC_SIRSSCAPR_GET_NUM_RSS(reg);
	}
	if (buckets_num == 0)
		return (ENOTSUP);

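	/* Seed the RSS hash key with random data, one 32-bit word at a time. */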
	for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / sizeof(uint32_t); i++) {
		arc4rand((uint8_t *)&reg, sizeof(reg), 0);
		ENETC_PORT_WR4(sc, ENETC_PRSSK(i), reg);
	}

	ENETC_WR4(sc, ENETC_SIRBGCR, sc->rx_num_queues);

	error = iflib_dma_alloc_align(sc->ctx,
	    buckets_num * sizeof(*rss_table),
	    ENETC_RING_ALIGN,
	    &dma,
	    0);
	if (error != 0) {
		device_printf(sc->dev, "Failed to allocate DMA buffer for RSS\n");
		return (error);
	}
	rss_table = (uint8_t *)dma.idi_vaddr;

	for (i = 0; i < buckets_num; i++)
		rss_table[i] = i % sc->rx_num_queues;

	error = enetc_ctrl_send(sc, (BDCR_CMD_RSS << 8) | BDCR_CMD_RSS_WRITE,
	    buckets_num * sizeof(*rss_table), &dma);
	if (error != 0)
		device_printf(sc->dev, "Failed to setup RSS table\n");

	iflib_dma_free(&dma);

	return (error);
}

static int
enetc_ctrl_send(struct enetc_softc *sc, uint16_t cmd, uint16_t size,
    iflib_dma_info_t dma)
{
	struct enetc_ctrl_queue *queue;
	struct enetc_cbd *desc;
	int timeout = 1000;

	queue = &sc->ctrl_queue;
	desc = &queue->ring[queue->pidx];

	if (++queue->pidx == ENETC_MIN_DESC)
		queue->pidx = 0;

	desc->addr[0] = (uint32_t)dma->idi_paddr;
	desc->addr[1] = (uint32_t)(dma->idi_paddr >> 32);
	desc->index = 0;
	desc->length = (uint16_t)size;
	desc->cmd = (uint8_t)cmd;
	desc->cls = (uint8_t)(cmd >> 8);
	desc->status_flags = 0;

	/* Sync command packet, */
	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_PREWRITE);
	/* and the control ring. */
	bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE);
	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);

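	/* Poll until the hardware consumer index catches up with pidx. */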
	while (--timeout != 0) {
		DELAY(20);
		if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx)
			break;
	}

	if (timeout == 0)
		return (ETIMEDOUT);

	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD);
	return (0);
}

static void
enetc_init_hw(struct enetc_softc *sc)
{
	uint32_t val;
	int error;

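	/*
	 * Bring up the port MAC and hand ring and VLAN resources to
	 * station interface 0.
	 */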
	ENETC_PORT_WR4(sc, ENETC_PM0_CMD_CFG,
	    ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
	    ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
	ENETC_PORT_WR4(sc, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
	val = ENETC_PSICFGR0_SET_TXBDR(sc->tx_num_queues);
	val |= ENETC_PSICFGR0_SET_RXBDR(sc->rx_num_queues);
	val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
	ENETC_PORT_WR4(sc, ENETC_PSICFGR0(0), val);
	ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1));
	ENETC_PORT_WR4(sc, ENETC_PVCLCTR, ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
	ENETC_PORT_WR4(sc, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
	ENETC_PORT_WR4(sc, ENETC_PAR_PORT_CFG, ENETC_PAR_PORT_L4CD);
	ENETC_PORT_WR4(sc, ENETC_PMR, ENETC_PMR_SI0EN | ENETC_PMR_PSPEED_1000M);

	ENETC_WR4(sc, ENETC_SICAR0,
	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
	ENETC_WR4(sc, ENETC_SICAR1, ENETC_SICAR_MSI);
	ENETC_WR4(sc, ENETC_SICAR2,
	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_init_ctrl(sc);
	error = enetc_setup_rss(sc);
	if (error != 0)
		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN);
	else
		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN | ENETC_SIMR_RSSE);
}

static void
enetc_init_ctrl(struct enetc_softc *sc)
{
	struct enetc_ctrl_queue *queue = &sc->ctrl_queue;

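	/* Point the SI control BD ring at the DMA ring allocated in attach. */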
	ENETC_WR4(sc, ENETC_SICBDRBAR0,
	    (uint32_t)queue->dma.idi_paddr);
	ENETC_WR4(sc, ENETC_SICBDRBAR1,
	    (uint32_t)(queue->dma.idi_paddr >> 32));
	ENETC_WR4(sc, ENETC_SICBDRLENR,
	    queue->dma.idi_size / sizeof(struct enetc_cbd));

	queue->pidx = 0;
	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
	ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx);
	ENETC_WR4(sc, ENETC_SICBDRMR, ENETC_SICBDRMR_EN);
}

static void
enetc_init_tx(struct enetc_softc *sc)
{
	struct enetc_tx_queue *queue;
	int i;

	for (i = 0; i < sc->tx_num_queues; i++) {
		queue = &sc->tx_queues[i];

		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR0,
		    (uint32_t)queue->ring_paddr);
		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR1,
		    (uint32_t)(queue->ring_paddr >> 32));
		ENETC_TXQ_WR4(sc, i, ENETC_TBLENR, sc->tx_queue_size);

		/*
		 * Even though it is undocumented, resetting the TX ring
		 * indices results in a TX hang.
		 * Do the same as Linux and simply keep them unchanged
		 * for the driver's lifetime.
		 */
#if 0
		ENETC_TXQ_WR4(sc, i, ENETC_TBPIR, 0);
		ENETC_TXQ_WR4(sc, i, ENETC_TBCIR, 0);
#endif
		ENETC_TXQ_WR4(sc, i, ENETC_TBMR, ENETC_TBMR_EN);
	}
}

static void
enetc_init_rx(struct enetc_softc *sc)
{
	struct enetc_rx_queue *queue;
	uint32_t rx_buf_size;
	int i;

	rx_buf_size = iflib_get_rx_mbuf_sz(sc->ctx);

	for (i = 0; i < sc->rx_num_queues; i++) {
		queue = &sc->rx_queues[i];

		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR0,
		    (uint32_t)queue->ring_paddr);
		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR1,
		    (uint32_t)(queue->ring_paddr >> 32));
		ENETC_RXQ_WR4(sc, i, ENETC_RBLENR, sc->rx_queue_size);
		ENETC_RXQ_WR4(sc, i, ENETC_RBBSR, rx_buf_size);
		ENETC_RXQ_WR4(sc, i, ENETC_RBPIR, 0);
		ENETC_RXQ_WR4(sc, i, ENETC_RBCIR, 0);
		queue->enabled = false;
	}
}

static u_int
enetc_hash_mac(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint64_t *bitmap = arg;
	uint64_t address = 0;
	uint8_t hash = 0;
	bool bit;
	int i, j;

	bcopy(LLADDR(sdl), &address, ETHER_ADDR_LEN);

	/*
	 * The 6-bit hash is calculated by XORing every 6th bit of the
	 * address: bit i of the hash is the XOR of address bits i, i + 6,
	 * ..., i + 42.
	 * It is then used as an index into a bitmap that is written to
	 * the device.
	 */
	for (i = 0; i < 6; i++) {
		bit = 0;
		for (j = 0; j < 8; j++)
			bit ^= !!(address & BIT(i + j*6));

		hash |= bit << i;
	}

	*bitmap |= BIT(hash);
	return (1);
}
822 
823 static void
824 enetc_setup_multicast(if_ctx_t ctx)
825 {
826 	struct enetc_softc *sc;
827 	if_t ifp;
828 	uint64_t bitmap = 0;
829 	uint8_t revid;
830 
831 	sc = iflib_get_softc(ctx);
832 	ifp = iflib_get_ifp(ctx);
833 	revid = pci_get_revid(sc->dev);
834 
835 	if_foreach_llmaddr(ifp, enetc_hash_mac, &bitmap);
836 
837 	/*
838 	 * In revid 1 of this chip the positions multicast and unicast
839 	 * hash filter registers are flipped.
840 	 */
841 	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR0(0, revid == 1), bitmap & UINT32_MAX);
842 	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR1(0), bitmap >> 32);
843 
844 }

static uint8_t
enetc_hash_vid(uint16_t vid)
{
	uint8_t hash = 0;
	bool bit;
	int i;

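	/* Fold the 12-bit VLAN ID into a 6-bit hash by XORing its halves. */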
	for (i = 0; i < 6; i++) {
		bit = vid & BIT(i);
		bit ^= !!(vid & BIT(i + 6));
		hash |= bit << i;
	}

	return (hash);
}

static void
enetc_vlan_register(if_ctx_t ctx, uint16_t vid)
{
	struct enetc_softc *sc;
	uint8_t hash;
	uint64_t bitmap;

	sc = iflib_get_softc(ctx);
	hash = enetc_hash_vid(vid);

	/* Check if hash is already present in the bitmap. */
	if (++sc->vlan_bitmap[hash] != 1)
		return;

	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
	bitmap |= BIT(hash);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
}

static void
enetc_vlan_unregister(if_ctx_t ctx, uint16_t vid)
{
	struct enetc_softc *sc;
	uint8_t hash;
	uint64_t bitmap;

	sc = iflib_get_softc(ctx);
	hash = enetc_hash_vid(vid);

	MPASS(sc->vlan_bitmap[hash] > 0);
	if (--sc->vlan_bitmap[hash] != 0)
		return;

	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
	bitmap &= ~BIT(hash);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
}

static void
enetc_init(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	struct mii_data *miid;
	if_t ifp;
	uint16_t max_frame_length;
	int baudrate;

	sc = iflib_get_softc(ctx);
	ifp = iflib_get_ifp(ctx);

	max_frame_length = sc->shared->isc_max_frame_size;
	MPASS(max_frame_length < ENETC_MAX_FRAME_LEN);

	/* Set max RX and TX frame lengths. */
	ENETC_PORT_WR4(sc, ENETC_PM0_MAXFRM, max_frame_length);
	ENETC_PORT_WR4(sc, ENETC_PTCMSDUR(0), max_frame_length);
	ENETC_PORT_WR4(sc, ENETC_PTXMBAR, 2 * max_frame_length);

	/* Set "VLAN promiscuous" mode if filtering is disabled. */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
		    ENETC_PSIPVMR_SET_VUTA(1) | ENETC_PSIPVMR_SET_VP(1));
	else
		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
		    ENETC_PSIPVMR_SET_VUTA(1));

	sc->rbmr = ENETC_RBMR_EN;

	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
		sc->rbmr |= ENETC_RBMR_VTE;

	/* Write MAC address to hardware. */
	enetc_set_hwaddr(sc);

	enetc_init_tx(sc);
	enetc_init_rx(sc);

	if (sc->fixed_link) {
		baudrate = ifmedia_baudrate(sc->fixed_ifmedia.ifm_cur->ifm_media);
		iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
	} else {
		/*
		 * Can't return an error from this function, there is not much
		 * we can do if this fails.
		 */
		miid = device_get_softc(sc->miibus);
		(void)mii_mediachg(miid);
	}

	enetc_promisc_set(ctx, if_getflags(ifp));
}

static void
enetc_disable_txq(struct enetc_softc *sc, int qid)
{
	qidx_t cidx, pidx;
	int timeout = 10000;	/* this * DELAY(100) = 1s */

	/* At this point iflib shouldn't be enqueuing any more frames. */
	pidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBPIR);
	cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR);

	while (pidx != cidx && --timeout > 0) {
		DELAY(100);
		cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR);
	}

	if (timeout == 0)
		device_printf(sc->dev,
		    "Timeout while waiting for txq%d to stop transmitting packets\n",
		    qid);

	ENETC_TXQ_WR4(sc, qid, ENETC_TBMR, 0);
}

static void
enetc_stop(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		ENETC_RXQ_WR4(sc, i, ENETC_RBMR, 0);

	for (i = 0; i < sc->tx_num_queues; i++)
		enetc_disable_txq(sc, i);
}

static int
enetc_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct enetc_softc *sc;
	struct enetc_rx_queue *rx_queue;
	struct enetc_tx_queue *tx_queue;
	int vector = 0, i, error;
	char irq_name[16];

	sc = iflib_get_softc(ctx);

	MPASS(sc->rx_num_queues + 1 <= ENETC_MSIX_COUNT);
	MPASS(sc->rx_num_queues == sc->tx_num_queues);

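	/*
	 * RX and TX completions for ring i share one MSI-X vector; the TX
	 * side only gets a softirq handler below.
	 */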
	for (i = 0; i < sc->rx_num_queues; i++, vector++) {
		rx_queue = &sc->rx_queues[i];
		snprintf(irq_name, sizeof(irq_name), "rxtxq%d", i);
		error = iflib_irq_alloc_generic(ctx,
		    &rx_queue->irq, vector + 1, IFLIB_INTR_RXTX,
		    NULL, rx_queue, i, irq_name);
		if (error != 0)
			goto fail;

		ENETC_WR4(sc, ENETC_SIMSIRRV(i), vector);
		ENETC_RXQ_WR4(sc, i, ENETC_RBICR1, ENETC_RX_INTR_TIME_THR);
		ENETC_RXQ_WR4(sc, i, ENETC_RBICR0,
		    ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR));
	}
	vector = 0;
	for (i = 0; i < sc->tx_num_queues; i++, vector++) {
		tx_queue = &sc->tx_queues[i];
		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
		iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
		    IFLIB_INTR_TX, tx_queue, i, irq_name);

		ENETC_WR4(sc, ENETC_SIMSITRV(i), vector);
	}

	return (0);
fail:
	for (i = 0; i < sc->rx_num_queues; i++) {
		rx_queue = &sc->rx_queues[i];
		iflib_irq_free(ctx, &rx_queue->irq);
	}
	return (error);
}

static int
enetc_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(ctx);
	ENETC_TXQ_RD4(sc, qid, ENETC_TBIDR);
	return (0);
}

static int
enetc_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(ctx);
	ENETC_RXQ_RD4(sc, qid, ENETC_RBIDR);
	return (0);
}

static void
enetc_intr_enable(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, ENETC_RBIER_RXTIE);

	for (i = 0; i < sc->tx_num_queues; i++)
		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, ENETC_TBIER_TXF);
}

static void
enetc_intr_disable(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, 0);

	for (i = 0; i < sc->tx_num_queues; i++)
		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, 0);
}

static int
enetc_isc_txd_encap(void *data, if_pkt_info_t ipi)
{
	struct enetc_softc *sc = data;
	struct enetc_tx_queue *queue;
	union enetc_tx_bd *desc;
	bus_dma_segment_t *segs;
	qidx_t pidx, queue_len;
	qidx_t i = 0;

	queue = &sc->tx_queues[ipi->ipi_qsidx];
	segs = ipi->ipi_segs;
	pidx = ipi->ipi_pidx;
	queue_len = sc->tx_queue_size;

	/*
	 * First descriptor is special. We use it to set frame
	 * related information and offloads, e.g. VLAN tag.
	 */
	desc = &queue->ring[pidx];
	bzero(desc, sizeof(*desc));
	desc->frm_len = ipi->ipi_len;
	desc->addr = segs[i].ds_addr;
	desc->buf_len = segs[i].ds_len;
	if (ipi->ipi_flags & IPI_TX_INTR)
		desc->flags = ENETC_TXBD_FLAGS_FI;

	i++;
	if (++pidx == queue_len)
		pidx = 0;

	if (ipi->ipi_mflags & M_VLANTAG) {
		/* VLAN tag is inserted in a separate descriptor. */
		desc->flags |= ENETC_TXBD_FLAGS_EX;
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));
		desc->ext.vid = ipi->ipi_vtag;
		desc->ext.e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
		if (++pidx == queue_len)
			pidx = 0;
	}

	/* Now add remaining descriptors. */
	for (; i < ipi->ipi_nsegs; i++) {
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));
		desc->addr = segs[i].ds_addr;
		desc->buf_len = segs[i].ds_len;

		if (++pidx == queue_len)
			pidx = 0;
	}

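	/* Mark the last descriptor in the chain as final. */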
	desc->flags |= ENETC_TXBD_FLAGS_F;
	ipi->ipi_new_pidx = pidx;

	return (0);
}

static void
enetc_isc_txd_flush(void *data, uint16_t qid, qidx_t pidx)
{
	struct enetc_softc *sc = data;

	ENETC_TXQ_WR4(sc, qid, ENETC_TBPIR, pidx);
}

static int
enetc_isc_txd_credits_update(void *data, uint16_t qid, bool clear)
{
	struct enetc_softc *sc = data;
	struct enetc_tx_queue *queue;
	int cidx, hw_cidx, count;

	queue = &sc->tx_queues[qid];
	hw_cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR) & ENETC_TBCIR_IDX_MASK;
	cidx = queue->cidx;

	/*
	 * The reference manual states that the ring can hold at most
	 * ring_size - 1 descriptors, so the ring is empty when cidx == pidx.
	 * This requirement is guaranteed implicitly by iflib, as it will
	 * only encap a new frame if at least nfrags + 2 descriptors are
	 * available on the ring. This driver uses at most one additional
	 * descriptor for VLAN tag insertion.
	 * The manual also states that the TBCIR register is only updated
	 * once all descriptors in the chain have been processed.
	 */
	if (cidx == hw_cidx)
		return (0);

	if (!clear)
		return (1);

	count = hw_cidx - cidx;
	if (count < 0)
		count += sc->tx_queue_size;

	queue->cidx = hw_cidx;

	return (count);
}

static int
enetc_isc_rxd_available(void *data, uint16_t qid, qidx_t pidx, qidx_t budget)
{
	struct enetc_softc *sc = data;
	struct enetc_rx_queue *queue;
	qidx_t hw_pidx, queue_len;
	union enetc_rx_bd *desc;
	int count = 0;

	queue = &sc->rx_queues[qid];
	desc = &queue->ring[pidx];
	queue_len = sc->rx_queue_size;

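	/*
	 * A zero lstatus means the first descriptor has not been written
	 * back yet.
	 */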
	if (desc->r.lstatus == 0)
		return (0);

	if (budget == 1)
		return (1);

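	/*
	 * Count completed frames (final-descriptor bit set) between pidx
	 * and the hardware producer index, bounded by the budget.
	 */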
	hw_pidx = ENETC_RXQ_RD4(sc, qid, ENETC_RBPIR);
	while (pidx != hw_pidx && count < budget) {
		desc = &queue->ring[pidx];
		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
			count++;

		if (++pidx == queue_len)
			pidx = 0;
	}

	return (count);
}

static int
enetc_isc_rxd_pkt_get(void *data, if_rxd_info_t ri)
{
	struct enetc_softc *sc = data;
	struct enetc_rx_queue *queue;
	union enetc_rx_bd *desc;
	uint16_t buf_len, pkt_size = 0;
	qidx_t cidx, queue_len;
	uint32_t status;
	int i;

	cidx = ri->iri_cidx;
	queue = &sc->rx_queues[ri->iri_qsidx];
	desc = &queue->ring[cidx];
	status = desc->r.lstatus;
	queue_len = sc->rx_queue_size;

	/*
	 * Ready bit will be set only when all descriptors
	 * in the chain have been processed.
	 */
	if ((status & ENETC_RXBD_LSTATUS_R) == 0)
		return (EAGAIN);

	/* Pass RSS hash. */
	if (status & ENETC_RXBD_FLAG_RSSV) {
		ri->iri_flowid = desc->r.rss_hash;
		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
	}

	/* Pass IP checksum status. */
	ri->iri_csum_flags = CSUM_IP_CHECKED;
	if ((desc->r.parse_summary & ENETC_RXBD_PARSER_ERROR) == 0)
		ri->iri_csum_flags |= CSUM_IP_VALID;

	/* Pass extracted VLAN tag. */
	if (status & ENETC_RXBD_FLAG_VLAN) {
		ri->iri_vtag = desc->r.vlan_opt;
		ri->iri_flags = M_VLANTAG;
	}

	for (i = 0; i < ENETC_MAX_SCATTER; i++) {
		buf_len = desc->r.buf_len;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = buf_len;
		pkt_size += buf_len;
		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
			break;

		if (++cidx == queue_len)
			cidx = 0;

		desc = &queue->ring[cidx];
	}
	ri->iri_nfrags = i + 1;
	ri->iri_len = pkt_size;

	MPASS(desc->r.lstatus & ENETC_RXBD_LSTATUS_F);
	if (status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))
		return (EBADMSG);

	return (0);
}

static void
enetc_isc_rxd_refill(void *data, if_rxd_update_t iru)
{
	struct enetc_softc *sc = data;
	struct enetc_rx_queue *queue;
	union enetc_rx_bd *desc;
	qidx_t pidx, queue_len;
	uint64_t *paddrs;
	int i, count;

	queue = &sc->rx_queues[iru->iru_qsidx];
	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;
	queue_len = sc->rx_queue_size;

	for (i = 0; i < count; i++) {
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));

		desc->w.addr = paddrs[i];
		if (++pidx == queue_len)
			pidx = 0;
	}
	/*
	 * After the queue is enabled the NIC will prefetch the first
	 * 8 descriptors; it presumably assumes that the RX ring is fully
	 * refilled when cidx == pidx.
	 * Enable the queue only once enough descriptors are ready on the ring.
	 */
	if (!queue->enabled && pidx >= 8) {
		ENETC_RXQ_WR4(sc, iru->iru_qsidx, ENETC_RBMR, sc->rbmr);
		queue->enabled = true;
	}
}

static void
enetc_isc_rxd_flush(void *data, uint16_t qid, uint8_t flid, qidx_t pidx)
{
	struct enetc_softc *sc = data;

	ENETC_RXQ_WR4(sc, qid, ENETC_RBCIR, pidx);
}

static uint64_t
enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct enetc_softc *sc;
	if_t ifp;

	sc = iflib_get_softc(ctx);
	ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IERRORS:
		return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
	case IFCOUNTER_OERRORS:
		return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static int
enetc_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct enetc_softc *sc = iflib_get_softc(ctx);
	uint32_t max_frame_size;

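	/* Reserve room for L2 framing overhead on top of the MTU. */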
	max_frame_size = mtu +
	    ETHER_HDR_LEN +
	    ETHER_CRC_LEN +
	    sizeof(struct ether_vlan_header);

	if (max_frame_size > ENETC_MAX_FRAME_LEN)
		return (EINVAL);

	sc->shared->isc_max_frame_size = max_frame_size;

	return (0);
}

static int
enetc_promisc_set(if_ctx_t ctx, int flags)
{
	struct enetc_softc *sc;
	uint32_t reg = 0;

	sc = iflib_get_softc(ctx);

	if (flags & IFF_PROMISC)
		reg = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
	else if (flags & IFF_ALLMULTI)
		reg = ENETC_PSIPMR_SET_MP(0);

	ENETC_PORT_WR4(sc, ENETC_PSIPMR, reg);

	return (0);
}

static void
enetc_timer(if_ctx_t ctx, uint16_t qid)
{
	/*
	 * Poll PHY status. Do this only for qid 0 to save
	 * some cycles.
	 */
	if (qid == 0)
		iflib_admin_intr_deferred(ctx);
}

static void
enetc_update_admin_status(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(ctx);

	if (!sc->fixed_link) {
		miid = device_get_softc(sc->miibus);
		mii_tick(miid);
	}
}

static bool
enetc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

static int
enetc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct enetc_softc *sc;
	int val;

	sc = iflib_get_softc(device_get_softc(dev));

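	/* Serialize access to the shared MDIO registers. */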
	mtx_lock(&sc->mii_lock);
	val = enetc_mdio_read(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
	    phy, reg);
	mtx_unlock(&sc->mii_lock);

	return (val);
}

static int
enetc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct enetc_softc *sc;
	int ret;

	sc = iflib_get_softc(device_get_softc(dev));

	mtx_lock(&sc->mii_lock);
	ret = enetc_mdio_write(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
	    phy, reg, data);
	mtx_unlock(&sc->mii_lock);

	return (ret);
}

static void
enetc_miibus_linkchg(device_t dev)
{

	enetc_miibus_statchg(dev);
}

static void
enetc_miibus_statchg(device_t dev)
{
	struct enetc_softc *sc;
	struct mii_data *miid;
	int link_state, baudrate;

	sc = iflib_get_softc(device_get_softc(dev));
	miid = device_get_softc(sc->miibus);

	baudrate = ifmedia_baudrate(miid->mii_media_active);
	if (miid->mii_media_status & IFM_AVALID) {
		if (miid->mii_media_status & IFM_ACTIVE)
			link_state = LINK_STATE_UP;
		else
			link_state = LINK_STATE_DOWN;
	} else {
		link_state = LINK_STATE_UNKNOWN;
	}

	iflib_link_state_change(sc->ctx, link_state, baudrate);
}

static int
enetc_media_change(if_t ifp)
{
	struct enetc_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(if_getsoftc(ifp));
	miid = device_get_softc(sc->miibus);

	mii_mediachg(miid);
	return (0);
}

static void
enetc_media_status(if_t ifp, struct ifmediareq* ifmr)
{
	struct enetc_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(if_getsoftc(ifp));
	miid = device_get_softc(sc->miibus);

	mii_pollstat(miid);

	ifmr->ifm_active = miid->mii_media_active;
	ifmr->ifm_status = miid->mii_media_status;
}

static int
enetc_fixed_media_change(if_t ifp)
{

	if_printf(ifp, "Can't change media in fixed-link mode.\n");
	return (0);
}

static void
enetc_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(if_getsoftc(ifp));

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media;
}