xref: /freebsd/sys/dev/enetc/if_enetc.c (revision 11a9117871e6037ae7b8011b243939322efce569)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Alstom Group.
5  * Copyright (c) 2021 Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/param.h>
29 #include <sys/bus.h>
30 #include <sys/endian.h>
31 #include <sys/kernel.h>
32 #include <sys/module.h>
33 #include <sys/rman.h>
34 #include <sys/socket.h>
35 #include <sys/sockio.h>
36 
37 #include <machine/bus.h>
38 #include <machine/resource.h>
39 
40 #include <net/ethernet.h>
41 #include <net/if.h>
42 #include <net/if_dl.h>
43 #include <net/if_var.h>
44 #include <net/if_types.h>
45 #include <net/if_media.h>
46 #include <net/iflib.h>
47 
48 #include <dev/enetc/enetc_hw.h>
49 #include <dev/enetc/enetc.h>
50 #include <dev/enetc/enetc_mdio.h>
51 #include <dev/mii/mii.h>
52 #include <dev/mii/miivar.h>
53 #include <dev/pci/pcireg.h>
54 #include <dev/pci/pcivar.h>
55 
56 #include <dev/ofw/ofw_bus.h>
57 #include <dev/ofw/ofw_bus_subr.h>
58 
59 #include "ifdi_if.h"
60 #include "miibus_if.h"
61 
62 static device_register_t		enetc_register;
63 
64 static ifdi_attach_pre_t		enetc_attach_pre;
65 static ifdi_attach_post_t		enetc_attach_post;
66 static ifdi_detach_t			enetc_detach;
67 
68 static ifdi_tx_queues_alloc_t		enetc_tx_queues_alloc;
69 static ifdi_rx_queues_alloc_t		enetc_rx_queues_alloc;
70 static ifdi_queues_free_t		enetc_queues_free;
71 
72 static ifdi_init_t			enetc_init;
73 static ifdi_stop_t			enetc_stop;
74 
75 static ifdi_msix_intr_assign_t		enetc_msix_intr_assign;
76 static ifdi_tx_queue_intr_enable_t	enetc_tx_queue_intr_enable;
77 static ifdi_rx_queue_intr_enable_t	enetc_rx_queue_intr_enable;
78 static ifdi_intr_enable_t		enetc_intr_enable;
79 static ifdi_intr_disable_t		enetc_intr_disable;
80 
81 static int	enetc_isc_txd_encap(void*, if_pkt_info_t);
82 static void	enetc_isc_txd_flush(void*, uint16_t, qidx_t);
83 static int	enetc_isc_txd_credits_update(void*, uint16_t, bool);
84 static int	enetc_isc_rxd_available(void*, uint16_t, qidx_t, qidx_t);
85 static int	enetc_isc_rxd_pkt_get(void*, if_rxd_info_t);
86 static void	enetc_isc_rxd_refill(void*, if_rxd_update_t);
87 static void	enetc_isc_rxd_flush(void*, uint16_t, uint8_t, qidx_t);
88 
89 static void	enetc_vlan_register(if_ctx_t, uint16_t);
90 static void	enetc_vlan_unregister(if_ctx_t, uint16_t);
91 
92 static uint64_t	enetc_get_counter(if_ctx_t, ift_counter);
93 static int	enetc_promisc_set(if_ctx_t, int);
94 static int	enetc_mtu_set(if_ctx_t, uint32_t);
95 static void	enetc_setup_multicast(if_ctx_t);
96 static void	enetc_timer(if_ctx_t, uint16_t);
97 static void	enetc_update_admin_status(if_ctx_t);
98 static bool	enetc_if_needs_restart(if_ctx_t, enum iflib_restart_event);
99 
100 static miibus_readreg_t		enetc_miibus_readreg;
101 static miibus_writereg_t	enetc_miibus_writereg;
102 static miibus_linkchg_t		enetc_miibus_linkchg;
103 static miibus_statchg_t		enetc_miibus_statchg;
104 
105 static int			enetc_media_change(if_t);
106 static void			enetc_media_status(if_t, struct ifmediareq*);
107 
108 static int			enetc_fixed_media_change(if_t);
109 static void			enetc_fixed_media_status(if_t, struct ifmediareq*);
110 
111 static void			enetc_max_nqueues(struct enetc_softc*, int*, int*);
112 static int			enetc_setup_phy(struct enetc_softc*);
113 
114 static void			enetc_get_hwaddr(struct enetc_softc*);
115 static void			enetc_set_hwaddr(struct enetc_softc*);
116 static int			enetc_setup_rss(struct enetc_softc*);
117 
118 static void			enetc_init_hw(struct enetc_softc*);
119 static void			enetc_init_ctrl(struct enetc_softc*);
120 static void			enetc_init_tx(struct enetc_softc*);
121 static void			enetc_init_rx(struct enetc_softc*);
122 
123 static int			enetc_ctrl_send(struct enetc_softc*,
124 				    uint16_t, uint16_t, iflib_dma_info_t);
125 
/* Driver version string reported through iflib. */
static const char enetc_driver_version[] = "1.0.0";

/* PCI IDs this driver attaches to; matched by iflib_device_probe(). */
static const pci_vendor_info_t enetc_vendor_info_array[] = {
	PVID(PCI_VENDOR_FREESCALE, ENETC_DEV_ID_PF,
	    "Freescale ENETC PCIe Gigabit Ethernet Controller"),
	PVID_END
};

/* Interface capabilities advertised to the stack (all enabled by default). */
#define ENETC_IFCAPS (IFCAP_VLAN_MTU | IFCAP_RXCSUM | IFCAP_JUMBO_MTU | \
	IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)
136 
/*
 * Newbus method table.  Probe/attach/detach and power management are
 * delegated to iflib; the miibus_* methods service the attached PHY and
 * the bus_* methods forward to the generic implementations so that the
 * miibus child can allocate resources through this device.
 */
static device_method_t enetc_methods[] = {
	DEVMETHOD(device_register,	enetc_register),
	DEVMETHOD(device_probe,		iflib_device_probe),
	DEVMETHOD(device_attach,	iflib_device_attach),
	DEVMETHOD(device_detach,	iflib_device_detach),
	DEVMETHOD(device_shutdown,	iflib_device_shutdown),
	DEVMETHOD(device_suspend,	iflib_device_suspend),
	DEVMETHOD(device_resume,	iflib_device_resume),

	DEVMETHOD(miibus_readreg,	enetc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	enetc_miibus_writereg),
	DEVMETHOD(miibus_linkchg,	enetc_miibus_linkchg),
	DEVMETHOD(miibus_statchg,	enetc_miibus_statchg),

	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
	DEVMETHOD(bus_release_resource,		bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_adjust_resource,		bus_generic_adjust_resource),
	DEVMETHOD(bus_alloc_resource,		bus_generic_alloc_resource),

	DEVMETHOD_END
};

static driver_t enetc_driver = {
	"enetc", enetc_methods, sizeof(struct enetc_softc)
};
165 
DRIVER_MODULE(miibus, enetc, miibus_fdt_driver, NULL, NULL);
/* Make sure miibus gets processed first. */
DRIVER_MODULE_ORDERED(enetc, pci, enetc_driver, NULL, NULL, SI_ORDER_ANY);
MODULE_VERSION(enetc, 1);

IFLIB_PNP_INFO(pci, enetc, enetc_vendor_info_array);

MODULE_DEPEND(enetc, ether, 1, 1, 1);
MODULE_DEPEND(enetc, iflib, 1, 1, 1);
MODULE_DEPEND(enetc, miibus, 1, 1, 1);
176 
/*
 * iflib device-interface method table: maps the IFDI_* entry points
 * onto this driver's implementations.
 */
static device_method_t enetc_iflib_methods[] = {
	DEVMETHOD(ifdi_attach_pre,		enetc_attach_pre),
	DEVMETHOD(ifdi_attach_post,		enetc_attach_post),
	DEVMETHOD(ifdi_detach,			enetc_detach),

	DEVMETHOD(ifdi_init,			enetc_init),
	DEVMETHOD(ifdi_stop,			enetc_stop),

	DEVMETHOD(ifdi_tx_queues_alloc,		enetc_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc,		enetc_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free,		enetc_queues_free),

	DEVMETHOD(ifdi_msix_intr_assign,	enetc_msix_intr_assign),
	DEVMETHOD(ifdi_tx_queue_intr_enable,	enetc_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable,	enetc_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable,		enetc_intr_enable),
	DEVMETHOD(ifdi_intr_disable,		enetc_intr_disable),

	DEVMETHOD(ifdi_vlan_register,		enetc_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister,		enetc_vlan_unregister),

	DEVMETHOD(ifdi_get_counter,		enetc_get_counter),
	DEVMETHOD(ifdi_mtu_set,			enetc_mtu_set),
	DEVMETHOD(ifdi_multi_set,		enetc_setup_multicast),
	DEVMETHOD(ifdi_promisc_set,		enetc_promisc_set),
	DEVMETHOD(ifdi_timer,			enetc_timer),
	DEVMETHOD(ifdi_update_admin_status,	enetc_update_admin_status),

	DEVMETHOD(ifdi_needs_restart,		enetc_if_needs_restart),

	DEVMETHOD_END
};

static driver_t enetc_iflib_driver = {
	"enetc", enetc_iflib_methods, sizeof(struct enetc_softc)
};
213 
/* Descriptor-level TX/RX entry points used by iflib's fast path. */
static struct if_txrx enetc_txrx = {
	.ift_txd_encap = enetc_isc_txd_encap,
	.ift_txd_flush = enetc_isc_txd_flush,
	.ift_txd_credits_update = enetc_isc_txd_credits_update,
	.ift_rxd_available = enetc_isc_rxd_available,
	.ift_rxd_pkt_get = enetc_isc_rxd_pkt_get,
	.ift_rxd_refill = enetc_isc_rxd_refill,
	.ift_rxd_flush = enetc_isc_rxd_flush
};

/*
 * Shared context template returned from enetc_register(); describes
 * DMA alignment/size limits, queue counts and descriptor count bounds
 * that iflib uses to size its rings.
 */
static struct if_shared_ctx enetc_sctx_init = {
	.isc_magic = IFLIB_MAGIC,

	.isc_q_align = ENETC_RING_ALIGN,

	.isc_tx_maxsize = ENETC_MAX_FRAME_LEN,
	.isc_tx_maxsegsize = PAGE_SIZE,

	.isc_rx_maxsize = ENETC_MAX_FRAME_LEN,
	.isc_rx_maxsegsize = ENETC_MAX_FRAME_LEN,
	.isc_rx_nsegments = ENETC_MAX_SCATTER,

	.isc_admin_intrcnt = 0,

	.isc_nfl = 1,
	.isc_nrxqs = 1,
	.isc_ntxqs = 1,

	.isc_vendor_info = enetc_vendor_info_array,
	.isc_driver_version = enetc_driver_version,
	.isc_driver = &enetc_iflib_driver,

	.isc_flags = IFLIB_DRIVER_MEDIA | IFLIB_PRESERVE_TX_INDICES,
	.isc_ntxd_min = {ENETC_MIN_DESC},
	.isc_ntxd_max = {ENETC_MAX_DESC},
	.isc_ntxd_default = {ENETC_DEFAULT_DESC},
	.isc_nrxd_min = {ENETC_MIN_DESC},
	.isc_nrxd_max = {ENETC_MAX_DESC},
	.isc_nrxd_default = {ENETC_DEFAULT_DESC}
};
254 
255 static void*
enetc_register(device_t dev)256 enetc_register(device_t dev)
257 {
258 
259 	if (!ofw_bus_status_okay(dev))
260 		return (NULL);
261 
262 	return (&enetc_sctx_init);
263 }
264 
265 static void
enetc_max_nqueues(struct enetc_softc * sc,int * max_tx_nqueues,int * max_rx_nqueues)266 enetc_max_nqueues(struct enetc_softc *sc, int *max_tx_nqueues,
267     int *max_rx_nqueues)
268 {
269 	uint32_t val;
270 
271 	val = ENETC_PORT_RD4(sc, ENETC_PCAPR0);
272 	*max_tx_nqueues = MIN(ENETC_PCAPR0_TXBDR(val), ENETC_MAX_QUEUES);
273 	*max_rx_nqueues = MIN(ENETC_PCAPR0_RXBDR(val), ENETC_MAX_QUEUES);
274 }
275 
276 static int
enetc_setup_fixed(struct enetc_softc * sc,phandle_t node)277 enetc_setup_fixed(struct enetc_softc *sc, phandle_t node)
278 {
279 	ssize_t size;
280 	int speed;
281 
282 	size = OF_getencprop(node, "speed", &speed, sizeof(speed));
283 	if (size <= 0) {
284 		device_printf(sc->dev,
285 		    "Device has fixed-link node without link speed specified\n");
286 		return (ENXIO);
287 	}
288 	switch (speed) {
289 	case 10:
290 		speed = IFM_10_T;
291 		break;
292 	case 100:
293 		speed = IFM_100_TX;
294 		break;
295 	case 1000:
296 		speed = IFM_1000_T;
297 		break;
298 	case 2500:
299 		speed = IFM_2500_T;
300 		break;
301 	default:
302 		device_printf(sc->dev, "Unsupported link speed value of %d\n",
303 		    speed);
304 		return (ENXIO);
305 	}
306 	speed |= IFM_ETHER;
307 
308 	if (OF_hasprop(node, "full-duplex"))
309 		speed |= IFM_FDX;
310 	else
311 		speed |= IFM_HDX;
312 
313 	sc->fixed_link = true;
314 
315 	ifmedia_init(&sc->fixed_ifmedia, 0, enetc_fixed_media_change,
316 	    enetc_fixed_media_status);
317 	ifmedia_add(&sc->fixed_ifmedia, speed, 0, NULL);
318 	ifmedia_set(&sc->fixed_ifmedia, speed);
319 	sc->shared->isc_media = &sc->fixed_ifmedia;
320 
321 	return (0);
322 }
323 
324 static int
enetc_setup_phy(struct enetc_softc * sc)325 enetc_setup_phy(struct enetc_softc *sc)
326 {
327 	phandle_t node, fixed_link, phy_handle;
328 	struct mii_data *miid;
329 	int phy_addr, error;
330 	ssize_t size;
331 
332 	node = ofw_bus_get_node(sc->dev);
333 	fixed_link = ofw_bus_find_child(node, "fixed-link");
334 	if (fixed_link != 0)
335 		return (enetc_setup_fixed(sc, fixed_link));
336 
337 	size = OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle));
338 	if (size <= 0) {
339 		device_printf(sc->dev,
340 		    "Failed to acquire PHY handle from FDT.\n");
341 		return (ENXIO);
342 	}
343 	phy_handle = OF_node_from_xref(phy_handle);
344 	size = OF_getencprop(phy_handle, "reg", &phy_addr, sizeof(phy_addr));
345 	if (size <= 0) {
346 		device_printf(sc->dev, "Failed to obtain PHY address\n");
347 		return (ENXIO);
348 	}
349 	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(sc->ctx),
350 	    enetc_media_change, enetc_media_status,
351 	    BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
352 	if (error != 0) {
353 		device_printf(sc->dev, "mii_attach failed\n");
354 		return (error);
355 	}
356 	miid = device_get_softc(sc->miibus);
357 	sc->shared->isc_media = &miid->mii_media;
358 
359 	return (0);
360 }
361 
362 static int
enetc_attach_pre(if_ctx_t ctx)363 enetc_attach_pre(if_ctx_t ctx)
364 {
365 	if_softc_ctx_t scctx;
366 	struct enetc_softc *sc;
367 	int error, rid;
368 
369 	sc = iflib_get_softc(ctx);
370 	scctx = iflib_get_softc_ctx(ctx);
371 	sc->ctx = ctx;
372 	sc->dev = iflib_get_dev(ctx);
373 	sc->shared = scctx;
374 
375 	mtx_init(&sc->mii_lock, "enetc_mdio", NULL, MTX_DEF);
376 
377 	pci_save_state(sc->dev);
378 	pcie_flr(sc->dev, 1000, false);
379 	pci_restore_state(sc->dev);
380 
381 	rid = PCIR_BAR(ENETC_BAR_REGS);
382 	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
383 	if (sc->regs == NULL) {
384 		device_printf(sc->dev,
385 		    "Failed to allocate BAR %d\n", ENETC_BAR_REGS);
386 		return (ENXIO);
387 	}
388 
389 	error = iflib_dma_alloc_align(ctx,
390 	    ENETC_MIN_DESC * sizeof(struct enetc_cbd),
391 	    ENETC_RING_ALIGN,
392 	    &sc->ctrl_queue.dma,
393 	    0);
394 	if (error != 0) {
395 		device_printf(sc->dev, "Failed to allocate control ring\n");
396 		goto fail;
397 	}
398 	sc->ctrl_queue.ring = (struct enetc_cbd*)sc->ctrl_queue.dma.idi_vaddr;
399 
400 	scctx->isc_txrx = &enetc_txrx;
401 	scctx->isc_tx_nsegments = ENETC_MAX_SCATTER;
402 	enetc_max_nqueues(sc, &scctx->isc_nrxqsets_max, &scctx->isc_ntxqsets_max);
403 
404 	if (scctx->isc_ntxd[0] % ENETC_DESC_ALIGN != 0) {
405 		device_printf(sc->dev,
406 		    "The number of TX descriptors has to be a multiple of %d\n",
407 		    ENETC_DESC_ALIGN);
408 		error = EINVAL;
409 		goto fail;
410 	}
411 	if (scctx->isc_nrxd[0] % ENETC_DESC_ALIGN != 0) {
412 		device_printf(sc->dev,
413 		    "The number of RX descriptors has to be a multiple of %d\n",
414 		    ENETC_DESC_ALIGN);
415 		error = EINVAL;
416 		goto fail;
417 	}
418 	scctx->isc_txqsizes[0] = scctx->isc_ntxd[0] * sizeof(union enetc_tx_bd);
419 	scctx->isc_rxqsizes[0] = scctx->isc_nrxd[0] * sizeof(union enetc_rx_bd);
420 	scctx->isc_txd_size[0] = sizeof(union enetc_tx_bd);
421 	scctx->isc_rxd_size[0] = sizeof(union enetc_rx_bd);
422 	scctx->isc_tx_csum_flags = 0;
423 	scctx->isc_capabilities = scctx->isc_capenable = ENETC_IFCAPS;
424 
425 	error = enetc_mtu_set(ctx, ETHERMTU);
426 	if (error != 0)
427 		goto fail;
428 
429 	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
430 
431 	error = enetc_setup_phy(sc);
432 	if (error != 0)
433 		goto fail;
434 
435 	enetc_get_hwaddr(sc);
436 
437 	return (0);
438 fail:
439 	enetc_detach(ctx);
440 	return (error);
441 }
442 
443 static int
enetc_attach_post(if_ctx_t ctx)444 enetc_attach_post(if_ctx_t ctx)
445 {
446 
447 	enetc_init_hw(iflib_get_softc(ctx));
448 	return (0);
449 }
450 
451 static int
enetc_detach(if_ctx_t ctx)452 enetc_detach(if_ctx_t ctx)
453 {
454 	struct enetc_softc *sc;
455 	int error = 0, i;
456 
457 	sc = iflib_get_softc(ctx);
458 
459 	for (i = 0; i < sc->rx_num_queues; i++)
460 		iflib_irq_free(ctx, &sc->rx_queues[i].irq);
461 
462 	bus_generic_detach(sc->dev);
463 
464 	if (sc->regs != NULL)
465 		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
466 		    rman_get_rid(sc->regs), sc->regs);
467 
468 	if (sc->ctrl_queue.dma.idi_size != 0)
469 		iflib_dma_free(&sc->ctrl_queue.dma);
470 
471 	mtx_destroy(&sc->mii_lock);
472 
473 	return (error);
474 }
475 
476 static int
enetc_tx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int ntxqs,int ntxqsets)477 enetc_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
478     int ntxqs, int ntxqsets)
479 {
480 	struct enetc_softc *sc;
481 	struct enetc_tx_queue *queue;
482 	int i;
483 
484 	sc = iflib_get_softc(ctx);
485 
486 	MPASS(ntxqs == 1);
487 
488 	sc->tx_queues = mallocarray(sc->tx_num_queues,
489 	    sizeof(struct enetc_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
490 	if (sc->tx_queues == NULL) {
491 		device_printf(sc->dev,
492 		    "Failed to allocate memory for TX queues.\n");
493 		return (ENOMEM);
494 	}
495 
496 	for (i = 0; i < sc->tx_num_queues; i++) {
497 		queue = &sc->tx_queues[i];
498 		queue->sc = sc;
499 		queue->ring = (union enetc_tx_bd*)(vaddrs[i]);
500 		queue->ring_paddr = paddrs[i];
501 		queue->cidx = 0;
502 	}
503 
504 	return (0);
505 }
506 
507 static int
enetc_rx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int nrxqs,int nrxqsets)508 enetc_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
509     int nrxqs, int nrxqsets)
510 {
511 	struct enetc_softc *sc;
512 	struct enetc_rx_queue *queue;
513 	int i;
514 
515 	sc = iflib_get_softc(ctx);
516 	MPASS(nrxqs == 1);
517 
518 	sc->rx_queues = mallocarray(sc->rx_num_queues,
519 	    sizeof(struct enetc_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
520 	if (sc->rx_queues == NULL) {
521 		device_printf(sc->dev,
522 		    "Failed to allocate memory for RX queues.\n");
523 		return (ENOMEM);
524 	}
525 
526 	for (i = 0; i < sc->rx_num_queues; i++) {
527 		queue = &sc->rx_queues[i];
528 		queue->sc = sc;
529 		queue->qid = i;
530 		queue->ring = (union enetc_rx_bd*)(vaddrs[i]);
531 		queue->ring_paddr = paddrs[i];
532 	}
533 
534 	return (0);
535 }
536 
537 static void
enetc_queues_free(if_ctx_t ctx)538 enetc_queues_free(if_ctx_t ctx)
539 {
540 	struct enetc_softc *sc;
541 
542 	sc = iflib_get_softc(ctx);
543 
544 	if (sc->tx_queues != NULL) {
545 		free(sc->tx_queues, M_DEVBUF);
546 		sc->tx_queues = NULL;
547 	}
548 	if (sc->rx_queues != NULL) {
549 		free(sc->rx_queues, M_DEVBUF);
550 		sc->rx_queues = NULL;
551 	}
552 }
553 
554 static void
enetc_get_hwaddr(struct enetc_softc * sc)555 enetc_get_hwaddr(struct enetc_softc *sc)
556 {
557 	struct ether_addr hwaddr;
558 	uint16_t high;
559 	uint32_t low;
560 
561 	low = ENETC_PORT_RD4(sc, ENETC_PSIPMAR0(0));
562 	high = ENETC_PORT_RD2(sc, ENETC_PSIPMAR1(0));
563 
564 	memcpy(&hwaddr.octet[0], &low, 4);
565 	memcpy(&hwaddr.octet[4], &high, 2);
566 
567 	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
568 	    ETHER_IS_MULTICAST(hwaddr.octet) ||
569 	    ETHER_IS_ZERO(hwaddr.octet)) {
570 		ether_gen_addr(iflib_get_ifp(sc->ctx), &hwaddr);
571 		device_printf(sc->dev,
572 		    "Failed to obtain MAC address, using a random one\n");
573 		memcpy(&low, &hwaddr.octet[0], 4);
574 		memcpy(&high, &hwaddr.octet[4], 2);
575 	}
576 
577 	iflib_set_mac(sc->ctx, hwaddr.octet);
578 }
579 
580 static void
enetc_set_hwaddr(struct enetc_softc * sc)581 enetc_set_hwaddr(struct enetc_softc *sc)
582 {
583 	if_t ifp;
584 	uint16_t high;
585 	uint32_t low;
586 	uint8_t *hwaddr;
587 
588 	ifp = iflib_get_ifp(sc->ctx);
589 	hwaddr = (uint8_t*)if_getlladdr(ifp);
590 	low = *((uint32_t*)hwaddr);
591 	high = *((uint16_t*)(hwaddr+4));
592 
593 	ENETC_PORT_WR4(sc, ENETC_PSIPMAR0(0), low);
594 	ENETC_PORT_WR2(sc, ENETC_PSIPMAR1(0), high);
595 }
596 
597 static int
enetc_setup_rss(struct enetc_softc * sc)598 enetc_setup_rss(struct enetc_softc *sc)
599 {
600 	struct iflib_dma_info dma;
601 	int error, i, buckets_num = 0;
602 	uint8_t *rss_table;
603 	uint32_t reg;
604 
605 	reg = ENETC_RD4(sc, ENETC_SIPCAPR0);
606 	if (reg & ENETC_SIPCAPR0_RSS) {
607 		reg = ENETC_RD4(sc, ENETC_SIRSSCAPR);
608 		buckets_num = ENETC_SIRSSCAPR_GET_NUM_RSS(reg);
609         }
610 	if (buckets_num == 0)
611 		return (ENOTSUP);
612 
613 	for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / sizeof(uint32_t); i++) {
614 		arc4rand((uint8_t *)&reg, sizeof(reg), 0);
615 		ENETC_PORT_WR4(sc, ENETC_PRSSK(i), reg);
616 	}
617 
618 	ENETC_WR4(sc, ENETC_SIRBGCR, sc->rx_num_queues);
619 
620 	error = iflib_dma_alloc_align(sc->ctx,
621 	    buckets_num * sizeof(*rss_table),
622 	    ENETC_RING_ALIGN,
623 	    &dma,
624 	    0);
625 	if (error != 0) {
626 		device_printf(sc->dev, "Failed to allocate DMA buffer for RSS\n");
627 		return (error);
628 	}
629 	rss_table = (uint8_t *)dma.idi_vaddr;
630 
631 	for (i = 0; i < buckets_num; i++)
632 		rss_table[i] = i % sc->rx_num_queues;
633 
634 	error = enetc_ctrl_send(sc, (BDCR_CMD_RSS << 8) | BDCR_CMD_RSS_WRITE,
635 	    buckets_num * sizeof(*rss_table), &dma);
636 	if (error != 0)
637 		device_printf(sc->dev, "Failed to setup RSS table\n");
638 
639 	iflib_dma_free(&dma);
640 
641 	return (error);
642 }
643 
644 static int
enetc_ctrl_send(struct enetc_softc * sc,uint16_t cmd,uint16_t size,iflib_dma_info_t dma)645 enetc_ctrl_send(struct enetc_softc *sc, uint16_t cmd, uint16_t size,
646     iflib_dma_info_t dma)
647 {
648 	struct enetc_ctrl_queue *queue;
649 	struct enetc_cbd *desc;
650 	int timeout = 1000;
651 
652 	queue = &sc->ctrl_queue;
653 	desc = &queue->ring[queue->pidx];
654 
655 	if (++queue->pidx == ENETC_MIN_DESC)
656 		queue->pidx = 0;
657 
658 	desc->addr[0] = (uint32_t)dma->idi_paddr;
659 	desc->addr[1] = (uint32_t)(dma->idi_paddr >> 32);
660 	desc->index = 0;
661 	desc->length = (uint16_t)size;
662 	desc->cmd = (uint8_t)cmd;
663 	desc->cls = (uint8_t)(cmd >> 8);
664 	desc->status_flags = 0;
665 
666 	/* Sync command packet, */
667 	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_PREWRITE);
668 	/* and the control ring. */
669 	bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE);
670 	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
671 
672 	while (--timeout != 0) {
673 		DELAY(20);
674 		if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx)
675 			break;
676 	}
677 
678 	if (timeout == 0)
679 		return (ETIMEDOUT);
680 
681 	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD);
682 	return (0);
683 }
684 
/*
 * One-time hardware setup run from enetc_attach_post(): enable the MAC
 * (TX/RX, promiscuous at the port level), configure station interface 0
 * (ring counts, VLAN tag types), set DMA coherency attributes, bring up
 * the control ring and, when supported, RSS.  The write ordering below
 * is significant; do not reorder.
 */
static void
enetc_init_hw(struct enetc_softc *sc)
{
	uint32_t val;
	int error;

	ENETC_PORT_WR4(sc, ENETC_PM0_CMD_CFG,
	    ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
	    ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
	ENETC_PORT_WR4(sc, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
	/* Station interface 0: ring budget and accepted VLAN tag types. */
	val = ENETC_PSICFGR0_SET_TXBDR(sc->tx_num_queues);
	val |= ENETC_PSICFGR0_SET_RXBDR(sc->rx_num_queues);
	val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
	ENETC_PORT_WR4(sc, ENETC_PSICFGR0(0), val);
	ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1));
	ENETC_PORT_WR4(sc, ENETC_PVCLCTR,  ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
	ENETC_PORT_WR4(sc, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
	ENETC_PORT_WR4(sc, ENETC_PAR_PORT_CFG, ENETC_PAR_PORT_L4CD);
	ENETC_PORT_WR4(sc, ENETC_PMR, ENETC_PMR_SI0EN | ENETC_PMR_PSPEED_1000M);

	/* Coherent DMA for ring reads/writes; MSI attributes on SICAR1. */
	ENETC_WR4(sc, ENETC_SICAR0,
	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
	ENETC_WR4(sc, ENETC_SICAR1, ENETC_SICAR_MSI);
	ENETC_WR4(sc, ENETC_SICAR2,
	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_init_ctrl(sc);
	/* Enable the SI; set the RSS enable bit only if setup succeeded. */
	error = enetc_setup_rss(sc);
	if (error != 0)
		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN);
	else
		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN | ENETC_SIMR_RSSE);

}
719 
720 static void
enetc_init_ctrl(struct enetc_softc * sc)721 enetc_init_ctrl(struct enetc_softc *sc)
722 {
723 	struct enetc_ctrl_queue *queue = &sc->ctrl_queue;
724 
725 	ENETC_WR4(sc, ENETC_SICBDRBAR0,
726 	    (uint32_t)queue->dma.idi_paddr);
727 	ENETC_WR4(sc, ENETC_SICBDRBAR1,
728 	    (uint32_t)(queue->dma.idi_paddr >> 32));
729 	ENETC_WR4(sc, ENETC_SICBDRLENR,
730 	    queue->dma.idi_size / sizeof(struct enetc_cbd));
731 
732 	queue->pidx = 0;
733 	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
734 	ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx);
735 	ENETC_WR4(sc, ENETC_SICBDRMR, ENETC_SICBDRMR_EN);
736 }
737 
738 static void
enetc_init_tx(struct enetc_softc * sc)739 enetc_init_tx(struct enetc_softc *sc)
740 {
741 	struct enetc_tx_queue *queue;
742 	int i;
743 
744 	for (i = 0; i < sc->tx_num_queues; i++) {
745 		queue = &sc->tx_queues[i];
746 
747 		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR0,
748 		    (uint32_t)queue->ring_paddr);
749 		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR1,
750 		    (uint32_t)(queue->ring_paddr >> 32));
751 		ENETC_TXQ_WR4(sc, i, ENETC_TBLENR, sc->tx_queue_size);
752 
753 		/*
754 		 * Even though it is undoccumented resetting the TX ring
755 		 * indices results in TX hang.
756 		 * Do the same as Linux and simply keep those unchanged
757 		 * for the drivers lifetime.
758 		 */
759 #if 0
760 		ENETC_TXQ_WR4(sc, i, ENETC_TBPIR, 0);
761 		ENETC_TXQ_WR4(sc, i, ENETC_TBCIR, 0);
762 #endif
763 		ENETC_TXQ_WR4(sc, i, ENETC_TBMR, ENETC_TBMR_EN);
764 	}
765 
766 }
767 
768 static void
enetc_init_rx(struct enetc_softc * sc)769 enetc_init_rx(struct enetc_softc *sc)
770 {
771 	struct enetc_rx_queue *queue;
772 	uint32_t rx_buf_size;
773 	int i;
774 
775 	rx_buf_size = iflib_get_rx_mbuf_sz(sc->ctx);
776 
777 	for (i = 0; i < sc->rx_num_queues; i++) {
778 		queue = &sc->rx_queues[i];
779 
780 		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR0,
781 		    (uint32_t)queue->ring_paddr);
782 		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR1,
783 		    (uint32_t)(queue->ring_paddr >> 32));
784 		ENETC_RXQ_WR4(sc, i, ENETC_RBLENR, sc->rx_queue_size);
785 		ENETC_RXQ_WR4(sc, i, ENETC_RBBSR, rx_buf_size);
786 		ENETC_RXQ_WR4(sc, i, ENETC_RBPIR, 0);
787 		ENETC_RXQ_WR4(sc, i, ENETC_RBCIR, 0);
788 		queue->enabled = false;
789 	}
790 }
791 
792 static u_int
enetc_hash_mac(void * arg,struct sockaddr_dl * sdl,u_int cnt)793 enetc_hash_mac(void *arg, struct sockaddr_dl *sdl, u_int cnt)
794 {
795 	uint64_t *bitmap = arg;
796 	uint64_t address = 0;
797 	uint8_t hash = 0;
798 	bool bit;
799 	int i, j;
800 
801 	bcopy(LLADDR(sdl), &address, ETHER_ADDR_LEN);
802 
803 	/*
804 	 * The six bit hash is calculated by xoring every
805 	 * 6th bit of the address.
806 	 * It is then used as an index in a bitmap that is
807 	 * written to the device.
808 	 */
809 	for (i = 0; i < 6; i++) {
810 		bit = 0;
811 		for (j = 0; j < 8; j++)
812 			bit ^= !!(address & BIT(i + j*6));
813 
814 		hash |= bit << i;
815 	}
816 
817 	*bitmap |= (1 << hash);
818 	return (1);
819 }
820 
821 static void
enetc_setup_multicast(if_ctx_t ctx)822 enetc_setup_multicast(if_ctx_t ctx)
823 {
824 	struct enetc_softc *sc;
825 	if_t ifp;
826 	uint64_t bitmap = 0;
827 	uint8_t revid;
828 
829 	sc = iflib_get_softc(ctx);
830 	ifp = iflib_get_ifp(ctx);
831 	revid = pci_get_revid(sc->dev);
832 
833 	if_foreach_llmaddr(ifp, enetc_hash_mac, &bitmap);
834 
835 	/*
836 	 * In revid 1 of this chip the positions multicast and unicast
837 	 * hash filter registers are flipped.
838 	 */
839 	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR0(0, revid == 1), bitmap & UINT32_MAX);
840 	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR1(0), bitmap >> 32);
841 
842 }
843 
844 static uint8_t
enetc_hash_vid(uint16_t vid)845 enetc_hash_vid(uint16_t vid)
846 {
847 	uint8_t hash = 0;
848 	bool bit;
849 	int i;
850 
851 	for (i = 0;i < 6;i++) {
852 		bit = vid & BIT(i);
853 		bit ^= !!(vid & BIT(i + 6));
854 		hash |= bit << i;
855 	}
856 
857 	return (hash);
858 }
859 
860 static void
enetc_vlan_register(if_ctx_t ctx,uint16_t vid)861 enetc_vlan_register(if_ctx_t ctx, uint16_t vid)
862 {
863 	struct enetc_softc *sc;
864 	uint8_t hash;
865 	uint64_t bitmap;
866 
867 	sc = iflib_get_softc(ctx);
868 	hash = enetc_hash_vid(vid);
869 
870 	/* Check if hash is already present in the bitmap. */
871 	if (++sc->vlan_bitmap[hash] != 1)
872 		return;
873 
874 	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
875 	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
876 	bitmap |= BIT(hash);
877 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
878 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
879 }
880 
881 static void
enetc_vlan_unregister(if_ctx_t ctx,uint16_t vid)882 enetc_vlan_unregister(if_ctx_t ctx, uint16_t vid)
883 {
884 	struct enetc_softc *sc;
885 	uint8_t hash;
886 	uint64_t bitmap;
887 
888 	sc = iflib_get_softc(ctx);
889 	hash = enetc_hash_vid(vid);
890 
891 	MPASS(sc->vlan_bitmap[hash] > 0);
892 	if (--sc->vlan_bitmap[hash] != 0)
893 		return;
894 
895 	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
896 	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
897 	bitmap &= ~BIT(hash);
898 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
899 	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
900 }
901 
902 static void
enetc_init(if_ctx_t ctx)903 enetc_init(if_ctx_t ctx)
904 {
905 	struct enetc_softc *sc;
906 	struct mii_data *miid;
907 	if_t ifp;
908 	uint16_t max_frame_length;
909 	int baudrate;
910 
911 	sc = iflib_get_softc(ctx);
912 	ifp = iflib_get_ifp(ctx);
913 
914 	max_frame_length = sc->shared->isc_max_frame_size;
915 	MPASS(max_frame_length < ENETC_MAX_FRAME_LEN);
916 
917 	/* Set max RX and TX frame lengths. */
918 	ENETC_PORT_WR4(sc, ENETC_PM0_MAXFRM, max_frame_length);
919 	ENETC_PORT_WR4(sc, ENETC_PTCMSDUR(0), max_frame_length);
920 	ENETC_PORT_WR4(sc, ENETC_PTXMBAR, 2 * max_frame_length);
921 
922 	/* Set "VLAN promiscious" mode if filtering is disabled. */
923 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
924 		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
925 		    ENETC_PSIPVMR_SET_VUTA(1) | ENETC_PSIPVMR_SET_VP(1));
926 	else
927 		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
928 		    ENETC_PSIPVMR_SET_VUTA(1));
929 
930 	sc->rbmr = ENETC_RBMR_EN;
931 
932 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
933 		sc->rbmr |= ENETC_RBMR_VTE;
934 
935 	/* Write MAC address to hardware. */
936 	enetc_set_hwaddr(sc);
937 
938 	enetc_init_tx(sc);
939 	enetc_init_rx(sc);
940 
941 	if (sc->fixed_link) {
942 		baudrate = ifmedia_baudrate(sc->fixed_ifmedia.ifm_cur->ifm_media);
943 		iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
944 	} else {
945 		/*
946 		 * Can't return an error from this function, there is not much
947 		 * we can do if this fails.
948 		 */
949 		miid = device_get_softc(sc->miibus);
950 		(void)mii_mediachg(miid);
951 	}
952 
953 	enetc_promisc_set(ctx, if_getflags(ifp));
954 }
955 
956 static void
enetc_disable_txq(struct enetc_softc * sc,int qid)957 enetc_disable_txq(struct enetc_softc *sc, int qid)
958 {
959 	qidx_t cidx, pidx;
960 	int timeout = 10000;	/* this * DELAY(100) = 1s */
961 
962 	/* At this point iflib shouldn't be enquing any more frames. */
963 	pidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBPIR);
964 	cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR);
965 
966 	while (pidx != cidx && timeout--) {
967 		DELAY(100);
968 		cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR);
969 	}
970 
971 	if (timeout == 0)
972 		device_printf(sc->dev,
973 		    "Timeout while waiting for txq%d to stop transmitting packets\n",
974 		    qid);
975 
976 	ENETC_TXQ_WR4(sc, qid, ENETC_TBMR, 0);
977 }
978 
979 static void
enetc_stop(if_ctx_t ctx)980 enetc_stop(if_ctx_t ctx)
981 {
982 	struct enetc_softc *sc;
983 	int i;
984 
985 	sc = iflib_get_softc(ctx);
986 
987 	for (i = 0; i < sc->rx_num_queues; i++)
988 		ENETC_RXQ_WR4(sc, i, ENETC_RBMR, 0);
989 
990 	for (i = 0; i < sc->tx_num_queues; i++)
991 		enetc_disable_txq(sc, i);
992 }
993 
/*
 * Allocate and wire up MSI-X interrupts for all queues.
 *
 * Each RX/TX queue pair shares one hardware vector: RX interrupts are
 * delivered directly on it, while TX completion handling is scheduled
 * as an iflib soft interrupt bound to the same index.  Returns 0 on
 * success or the iflib allocation error.
 */
static int
enetc_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct enetc_softc *sc;
	struct enetc_rx_queue *rx_queue;
	struct enetc_tx_queue *tx_queue;
	int vector = 0, i, error;
	char irq_name[16];

	sc = iflib_get_softc(ctx);

	/* One vector per queue pair; the "+ 1" presumably reserves the
	 * admin/link vector — confirm against the attach path. */
	MPASS(sc->rx_num_queues + 1 <= ENETC_MSIX_COUNT);
	MPASS(sc->rx_num_queues == sc->tx_num_queues);

	for (i = 0; i < sc->rx_num_queues; i++, vector++) {
		rx_queue = &sc->rx_queues[i];
		snprintf(irq_name, sizeof(irq_name), "rxtxq%d", i);
		/* "vector + 1": bus resource IDs (rids) are 1-based. */
		error = iflib_irq_alloc_generic(ctx,
		    &rx_queue->irq, vector + 1, IFLIB_INTR_RXTX,
		    NULL, rx_queue, i, irq_name);
		if (error != 0)
			goto fail;

		/* Route this RX ring's interrupt to the allocated vector. */
		ENETC_WR4(sc, ENETC_SIMSIRRV(i), vector);
		/* Program interrupt coalescing: time and packet thresholds. */
		ENETC_RXQ_WR4(sc, i, ENETC_RBICR1, ENETC_RX_INTR_TIME_THR);
		ENETC_RXQ_WR4(sc, i, ENETC_RBICR0,
		    ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR));
	}
	/*
	 * Restart vector numbering: TX ring i shares the hardware vector
	 * of RX ring i, so TX completions arrive on the queue-pair IRQ.
	 */
	vector = 0;
	for (i = 0;i < sc->tx_num_queues; i++, vector++) {
		tx_queue = &sc->tx_queues[i];
		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
		/* Soft IRQ only: no hardware resource is allocated here. */
		iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
		    IFLIB_INTR_TX, tx_queue, i, irq_name);

		ENETC_WR4(sc, ENETC_SIMSITRV(i), vector);
	}

	return (0);
fail:
	/*
	 * NOTE(review): this also passes never-allocated irq structures
	 * (queues past the failure point) to iflib_irq_free(); presumed
	 * safe because the softc is zero-initialized — verify.
	 */
	for (i = 0; i < sc->rx_num_queues; i++) {
		rx_queue = &sc->rx_queues[i];
		iflib_irq_free(ctx, &rx_queue->irq);
	}
	return (error);
}
1040 
1041 static int
enetc_tx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)1042 enetc_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1043 {
1044 	struct enetc_softc *sc;
1045 
1046 	sc = iflib_get_softc(ctx);
1047 	ENETC_TXQ_RD4(sc, qid, ENETC_TBIDR);
1048 	return (0);
1049 }
1050 
1051 static int
enetc_rx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)1052 enetc_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1053 {
1054 	struct enetc_softc *sc;
1055 
1056 	sc = iflib_get_softc(ctx);
1057 	ENETC_RXQ_RD4(sc, qid, ENETC_RBIDR);
1058 	return (0);
1059 }
1060 static void
enetc_intr_enable(if_ctx_t ctx)1061 enetc_intr_enable(if_ctx_t ctx)
1062 {
1063 	struct enetc_softc *sc;
1064 	int i;
1065 
1066 	sc = iflib_get_softc(ctx);
1067 
1068 	for (i = 0; i < sc->rx_num_queues; i++)
1069 		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
1070 
1071 	for (i = 0; i < sc->tx_num_queues; i++)
1072 		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, ENETC_TBIER_TXF);
1073 }
1074 
1075 static void
enetc_intr_disable(if_ctx_t ctx)1076 enetc_intr_disable(if_ctx_t ctx)
1077 {
1078 	struct enetc_softc *sc;
1079 	int i;
1080 
1081 	sc = iflib_get_softc(ctx);
1082 
1083 	for (i = 0; i < sc->rx_num_queues; i++)
1084 		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, 0);
1085 
1086 	for (i = 0; i < sc->tx_num_queues; i++)
1087 		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, 0);
1088 }
1089 
/*
 * iflib TX encapsulation callback: build the buffer descriptor chain
 * for one frame on the ring selected by ipi_qsidx.
 *
 * Layout: the first BD carries the frame length, offload flags and the
 * first DMA segment; an optional extension BD follows for VLAN tag
 * insertion; then one BD per remaining segment.  The last BD written
 * gets the F (final) flag, and the advanced producer index is returned
 * to iflib via ipi_new_pidx.  Always returns 0.
 */
static int
enetc_isc_txd_encap(void *data, if_pkt_info_t ipi)
{
	struct enetc_softc *sc = data;
	struct enetc_tx_queue *queue;
	union enetc_tx_bd *desc;
	bus_dma_segment_t *segs;
	qidx_t pidx, queue_len;
	qidx_t i = 0;

	queue = &sc->tx_queues[ipi->ipi_qsidx];
	segs = ipi->ipi_segs;
	pidx = ipi->ipi_pidx;
	queue_len = sc->tx_queue_size;

	/*
	 * First descriptor is special. We use it to set frame
	 * related information and offloads, e.g. VLAN tag.
	 */
	desc = &queue->ring[pidx];
	bzero(desc, sizeof(*desc));
	desc->frm_len = ipi->ipi_len;
	desc->addr = segs[i].ds_addr;
	desc->buf_len = segs[i].ds_len;
	/* Request a completion interrupt only when iflib asks for one. */
	if (ipi->ipi_flags & IPI_TX_INTR)
		desc->flags = ENETC_TXBD_FLAGS_FI;

	i++;
	if (++pidx == queue_len)
		pidx = 0;

	if (ipi->ipi_mflags & M_VLANTAG) {
		/* VLAN tag is inserted in a separate descriptor. */
		desc->flags |= ENETC_TXBD_FLAGS_EX;
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));
		desc->ext.vid = ipi->ipi_vtag;
		desc->ext.e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
		if (++pidx == queue_len)
			pidx = 0;
	}

	/* Now add remaining descriptors. */
	for (;i < ipi->ipi_nsegs; i++) {
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));
		desc->addr = segs[i].ds_addr;
		desc->buf_len = segs[i].ds_len;

		if (++pidx == queue_len)
			pidx = 0;
	}

	/* Mark the last BD written as final so HW knows the chain ends. */
	desc->flags |= ENETC_TXBD_FLAGS_F;
	ipi->ipi_new_pidx = pidx;

	return (0);
}
1148 
1149 static void
enetc_isc_txd_flush(void * data,uint16_t qid,qidx_t pidx)1150 enetc_isc_txd_flush(void *data, uint16_t qid, qidx_t pidx)
1151 {
1152 	struct enetc_softc *sc = data;
1153 
1154 	ENETC_TXQ_WR4(sc, qid, ENETC_TBPIR, pidx);
1155 }
1156 
1157 static int
enetc_isc_txd_credits_update(void * data,uint16_t qid,bool clear)1158 enetc_isc_txd_credits_update(void *data, uint16_t qid, bool clear)
1159 {
1160 	struct enetc_softc *sc = data;
1161 	struct enetc_tx_queue *queue;
1162 	int cidx, hw_cidx, count;
1163 
1164 	queue = &sc->tx_queues[qid];
1165 	hw_cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR) & ENETC_TBCIR_IDX_MASK;
1166 	cidx = queue->cidx;
1167 
1168 	/*
1169 	 * RM states that the ring can hold at most ring_size - 1 descriptors.
1170 	 * Thanks to that we can assume that the ring is empty if cidx == pidx.
1171 	 * This requirement is guaranteed implicitly by iflib as it will only
1172 	 * encap a new frame if we have at least nfrags + 2 descriptors available
1173 	 * on the ring. This driver uses at most one additional descriptor for
1174 	 * VLAN tag insertion.
1175 	 * Also RM states that the TBCIR register is only updated once all
1176 	 * descriptors in the chain have been processed.
1177 	 */
1178 	if (cidx == hw_cidx)
1179 		return (0);
1180 
1181 	if (!clear)
1182 		return (1);
1183 
1184 	count = hw_cidx - cidx;
1185 	if (count < 0)
1186 		count += sc->tx_queue_size;
1187 
1188 	queue->cidx = hw_cidx;
1189 
1190 	return (count);
1191 }
1192 
1193 static int
enetc_isc_rxd_available(void * data,uint16_t qid,qidx_t pidx,qidx_t budget)1194 enetc_isc_rxd_available(void *data, uint16_t qid, qidx_t pidx, qidx_t budget)
1195 {
1196 	struct enetc_softc *sc = data;
1197 	struct enetc_rx_queue *queue;
1198 	qidx_t hw_pidx, queue_len;
1199 	union enetc_rx_bd *desc;
1200 	int count = 0;
1201 
1202 	queue = &sc->rx_queues[qid];
1203 	desc = &queue->ring[pidx];
1204 	queue_len = sc->rx_queue_size;
1205 
1206 	if (desc->r.lstatus == 0)
1207 		return (0);
1208 
1209 	if (budget == 1)
1210 		return (1);
1211 
1212 	hw_pidx = ENETC_RXQ_RD4(sc, qid, ENETC_RBPIR);
1213 	while (pidx != hw_pidx && count < budget) {
1214 		desc = &queue->ring[pidx];
1215 		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
1216 			count++;
1217 
1218 		if (++pidx == queue_len)
1219 			pidx = 0;
1220 	}
1221 
1222 	return (count);
1223 }
1224 
/*
 * iflib RX packet-get callback: translate a completed descriptor chain
 * starting at iri_cidx into the if_rxd_info the stack consumes.
 *
 * Fills in RSS hash, IP checksum status, extracted VLAN tag and the
 * per-fragment index/length list.  Returns 0 on success, EAGAIN if the
 * chain is not fully written yet, EBADMSG on a hardware-flagged error.
 */
static int
enetc_isc_rxd_pkt_get(void *data, if_rxd_info_t ri)
{
	struct enetc_softc *sc = data;
	struct enetc_rx_queue *queue;
	union enetc_rx_bd *desc;
	uint16_t buf_len, pkt_size = 0;
	qidx_t cidx, queue_len;
	uint32_t status;
	int i;

	cidx = ri->iri_cidx;
	queue = &sc->rx_queues[ri->iri_qsidx];
	desc = &queue->ring[cidx];
	/* Status is captured from the first BD of the chain. */
	status = desc->r.lstatus;
	queue_len = sc->rx_queue_size;

	/*
	 * Ready bit will be set only when all descriptors
	 * in the chain have been processed.
	 */
	if ((status & ENETC_RXBD_LSTATUS_R) == 0)
		return (EAGAIN);

	/* Pass RSS hash. */
	if (status & ENETC_RXBD_FLAG_RSSV) {
		ri->iri_flowid = desc->r.rss_hash;
		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
	}

	/* Pass IP checksum status. */
	ri->iri_csum_flags = CSUM_IP_CHECKED;
	if ((desc->r.parse_summary & ENETC_RXBD_PARSER_ERROR) == 0)
		ri->iri_csum_flags |= CSUM_IP_VALID;

	/* Pass extracted VLAN tag. */
	if (status & ENETC_RXBD_FLAG_VLAN) {
		ri->iri_vtag = desc->r.vlan_opt;
		ri->iri_flags = M_VLANTAG;
	}

	/*
	 * Collect the fragments of this frame; the chain ends at the BD
	 * carrying the F (final) flag.  ENETC_MAX_SCATTER bounds the walk.
	 */
	for (i = 0; i < ENETC_MAX_SCATTER; i++) {
		buf_len = desc->r.buf_len;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = buf_len;
		pkt_size += buf_len;
		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
			break;

		if (++cidx == queue_len)
			cidx = 0;

		desc = &queue->ring[cidx];
	}
	ri->iri_nfrags = i + 1;
	ri->iri_len = pkt_size;

	/* The final BD must have been reached within ENETC_MAX_SCATTER. */
	MPASS(desc->r.lstatus & ENETC_RXBD_LSTATUS_F);
	/* Error bits are checked on the first BD's status snapshot. */
	if (status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))
		return (EBADMSG);

	return (0);
}
1288 
1289 static void
enetc_isc_rxd_refill(void * data,if_rxd_update_t iru)1290 enetc_isc_rxd_refill(void *data, if_rxd_update_t iru)
1291 {
1292 	struct enetc_softc *sc = data;
1293 	struct enetc_rx_queue *queue;
1294 	union enetc_rx_bd *desc;
1295 	qidx_t pidx, queue_len;
1296 	uint64_t *paddrs;
1297 	int i, count;
1298 
1299 	queue = &sc->rx_queues[iru->iru_qsidx];
1300 	paddrs = iru->iru_paddrs;
1301 	pidx = iru->iru_pidx;
1302 	count = iru->iru_count;
1303 	queue_len = sc->rx_queue_size;
1304 
1305 	for (i = 0; i < count; i++) {
1306 		desc = &queue->ring[pidx];
1307 		bzero(desc, sizeof(*desc));
1308 
1309 		desc->w.addr = paddrs[i];
1310 		if (++pidx == queue_len)
1311 			pidx = 0;
1312 	}
1313 	/*
1314 	 * After enabling the queue NIC will prefetch the first
1315 	 * 8 descriptors. It probably assumes that the RX is fully
1316 	 * refilled when cidx == pidx.
1317 	 * Enable it only if we have enough descriptors ready on the ring.
1318 	 */
1319 	if (!queue->enabled && pidx >= 8) {
1320 		ENETC_RXQ_WR4(sc, iru->iru_qsidx, ENETC_RBMR, sc->rbmr);
1321 		queue->enabled = true;
1322 	}
1323 }
1324 
1325 static void
enetc_isc_rxd_flush(void * data,uint16_t qid,uint8_t flid,qidx_t pidx)1326 enetc_isc_rxd_flush(void *data, uint16_t qid, uint8_t flid, qidx_t pidx)
1327 {
1328 	struct enetc_softc *sc = data;
1329 
1330 	ENETC_RXQ_WR4(sc, qid, ENETC_RBCIR, pidx);
1331 }
1332 
1333 static uint64_t
enetc_get_counter(if_ctx_t ctx,ift_counter cnt)1334 enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
1335 {
1336 	struct enetc_softc *sc;
1337 	if_t ifp;
1338 
1339 	sc = iflib_get_softc(ctx);
1340 	ifp = iflib_get_ifp(ctx);
1341 
1342 	switch (cnt) {
1343 	case IFCOUNTER_IERRORS:
1344 		return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
1345 	case IFCOUNTER_OERRORS:
1346 		return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
1347 	default:
1348 		return (if_get_counter_default(ifp, cnt));
1349 	}
1350 }
1351 
1352 static int
enetc_mtu_set(if_ctx_t ctx,uint32_t mtu)1353 enetc_mtu_set(if_ctx_t ctx, uint32_t mtu)
1354 {
1355 	struct enetc_softc *sc = iflib_get_softc(ctx);
1356 	uint32_t max_frame_size;
1357 
1358 	max_frame_size = mtu +
1359 	    ETHER_HDR_LEN +
1360 	    ETHER_CRC_LEN +
1361 	    sizeof(struct ether_vlan_header);
1362 
1363 	if (max_frame_size > ENETC_MAX_FRAME_LEN)
1364 		return (EINVAL);
1365 
1366 	sc->shared->isc_max_frame_size = max_frame_size;
1367 
1368 	return (0);
1369 }
1370 
1371 static int
enetc_promisc_set(if_ctx_t ctx,int flags)1372 enetc_promisc_set(if_ctx_t ctx, int flags)
1373 {
1374 	struct enetc_softc *sc;
1375 	uint32_t reg = 0;
1376 
1377 	sc = iflib_get_softc(ctx);
1378 
1379 	if (flags & IFF_PROMISC)
1380 		reg = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
1381 	else if (flags & IFF_ALLMULTI)
1382 		reg = ENETC_PSIPMR_SET_MP(0);
1383 
1384 	ENETC_PORT_WR4(sc, ENETC_PSIPMR, reg);
1385 
1386 	return (0);
1387 }
1388 
1389 static void
enetc_timer(if_ctx_t ctx,uint16_t qid)1390 enetc_timer(if_ctx_t ctx, uint16_t qid)
1391 {
1392 	/*
1393 	 * Poll PHY status. Do this only for qid 0 to save
1394 	 * some cycles.
1395 	 */
1396 	if (qid == 0)
1397 		iflib_admin_intr_deferred(ctx);
1398 }
1399 
1400 static void
enetc_update_admin_status(if_ctx_t ctx)1401 enetc_update_admin_status(if_ctx_t ctx)
1402 {
1403 	struct enetc_softc *sc;
1404 	struct mii_data *miid;
1405 
1406 	sc = iflib_get_softc(ctx);
1407 
1408 	if (!sc->fixed_link) {
1409 		miid = device_get_softc(sc->miibus);
1410 		mii_tick(miid);
1411 	}
1412 }
1413 
1414 static bool
enetc_if_needs_restart(if_ctx_t ctx __unused,enum iflib_restart_event event)1415 enetc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1416 {
1417 	switch (event) {
1418 	case IFLIB_RESTART_VLAN_CONFIG:
1419 	default:
1420 		return (false);
1421 	}
1422 }
1423 
1424 static int
enetc_miibus_readreg(device_t dev,int phy,int reg)1425 enetc_miibus_readreg(device_t dev, int phy, int reg)
1426 {
1427 	struct enetc_softc *sc;
1428 	int val;
1429 
1430 	sc = iflib_get_softc(device_get_softc(dev));
1431 
1432 	mtx_lock(&sc->mii_lock);
1433 	val = enetc_mdio_read(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
1434 	    phy, reg);
1435 	mtx_unlock(&sc->mii_lock);
1436 
1437 	return (val);
1438 }
1439 
1440 static int
enetc_miibus_writereg(device_t dev,int phy,int reg,int data)1441 enetc_miibus_writereg(device_t dev, int phy, int reg, int data)
1442 {
1443 	struct enetc_softc *sc;
1444 	int ret;
1445 
1446 	sc = iflib_get_softc(device_get_softc(dev));
1447 
1448 	mtx_lock(&sc->mii_lock);
1449 	ret = enetc_mdio_write(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
1450 	    phy, reg, data);
1451 	mtx_unlock(&sc->mii_lock);
1452 
1453 	return (ret);
1454 }
1455 
/*
 * miibus link-change callback.  Link changes are handled identically
 * to generic media status changes, so simply forward to statchg.
 */
static void
enetc_miibus_linkchg(device_t dev)
{

	enetc_miibus_statchg(dev);
}
1462 
1463 static void
enetc_miibus_statchg(device_t dev)1464 enetc_miibus_statchg(device_t dev)
1465 {
1466 	struct enetc_softc *sc;
1467 	struct mii_data *miid;
1468 	int link_state, baudrate;
1469 
1470 	sc = iflib_get_softc(device_get_softc(dev));
1471 	miid = device_get_softc(sc->miibus);
1472 
1473 	baudrate = ifmedia_baudrate(miid->mii_media_active);
1474 	if (miid->mii_media_status & IFM_AVALID) {
1475 		if (miid->mii_media_status & IFM_ACTIVE)
1476 			link_state = LINK_STATE_UP;
1477 		else
1478 			link_state = LINK_STATE_DOWN;
1479 	} else {
1480 		link_state = LINK_STATE_UNKNOWN;
1481 	}
1482 
1483 	iflib_link_state_change(sc->ctx, link_state, baudrate);
1484 
1485 }
1486 
1487 static int
enetc_media_change(if_t ifp)1488 enetc_media_change(if_t ifp)
1489 {
1490 	struct enetc_softc *sc;
1491 	struct mii_data *miid;
1492 
1493 	sc = iflib_get_softc(if_getsoftc(ifp));
1494 	miid = device_get_softc(sc->miibus);
1495 
1496 	mii_mediachg(miid);
1497 	return (0);
1498 }
1499 
1500 static void
enetc_media_status(if_t ifp,struct ifmediareq * ifmr)1501 enetc_media_status(if_t ifp, struct ifmediareq* ifmr)
1502 {
1503 	struct enetc_softc *sc;
1504 	struct mii_data *miid;
1505 
1506 	sc = iflib_get_softc(if_getsoftc(ifp));
1507 	miid = device_get_softc(sc->miibus);
1508 
1509 	mii_pollstat(miid);
1510 
1511 	ifmr->ifm_active = miid->mii_media_active;
1512 	ifmr->ifm_status = miid->mii_media_status;
1513 }
1514 
/*
 * ifmedia change callback for fixed-link configurations.  The media is
 * fixed by configuration, so user requests are rejected with a console
 * message; 0 is returned so the ioctl itself does not fail.
 */
static int
enetc_fixed_media_change(if_t ifp)
{

	if_printf(ifp, "Can't change media in fixed-link mode.\n");
	return (0);
}
1522 static void
enetc_fixed_media_status(if_t ifp,struct ifmediareq * ifmr)1523 enetc_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
1524 {
1525 	struct enetc_softc *sc;
1526 
1527 	sc = iflib_get_softc(if_getsoftc(ifp));
1528 
1529 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1530 	ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media;
1531 	return;
1532 }
1533