xref: /freebsd/sys/dev/al_eth/al_eth.c (revision 63f537551380d2dab29fa402ad1269feae17e594)
1 /*-
2  * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
3  * All rights reserved.
4  *
5  * Developed by Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/kernel.h>
34 #include <sys/kthread.h>
35 #include <sys/lock.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/rman.h>
40 #include <sys/socket.h>
41 #include <sys/sockio.h>
42 #include <sys/sysctl.h>
43 #include <sys/taskqueue.h>
44 
45 #include <machine/atomic.h>
46 
47 #include "opt_inet.h"
48 #include "opt_inet6.h"
49 
50 #include <net/ethernet.h>
51 #include <net/if.h>
52 #include <net/if_var.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <netinet/in.h>
58 #include <net/if_vlan_var.h>
59 #include <netinet/tcp.h>
60 #include <netinet/tcp_lro.h>
61 
62 #ifdef INET
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_var.h>
66 #include <netinet/ip.h>
67 #endif
68 
69 #ifdef INET6
70 #include <netinet/ip6.h>
71 #endif
72 
75 #include <dev/pci/pcireg.h>
76 #include <dev/pci/pcivar.h>
77 
78 #include <dev/mii/mii.h>
79 #include <dev/mii/miivar.h>
80 
81 #include <al_hal_common.h>
82 #include <al_hal_plat_services.h>
83 #include <al_hal_udma_config.h>
84 #include <al_hal_udma_iofic.h>
85 #include <al_hal_udma_debug.h>
86 #include <al_hal_eth.h>
87 
88 #include "al_eth.h"
89 #include "al_init_eth_lm.h"
90 #include "arm/annapurna/alpine/alpine_serdes.h"
91 
92 #include "miibus_if.h"
93 
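/*
 * device_printf_dbg - debug-level device_printf(); the first argument is the
 * device. Output is emitted only when AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG and is
 * serialized with AL_DBG_LOCK()/AL_DBG_UNLOCK().
 */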
94 #define	device_printf_dbg(dev, ...) do {				\
95 	if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK();		\
96 	    device_printf(dev, __VA_ARGS__); AL_DBG_UNLOCK();}		\
97 	} while (0)
98 
99 MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");
100 
101 /* TODO: move these IDs out to a common PCI header file */
102 #define	PCI_VENDOR_ID_ANNAPURNA_LABS	0x1c36
103 #define	PCI_DEVICE_ID_AL_ETH		0x0001
104 #define	PCI_DEVICE_ID_AL_ETH_ADVANCED	0x0002
105 #define	PCI_DEVICE_ID_AL_ETH_NIC	0x0003
106 #define	PCI_DEVICE_ID_AL_ETH_FPGA_NIC	0x0030
107 #define	PCI_DEVICE_ID_AL_CRYPTO		0x0011
108 #define	PCI_DEVICE_ID_AL_CRYPTO_VF	0x8011
109 #define	PCI_DEVICE_ID_AL_RAID_DMA	0x0021
110 #define	PCI_DEVICE_ID_AL_RAID_DMA_VF	0x8021
111 #define	PCI_DEVICE_ID_AL_USB		0x0041
112 
113 #define	MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
114 #define	MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]
115 
116 #define	AL_ETH_MAC_TABLE_UNICAST_IDX_BASE	0
117 #define	AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT	4
118 #define	AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX	(AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
119 						 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)
120 
121 #define	AL_ETH_MAC_TABLE_DROP_IDX		(AL_ETH_FWD_MAC_NUM - 1)
122 #define	AL_ETH_MAC_TABLE_BROADCAST_IDX		(AL_ETH_MAC_TABLE_DROP_IDX - 1)
123 
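/*
 * An RX flow-hash (thash) table entry selects the target UDMA (bits 3:0)
 * and the queue within that UDMA (bits 5:4), per the masks below.
 */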
124 #define	AL_ETH_THASH_UDMA_SHIFT		0
125 #define	AL_ETH_THASH_UDMA_MASK		(0xF << AL_ETH_THASH_UDMA_SHIFT)
126 
127 #define	AL_ETH_THASH_Q_SHIFT		4
128 #define	AL_ETH_THASH_Q_MASK		(0x3 << AL_ETH_THASH_Q_SHIFT)
129 
130 /* the following defines should be moved to hal */
131 #define	AL_ETH_FSM_ENTRY_IPV4_TCP		0
132 #define	AL_ETH_FSM_ENTRY_IPV4_UDP		1
133 #define	AL_ETH_FSM_ENTRY_IPV6_TCP		2
134 #define	AL_ETH_FSM_ENTRY_IPV6_UDP		3
135 #define	AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP	4
136 #define	AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP	5
137 
138 /* FSM DATA format */
139 #define	AL_ETH_FSM_DATA_OUTER_2_TUPLE	0
140 #define	AL_ETH_FSM_DATA_OUTER_4_TUPLE	1
141 #define	AL_ETH_FSM_DATA_INNER_2_TUPLE	2
142 #define	AL_ETH_FSM_DATA_INNER_4_TUPLE	3
143 
144 #define	AL_ETH_FSM_DATA_HASH_SEL	(1 << 2)
145 
146 #define	AL_ETH_FSM_DATA_DEFAULT_Q	0
147 #define	AL_ETH_FSM_DATA_DEFAULT_UDMA	0
148 
149 #define	AL_BR_SIZE	512
150 #define	AL_TSO_SIZE	65500
151 #define	AL_DEFAULT_MTU	1500
152 
153 #define	CSUM_OFFLOAD		(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
154 
155 #define	AL_IP_ALIGNMENT_OFFSET	2
156 
157 #define	SFP_I2C_ADDR		0x50
158 
159 #define	AL_MASK_GROUP_A_INT	0x7
160 #define	AL_MASK_GROUP_B_INT	0xF
161 #define	AL_MASK_GROUP_C_INT	0xF
162 #define	AL_MASK_GROUP_D_INT	0xFFFFFFFF
163 
164 #define	AL_REG_OFFSET_FORWARD_INTR	(0x1800000 + 0x1210)
165 #define	AL_EN_FORWARD_INTR	0x1FFFF
166 #define	AL_DIS_FORWARD_INTR	0
167 
168 #define	AL_M2S_MASK_INIT	0x480
169 #define	AL_S2M_MASK_INIT	0x1E0
170 #define	AL_M2S_S2M_MASK_NOT_INT	(0x3f << 25)
171 
172 #define	AL_10BASE_T_SPEED	10
173 #define	AL_100BASE_TX_SPEED	100
174 #define	AL_1000BASE_T_SPEED	1000
175 
176 #define	AL_RX_LOCK_INIT(_sc)	mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
177 #define	AL_RX_LOCK(_sc)		mtx_lock(&((_sc)->if_rx_lock))
178 #define	AL_RX_UNLOCK(_sc)	mtx_unlock(&((_sc)->if_rx_lock))
179 
180 /* helper functions */
181 static int al_is_device_supported(device_t);
182 
183 static void al_eth_init_rings(struct al_eth_adapter *);
184 static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
185 int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
186 int al_eth_fpga_write_pci_config(void *, int, uint32_t);
187 int al_eth_read_pci_config(void *, int, uint32_t *);
188 int al_eth_write_pci_config(void *, int, uint32_t);
189 void al_eth_irq_config(uint32_t *, uint32_t);
190 void al_eth_forward_int_config(uint32_t *, uint32_t);
191 static void al_eth_start_xmit(void *, int);
192 static void al_eth_rx_recv_work(void *, int);
193 static int al_eth_up(struct al_eth_adapter *);
194 static void al_eth_down(struct al_eth_adapter *);
195 static void al_eth_interrupts_unmask(struct al_eth_adapter *);
196 static void al_eth_interrupts_mask(struct al_eth_adapter *);
197 static int al_eth_check_mtu(struct al_eth_adapter *, int);
198 static uint64_t al_get_counter(if_t, ift_counter);
199 static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
200 static int al_eth_board_params_init(struct al_eth_adapter *);
201 static int al_media_update(if_t);
202 static void al_media_status(if_t, struct ifmediareq *);
203 static int al_eth_function_reset(struct al_eth_adapter *);
204 static int al_eth_hw_init_adapter(struct al_eth_adapter *);
205 static void al_eth_serdes_init(struct al_eth_adapter *);
206 static void al_eth_lm_config(struct al_eth_adapter *);
207 static int al_eth_hw_init(struct al_eth_adapter *);
208 
209 static void al_tick_stats(void *);
210 
211 /* ifnet entry points */
212 static void al_init(void *);
213 static int al_mq_start(if_t, struct mbuf *);
214 static void al_qflush(if_t);
215 static int al_ioctl(if_t ifp, u_long, caddr_t);
216 
217 /* bus entry points */
218 static int al_probe(device_t);
219 static int al_attach(device_t);
220 static int al_detach(device_t);
221 static int al_shutdown(device_t);
222 
223 /* mii bus support routines */
224 static int al_miibus_readreg(device_t, int, int);
225 static int al_miibus_writereg(device_t, int, int, int);
226 static void al_miibus_statchg(device_t);
227 static void al_miibus_linkchg(device_t);
228 
229 struct al_eth_adapter *g_adapters[16];
230 uint32_t g_adapters_count;
231 
232 /* flag for napi-like mbuf processing, controlled from sysctl */
233 static int napi = 0;
234 
235 static device_method_t al_methods[] = {
236 	/* Device interface */
237 	DEVMETHOD(device_probe,		al_probe),
238 	DEVMETHOD(device_attach,	al_attach),
239 	DEVMETHOD(device_detach,	al_detach),
240 	DEVMETHOD(device_shutdown,	al_shutdown),
241 
242 	DEVMETHOD(miibus_readreg,	al_miibus_readreg),
243 	DEVMETHOD(miibus_writereg,	al_miibus_writereg),
244 	DEVMETHOD(miibus_statchg,	al_miibus_statchg),
245 	DEVMETHOD(miibus_linkchg,	al_miibus_linkchg),
246 	{ 0, 0 }
247 };
248 
249 static driver_t al_driver = {
250 	"al",
251 	al_methods,
252 	sizeof(struct al_eth_adapter),
253 };
254 
255 DRIVER_MODULE(al, pci, al_driver, 0, 0);
256 DRIVER_MODULE(miibus, al, miibus_driver, 0, 0);
257 
258 static int
259 al_probe(device_t dev)
260 {
261 	if (al_is_device_supported(dev) != 0) {
262 		device_set_desc(dev, "al");
263 		return (BUS_PROBE_DEFAULT);
264 	}
265 	return (ENXIO);
266 }
267 
268 static int
269 al_attach(device_t dev)
270 {
271 	struct al_eth_adapter *adapter;
272 	struct sysctl_oid_list *child;
273 	struct sysctl_ctx_list *ctx;
274 	struct sysctl_oid *tree;
275 	if_t ifp;
276 	uint32_t dev_id;
277 	uint32_t rev_id;
278 	int bar_udma;
279 	int bar_mac;
280 	int bar_ec;
281 	int err;
282 
283 	err = 0;
284 	ifp = NULL;
285 	dev_id = rev_id = 0;
286 	ctx = device_get_sysctl_ctx(dev);
287 	tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
288 	child = SYSCTL_CHILDREN(tree);
289 
290 	if (g_adapters_count == 0) {
291 		SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi",
292 		    CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism");
293 	}
294 	adapter = device_get_softc(dev);
295 	adapter->dev = dev;
296 	adapter->board_type = ALPINE_INTEGRATED;
297 	snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
298 	    device_get_nameunit(dev));
299 	AL_RX_LOCK_INIT(adapter);
300 
301 	g_adapters[g_adapters_count] = adapter;
302 
303 	bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
304 	adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
305 	    &bar_udma, RF_ACTIVE);
306 	if (adapter->udma_res == NULL) {
307 		device_printf(adapter->dev,
308 		    "could not allocate memory resources for DMA.\n");
309 		err = ENOMEM;
310 		goto err_res_dma;
311 	}
312 	adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
313 	    rman_get_bushandle(adapter->udma_res));
314 	bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
315 	adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
316 	    &bar_mac, RF_ACTIVE);
317 	if (adapter->mac_res == NULL) {
318 		device_printf(adapter->dev,
319 		    "could not allocate memory resources for MAC.\n");
320 		err = ENOMEM;
321 		goto err_res_mac;
322 	}
323 	adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
324 	    rman_get_bushandle(adapter->mac_res));
325 
326 	bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
327 	adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
328 	    RF_ACTIVE);
329 	if (adapter->ec_res == NULL) {
330 		device_printf(adapter->dev,
331 		    "could not allocate memory resources for EC.\n");
332 		err = ENOMEM;
333 		goto err_res_ec;
334 	}
335 	adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
336 	    rman_get_bushandle(adapter->ec_res));
337 
338 	adapter->netdev = ifp = if_alloc(IFT_ETHER);
339 
340 	if_setsoftc(ifp, adapter);
341 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
342 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
343 	if_setflags(ifp, if_getdrvflags(ifp));
344 	if_setflagbits(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI, 0);
345 	if_settransmitfn(ifp, al_mq_start);
346 	if_setqflushfn(ifp, al_qflush);
347 	if_setioctlfn(ifp, al_ioctl);
348 	if_setinitfn(ifp, al_init);
349 	if_setgetcounterfn(ifp, al_get_counter);
350 	if_setmtu(ifp, AL_DEFAULT_MTU);
351 
352 	adapter->if_flags = if_getflags(ifp);
353 
354 	if_setcapabilities(ifp, if_getcapenable(ifp));
355 
356 	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM |
357 	    IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
358 	    IFCAP_LRO | IFCAP_JUMBO_MTU, 0);
359 
360 	if_setcapenable(ifp, if_getcapabilities(ifp));
361 
362 	adapter->id_number = g_adapters_count;
363 
364 	if (adapter->board_type == ALPINE_INTEGRATED) {
365 		dev_id = pci_get_device(adapter->dev);
366 		rev_id = pci_get_revid(adapter->dev);
367 	} else {
368 		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
369 		    PCIR_DEVICE, &dev_id);
370 		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
371 		    PCIR_REVID, &rev_id);
372 	}
373 
374 	adapter->dev_id = dev_id;
375 	adapter->rev_id = rev_id;
376 
377 	/* set default ring sizes */
378 	adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
379 	adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
380 	adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
381 	adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;
382 
383 	adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
384 	adapter->num_rx_queues = AL_ETH_NUM_QUEUES;
385 
386 	adapter->small_copy_len	= AL_ETH_DEFAULT_SMALL_PACKET_LEN;
387 	adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
388 	adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;
389 
390 	al_eth_req_rx_buff_size(adapter, if_getmtu(adapter->netdev));
391 
392 	adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;
393 
394 	err = al_eth_board_params_init(adapter);
395 	if (err != 0)
396 		goto err;
397 
398 	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
399 		ifmedia_init(&adapter->media, IFM_IMASK,
400 		    al_media_update, al_media_status);
401 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
402 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
403 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
404 		ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
405 	}
406 
407 	al_eth_function_reset(adapter);
408 
409 	err = al_eth_hw_init_adapter(adapter);
410 	if (err != 0)
411 		goto err;
412 
413 	al_eth_init_rings(adapter);
414 	g_adapters_count++;
415 
416 	al_eth_lm_config(adapter);
417 	mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
418 	mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
419 	callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
420 	callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);
421 
422 	ether_ifattach(ifp, adapter->mac_addr);
423 	if_setmtu(ifp, AL_DEFAULT_MTU);
424 
425 	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
426 		al_eth_hw_init(adapter);
427 
428 		/* Attach PHY(s) */
429 		err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
430 		    al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
431 		    MII_OFFSET_ANY, 0);
432 		if (err != 0) {
433 			device_printf(adapter->dev, "attaching PHYs failed\n");
434 			return (err);
435 		}
436 
437 		adapter->mii = device_get_softc(adapter->miibus);
438 	}
439 
440 	return (err);
441 
442 err:
443 	bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
444 err_res_ec:
445 	bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
446 err_res_mac:
447 	bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
448 err_res_dma:
449 	return (err);
450 }
451 
452 static int
453 al_detach(device_t dev)
454 {
455 	struct al_eth_adapter *adapter;
456 
457 	adapter = device_get_softc(dev);
458 	ether_ifdetach(adapter->netdev);
459 
460 	mtx_destroy(&adapter->stats_mtx);
461 	mtx_destroy(&adapter->wd_mtx);
462 
463 	al_eth_down(adapter);
464 
465 	bus_release_resource(dev, SYS_RES_IRQ,    0, adapter->irq_res);
466 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
467 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res);
468 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);
469 
470 	return (0);
471 }
472 
473 int
474 al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
475 {
476 
477 	/* handle is the base address of the adapter */
478 	*val = al_reg_read32((void*)((u_long)handle + where));
479 
480 	return (0);
481 }
482 
483 int
484 al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
485 {
486 
487 	/* handle is the base address of the adapter */
488 	al_reg_write32((void*)((u_long)handle + where), val);
489 	return (0);
490 }
491 
492 int
493 al_eth_read_pci_config(void *handle, int where, uint32_t *val)
494 {
495 
496 	/* handle is a pci_dev */
497 	*val = pci_read_config((device_t)handle, where, sizeof(*val));
498 	return (0);
499 }
500 
501 int
502 al_eth_write_pci_config(void *handle, int where, uint32_t val)
503 {
504 
505 	/* handle is a pci_dev */
506 	pci_write_config((device_t)handle, where, val, sizeof(val));
507 	return (0);
508 }
509 
510 void
511 al_eth_irq_config(uint32_t *offset, uint32_t value)
512 {
513 
514 	al_reg_write32_relaxed(offset, value);
515 }
516 
517 void
518 al_eth_forward_int_config(uint32_t *offset, uint32_t value)
519 {
520 
521 	al_reg_write32(offset, value);
522 }
523 
524 static void
525 al_eth_serdes_init(struct al_eth_adapter *adapter)
526 {
527 	void __iomem	*serdes_base;
528 
529 	adapter->serdes_init = false;
530 
531 	serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
532 	if (serdes_base == NULL) {
533 		device_printf(adapter->dev, "serdes_base get failed!\n");
534 		return;
535 	}
536 
537 	serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);
538 
539 	al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
540 	    &adapter->serdes_obj);
541 
542 	adapter->serdes_init = true;
543 }
544 
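/*
 * al_dma_map_addr - bus_dmamap_load() callback; stores the bus address of
 * the first (and only) segment into the caller-provided bus_addr_t.
 */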
545 static void
546 al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
547 {
548 	bus_addr_t *paddr;
549 
550 	paddr = arg;
551 	*paddr = segs->ds_addr;
552 }
553 
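/*
 * al_dma_alloc_coherent - allocate a DMA-coherent, zeroed buffer: create a
 * single-segment tag rounded up to whole pages, allocate the memory and load
 * the map, returning both the bus address (baddr) and the KVA (vaddr).
 */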
554 static int
555 al_dma_alloc_coherent(device_t dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
556     bus_addr_t *baddr, void **vaddr, uint32_t size)
557 {
558 	int ret;
559 	uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
560 
561 	ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
562 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
563 	    maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag);
564 	if (ret != 0) {
565 		device_printf(dev,
566 		    "failed to create bus tag, ret = %d\n", ret);
567 		return (ret);
568 	}
569 
570 	ret = bus_dmamem_alloc(*tag, vaddr,
571 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
572 	if (ret != 0) {
573 		device_printf(dev,
574 		    "failed to allocate dmamem, ret = %d\n", ret);
575 		return (ret);
576 	}
577 
578 	ret = bus_dmamap_load(*tag, *map, *vaddr,
579 	    size, al_dma_map_addr, baddr, 0);
580 	if (ret != 0) {
581 		device_printf(dev,
582 		    "failed to allocate bus_dmamap_load, ret = %d\n", ret);
583 		return (ret);
584 	}
585 
586 	return (0);
587 }
588 
589 static void
590 al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
591 {
592 
593 	bus_dmamap_unload(tag, map);
594 	bus_dmamem_free(tag, vaddr, map);
595 	bus_dma_tag_destroy(tag);
596 }
597 
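/*
 * Program the adapter's own unicast address at forwarding-table index idx:
 * match all 48 address bits and steer received frames to the UDMAs given
 * in udma_mask.
 */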
598 static void
599 al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter,
600     uint8_t idx, uint8_t udma_mask)
601 {
602 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
603 
604 	memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr));
605 
606 	memset(entry.mask, 0xff, sizeof(entry.mask));
607 	entry.rx_valid = true;
608 	entry.tx_valid = false;
609 	entry.udma_mask = udma_mask;
610 	entry.filter = false;
611 
612 	device_printf_dbg(adapter->dev,
613 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
614 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
615 
616 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
617 }
618 
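/*
 * Program a catch-all multicast entry at index idx: only the group bit
 * (LSB of the first address octet) is compared, so every multicast frame
 * matches and is steered to the UDMAs in udma_mask.
 */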
619 static void
620 al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx,
621     uint8_t udma_mask)
622 {
623 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
624 
625 	memset(entry.addr, 0x00, sizeof(entry.addr));
626 	memset(entry.mask, 0x00, sizeof(entry.mask));
627 	entry.mask[0] |= 1;
628 	entry.addr[0] |= 1;
629 
630 	entry.rx_valid = true;
631 	entry.tx_valid = false;
632 	entry.udma_mask = udma_mask;
633 	entry.filter = false;
634 
635 	device_printf_dbg(adapter->dev,
636 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
637 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
638 
639 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
640 }
641 
642 static void
643 al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter,
644     uint8_t idx, uint8_t udma_mask)
645 {
646 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
647 
648 	memset(entry.addr, 0xff, sizeof(entry.addr));
649 	memset(entry.mask, 0xff, sizeof(entry.mask));
650 
651 	entry.rx_valid = true;
652 	entry.tx_valid = false;
653 	entry.udma_mask = udma_mask;
654 	entry.filter = false;
655 
656 	device_printf_dbg(adapter->dev,
657 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
658 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
659 
660 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
661 }
662 
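/*
 * Program the entry at AL_ETH_MAC_TABLE_DROP_IDX (the last table entry):
 * with an all-zero mask every frame matches it, so either forward
 * everything to UDMA 0 (promiscuous) or filter (drop) whatever did not
 * match a more specific entry.
 */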
663 static void
664 al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter,
665     bool promiscuous)
666 {
667 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
668 
669 	memset(entry.addr, 0x00, sizeof(entry.addr));
670 	memset(entry.mask, 0x00, sizeof(entry.mask));
671 
672 	entry.rx_valid = true;
673 	entry.tx_valid = false;
674 	entry.udma_mask = (promiscuous) ? 1 : 0;
675 	entry.filter = (promiscuous) ? false : true;
676 
677 	device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n",
678 	    __func__, (promiscuous) ? "enter" : "exit");
679 
680 	al_eth_fwd_mac_table_set(&adapter->hal_adapter,
681 	    AL_ETH_MAC_TABLE_DROP_IDX, &entry);
682 }
683 
684 static void
685 al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx,
686     uint8_t udma, uint32_t queue)
687 {
688 
689 	if (udma != 0)
690 		panic("only UDMA0 is supporter");
691 
692 	if (queue >= AL_ETH_NUM_QUEUES)
693 		panic("invalid queue number");
694 
695 	al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue);
696 }
697 
698 /*
 * Init the RX FSM table. No tunneling is supported yet; TCP/UDP over
 * IPv4/IPv6 packets are hashed on the 4-tuple, other IP packets on the
 * 2-tuple.
 */
699 static void
700 al_eth_fsm_table_init(struct al_eth_adapter *adapter)
701 {
702 	uint32_t val;
703 	int i;
704 
705 	for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) {
706 		uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i);
707 		switch (outer_type) {
708 		case AL_ETH_FSM_ENTRY_IPV4_TCP:
709 		case AL_ETH_FSM_ENTRY_IPV4_UDP:
710 		case AL_ETH_FSM_ENTRY_IPV6_TCP:
711 		case AL_ETH_FSM_ENTRY_IPV6_UDP:
712 			val = AL_ETH_FSM_DATA_OUTER_4_TUPLE |
713 			    AL_ETH_FSM_DATA_HASH_SEL;
714 			break;
715 		case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP:
716 		case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP:
717 			val = AL_ETH_FSM_DATA_OUTER_2_TUPLE |
718 			    AL_ETH_FSM_DATA_HASH_SEL;
719 			break;
720 		default:
721 			val = AL_ETH_FSM_DATA_DEFAULT_Q |
722 			    AL_ETH_FSM_DATA_DEFAULT_UDMA;
723 		}
724 		al_eth_fsm_table_set(&adapter->hal_adapter, i, val);
725 	}
726 }
727 
728 static void
729 al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter,
730     uint8_t idx)
731 {
732 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
733 
734 	device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx);
735 
736 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
737 }
738 
739 static int
740 al_eth_hw_init_adapter(struct al_eth_adapter *adapter)
741 {
742 	struct al_eth_adapter_params *params = &adapter->eth_hal_params;
743 	int rc;
744 
745 	/* params->dev_id = adapter->dev_id; */
746 	params->rev_id = adapter->rev_id;
747 	params->udma_id = 0;
748 	params->enable_rx_parser = 1; /* enable rx epe parser*/
749 	params->udma_regs_base = adapter->udma_base; /* UDMA register base address */
750 	params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */
751 	params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */
752 	params->name = adapter->name;
753 	params->serdes_lane = adapter->serdes_lane;
754 
755 	rc = al_eth_adapter_init(&adapter->hal_adapter, params);
756 	if (rc != 0)
757 		device_printf(adapter->dev, "%s failed at hal init!\n",
758 		    __func__);
759 
760 	if ((adapter->board_type == ALPINE_NIC) ||
761 	    (adapter->board_type == ALPINE_FPGA_NIC)) {
762 		/* in PCIe NIC mode, force the eth UDMA to access PCIE0 using the tgtid */
763 		struct al_udma_gen_tgtid_conf conf;
764 		int i;
765 		for (i = 0; i < DMA_MAX_Q; i++) {
766 			conf.tx_q_conf[i].queue_en = AL_TRUE;
767 			conf.tx_q_conf[i].desc_en = AL_FALSE;
768 			conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
769 			conf.rx_q_conf[i].queue_en = AL_TRUE;
770 			conf.rx_q_conf[i].desc_en = AL_FALSE;
771 			conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
772 		}
773 		al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf);
774 	}
775 
776 	return (rc);
777 }
778 
779 static void
780 al_eth_lm_config(struct al_eth_adapter *adapter)
781 {
782 	struct al_eth_lm_init_params params = {0};
783 
784 	params.adapter = &adapter->hal_adapter;
785 	params.serdes_obj = &adapter->serdes_obj;
786 	params.lane = adapter->serdes_lane;
787 	params.sfp_detection = adapter->sfp_detection_needed;
788 	if (adapter->sfp_detection_needed == true) {
789 		params.sfp_bus_id = adapter->i2c_adapter_id;
790 		params.sfp_i2c_addr = SFP_I2C_ADDR;
791 	}
792 
793 	if (adapter->sfp_detection_needed == false) {
794 		switch (adapter->mac_mode) {
795 		case AL_ETH_MAC_MODE_10GbE_Serial:
796 			if ((adapter->lt_en != 0) && (adapter->an_en != 0))
797 				params.default_mode = AL_ETH_LM_MODE_10G_DA;
798 			else
799 				params.default_mode = AL_ETH_LM_MODE_10G_OPTIC;
800 			break;
801 		case AL_ETH_MAC_MODE_SGMII:
802 			params.default_mode = AL_ETH_LM_MODE_1G;
803 			break;
804 		default:
805 			params.default_mode = AL_ETH_LM_MODE_10G_DA;
806 		}
807 	} else
808 		params.default_mode = AL_ETH_LM_MODE_10G_DA;
809 
810 	params.link_training = adapter->lt_en;
811 	params.rx_equal = true;
812 	params.static_values = !adapter->dont_override_serdes;
813 	params.i2c_context = adapter;
814 	params.kr_fec_enable = false;
815 
816 	params.retimer_exist = adapter->retimer.exist;
817 	params.retimer_bus_id = adapter->retimer.bus_id;
818 	params.retimer_i2c_addr = adapter->retimer.i2c_addr;
819 	params.retimer_channel = adapter->retimer.channel;
820 
821 	al_eth_lm_init(&adapter->lm_context, &params);
822 }
823 
824 static int
825 al_eth_board_params_init(struct al_eth_adapter *adapter)
826 {
827 
828 	if (adapter->board_type == ALPINE_NIC) {
829 		adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
830 		adapter->sfp_detection_needed = false;
831 		adapter->phy_exist = false;
832 		adapter->an_en = false;
833 		adapter->lt_en = false;
834 		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
835 		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
836 	} else if (adapter->board_type == ALPINE_FPGA_NIC) {
837 		adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
838 		adapter->sfp_detection_needed = false;
839 		adapter->phy_exist = false;
840 		adapter->an_en = false;
841 		adapter->lt_en = false;
842 		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
843 		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
844 	} else {
845 		struct al_eth_board_params params;
846 		int rc;
847 
848 		adapter->auto_speed = false;
849 
850 		rc = al_eth_board_params_get(adapter->mac_base, &params);
851 		if (rc != 0) {
852 			device_printf(adapter->dev,
853 			    "board info not available\n");
854 			return (-1);
855 		}
856 
857 		adapter->phy_exist = params.phy_exist == true;
858 		adapter->phy_addr = params.phy_mdio_addr;
859 		adapter->an_en = params.autoneg_enable;
860 		adapter->lt_en = params.kr_lt_enable;
861 		adapter->serdes_grp = params.serdes_grp;
862 		adapter->serdes_lane = params.serdes_lane;
863 		adapter->sfp_detection_needed = params.sfp_plus_module_exist;
864 		adapter->i2c_adapter_id = params.i2c_adapter_id;
865 		adapter->ref_clk_freq = params.ref_clk_freq;
866 		adapter->dont_override_serdes = params.dont_override_serdes;
867 		adapter->link_config.active_duplex = !params.half_duplex;
868 		adapter->link_config.autoneg = !params.an_disable;
869 		adapter->link_config.force_1000_base_x = params.force_1000_base_x;
870 		adapter->retimer.exist = params.retimer_exist;
871 		adapter->retimer.bus_id = params.retimer_bus_id;
872 		adapter->retimer.i2c_addr = params.retimer_i2c_addr;
873 		adapter->retimer.channel = params.retimer_channel;
874 
875 		switch (params.speed) {
876 		default:
877 			device_printf(adapter->dev,
878 			    "%s: invalid speed (%d)\n", __func__, params.speed);
879 		case AL_ETH_BOARD_1G_SPEED_1000M:
880 			adapter->link_config.active_speed = 1000;
881 			break;
882 		case AL_ETH_BOARD_1G_SPEED_100M:
883 			adapter->link_config.active_speed = 100;
884 			break;
885 		case AL_ETH_BOARD_1G_SPEED_10M:
886 			adapter->link_config.active_speed = 10;
887 			break;
888 		}
889 
890 		switch (params.mdio_freq) {
891 		default:
892 			device_printf(adapter->dev,
893 			    "%s: invalid mdio freq (%d)\n", __func__,
894 			    params.mdio_freq);
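			/* FALLTHROUGH: default to 2.5 MHz */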
895 		case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ:
896 			adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
897 			break;
898 		case AL_ETH_BOARD_MDIO_FREQ_1_MHZ:
899 			adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ;
900 			break;
901 		}
902 
903 		switch (params.media_type) {
904 		case AL_ETH_BOARD_MEDIA_TYPE_RGMII:
905 			if (params.sfp_plus_module_exist == true)
906 				/* Backward compatibility */
907 				adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
908 			else
909 				adapter->mac_mode = AL_ETH_MAC_MODE_RGMII;
910 
911 			adapter->use_lm = false;
912 			break;
913 		case AL_ETH_BOARD_MEDIA_TYPE_SGMII:
914 			adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
915 			adapter->use_lm = true;
916 			break;
917 		case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR:
918 			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
919 			adapter->use_lm = true;
920 			break;
921 		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT:
922 			adapter->sfp_detection_needed = true;
923 			adapter->auto_speed = false;
924 			adapter->use_lm = true;
925 			break;
926 		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED:
927 			adapter->sfp_detection_needed = true;
928 			adapter->auto_speed = true;
929 			adapter->mac_mode_set = false;
930 			adapter->use_lm = true;
931 
932 			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
933 			break;
934 		default:
935 			device_printf(adapter->dev,
936 			    "%s: unsupported media type %d\n",
937 			    __func__, params.media_type);
938 			return (-1);
939 		}
940 
941 		device_printf(adapter->dev,
942 		    "Board info: phy exist %s. phy addr %d. mdio freq %u Khz. "
943 		    "SFP connected %s. media %d\n",
944 		    params.phy_exist ? "Yes" : "No",
945 		    params.phy_mdio_addr, adapter->mdio_freq,
946 		    params.sfp_plus_module_exist ? "Yes" : "No",
947 		    params.media_type);
948 	}
949 
950 	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
951 
952 	return (0);
953 }
954 
955 static int
956 al_eth_function_reset(struct al_eth_adapter *adapter)
957 {
958 	struct al_eth_board_params params;
959 	int rc;
960 
961 	/* save board params so we restore it after reset */
962 	al_eth_board_params_get(adapter->mac_base, &params);
963 	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
964 	if (adapter->board_type == ALPINE_INTEGRATED)
965 		rc = al_eth_flr_rmn(&al_eth_read_pci_config,
966 		    &al_eth_write_pci_config,
967 		    adapter->dev, adapter->mac_base);
968 	else
969 		rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config,
970 		    &al_eth_fpga_write_pci_config,
971 		    adapter->internal_pcie_base, adapter->mac_base);
972 
973 	/* restore params */
974 	al_eth_board_params_set(adapter->mac_base, &params);
975 	al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr);
976 
977 	return (rc);
978 }
979 
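/*
 * al_eth_init_rings - initialize per-queue software state for all TX and RX
 * rings: HAL queue handles, descriptor counts and the per-ring interrupt
 * unmask register/value used to re-enable the queue's IRQ after servicing.
 */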
980 static void
981 al_eth_init_rings(struct al_eth_adapter *adapter)
982 {
983 	int i;
984 
985 	for (i = 0; i < adapter->num_tx_queues; i++) {
986 		struct al_eth_ring *ring = &adapter->tx_ring[i];
987 
988 		ring->ring_id = i;
989 		ring->dev = adapter->dev;
990 		ring->adapter = adapter;
991 		ring->netdev = adapter->netdev;
992 		al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
993 		    &ring->dma_q);
994 		ring->sw_count = adapter->tx_ring_count;
995 		ring->hw_count = adapter->tx_descs_count;
996 		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get((struct unit_regs *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
997 		ring->unmask_val = ~(1 << i);
998 	}
999 
1000 	for (i = 0; i < adapter->num_rx_queues; i++) {
1001 		struct al_eth_ring *ring = &adapter->rx_ring[i];
1002 
1003 		ring->ring_id = i;
1004 		ring->dev = adapter->dev;
1005 		ring->adapter = adapter;
1006 		ring->netdev = adapter->netdev;
1007 		al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q);
1008 		ring->sw_count = adapter->rx_ring_count;
1009 		ring->hw_count = adapter->rx_descs_count;
1010 		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
1011 		    (struct unit_regs *)adapter->udma_base,
1012 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1013 		ring->unmask_val = ~(1 << i);
1014 	}
1015 }
1016 
1017 static void
1018 al_init_locked(void *arg)
1019 {
1020 	struct al_eth_adapter *adapter = arg;
1021 	if_t ifp = adapter->netdev;
1022 	int rc = 0;
1023 
1024 	al_eth_down(adapter);
1025 	rc = al_eth_up(adapter);
1026 
1027 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1028 	if (rc == 0)
1029 		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1030 }
1031 
1032 static void
1033 al_init(void *arg)
1034 {
1035 	struct al_eth_adapter *adapter = arg;
1036 
1037 	al_init_locked(adapter);
1038 }
1039 
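/*
 * al_eth_alloc_rx_buf - attach a new mbuf cluster to an RX descriptor and
 * DMA-map it; the HAL buffer skips AL_IP_ALIGNMENT_OFFSET bytes so that the
 * IP header of the received frame ends up 4-byte aligned.
 */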
1040 static inline int
1041 al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
1042     struct al_eth_ring *rx_ring,
1043     struct al_eth_rx_buffer *rx_info)
1044 {
1045 	struct al_buf *al_buf;
1046 	bus_dma_segment_t segs[2];
1047 	int error;
1048 	int nsegs;
1049 
1050 	if (rx_info->m != NULL)
1051 		return (0);
1052 
1053 	rx_info->data_size = adapter->rx_mbuf_sz;
1054 
1055 	AL_RX_LOCK(adapter);
1056 
1057 	/* Get mbuf using UMA allocator */
1058 	rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1059 	    rx_info->data_size);
1060 	AL_RX_UNLOCK(adapter);
1061 
1062 	if (rx_info->m == NULL)
1063 		return (ENOMEM);
1064 
1065 	rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;
1066 
1067 	/* Map packets for DMA */
1068 	error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
1069 	    rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
1070 	if (__predict_false(error)) {
1071 		device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
1072 		    error);
1073 		m_freem(rx_info->m);
1074 		rx_info->m = NULL;
1075 		return (EFAULT);
1076 	}
1077 
1078 	al_buf = &rx_info->al_buf;
1079 	al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
1080 	al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;
1081 
1082 	return (0);
1083 }
1084 
1085 static int
1086 al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
1087     unsigned int num)
1088 {
1089 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
1090 	uint16_t next_to_use;
1091 	unsigned int i;
1092 
1093 	next_to_use = rx_ring->next_to_use;
1094 
1095 	for (i = 0; i < num; i++) {
1096 		int rc;
1097 		struct al_eth_rx_buffer *rx_info =
1098 		    &rx_ring->rx_buffer_info[next_to_use];
1099 
1100 		if (__predict_false(al_eth_alloc_rx_buf(adapter,
1101 		    rx_ring, rx_info) < 0)) {
1102 			device_printf(adapter->dev,
1103 			    "failed to alloc buffer for rx queue %d\n", qid);
1104 			break;
1105 		}
1106 
1107 		rc = al_eth_rx_buffer_add(rx_ring->dma_q,
1108 		    &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
1109 		if (__predict_false(rc)) {
1110 			device_printf(adapter->dev,
1111 			    "failed to add buffer for rx queue %d\n", qid);
1112 			break;
1113 		}
1114 
1115 		next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
1116 	}
1117 
1118 	if (__predict_false(i < num))
1119 		device_printf(adapter->dev,
1120 		    "refilled rx queue %d with %d pages only - available %d\n",
1121 		    qid, i, al_udma_available_get(rx_ring->dma_q));
1122 
1123 	if (__predict_true(i))
1124 		al_eth_rx_buffer_action(rx_ring->dma_q, i);
1125 
1126 	rx_ring->next_to_use = next_to_use;
1127 
1128 	return (i);
1129 }
1130 
1131 /*
1132  * al_eth_refill_all_rx_bufs - allocate Rx buffers for all queues
1133  * @adapter: board private structure
1134  */
1135 static void
1136 al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
1137 {
1138 	int i;
1139 
1140 	for (i = 0; i < adapter->num_rx_queues; i++)
1141 		al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
1142 }
1143 
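/*
 * al_eth_tx_do_cleanup - reclaim completed TX descriptors: for every packet
 * whose descriptors have all completed, unload the DMA map and free the
 * mbuf, then advance next_to_clean.
 */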
1144 static void
1145 al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
1146 {
1147 	unsigned int total_done;
1148 	uint16_t next_to_clean;
1149 	int qid = tx_ring->ring_id;
1150 
1151 	total_done = al_eth_comp_tx_get(tx_ring->dma_q);
1152 	device_printf_dbg(tx_ring->dev,
1153 	    "tx_poll: q %d total completed descs %x\n", qid, total_done);
1154 	next_to_clean = tx_ring->next_to_clean;
1155 
1156 	while (total_done != 0) {
1157 		struct al_eth_tx_buffer *tx_info;
1158 		struct mbuf *mbuf;
1159 
1160 		tx_info = &tx_ring->tx_buffer_info[next_to_clean];
1161 		/* stop if not all descriptors of the packet are completed */
1162 		if (tx_info->tx_descs > total_done)
1163 			break;
1164 
1165 		mbuf = tx_info->m;
1166 
1167 		tx_info->m = NULL;
1168 
1169 		device_printf_dbg(tx_ring->dev,
1170 		    "tx_poll: q %d mbuf %p completed\n", qid, mbuf);
1171 
1172 		/* map is no longer required */
1173 		bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);
1174 
1175 		m_freem(mbuf);
1176 		total_done -= tx_info->tx_descs;
1177 		next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
1178 	}
1179 
1180 	tx_ring->next_to_clean = next_to_clean;
1181 
1182 	device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
1183 	    qid, next_to_clean);
1184 
1185 	/*
1186 	 * need to make the rings circular update visible to
1187 	 * al_eth_start_xmit() before checking for netif_queue_stopped().
1188 	 */
1189 	al_smp_data_memory_barrier();
1190 }
1191 
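/*
 * al_eth_tx_csum - parse the Ethernet/VLAN and L3/L4 headers of an outgoing
 * mbuf and fill the HAL packet flags and metadata required for checksum
 * offload and TSO.
 */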
1192 static void
1193 al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
1194     struct al_eth_pkt *hal_pkt, struct mbuf *m)
1195 {
1196 	uint32_t mss = m->m_pkthdr.tso_segsz;
1197 	struct ether_vlan_header *eh;
1198 	uint16_t etype;
1199 #ifdef INET
1200 	struct ip *ip;
1201 #endif
1202 #ifdef INET6
1203 	struct ip6_hdr *ip6;
1204 #endif
1205 	struct tcphdr *th = NULL;
1206 	int	ehdrlen, ip_hlen = 0;
1207 	uint8_t	ipproto = 0;
1208 	uint32_t offload = 0;
1209 
1210 	if (mss != 0)
1211 		offload = 1;
1212 
1213 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0)
1214 		offload = 1;
1215 
1216 	if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
1217 		offload = 1;
1218 
1219 	if (offload != 0) {
1220 		struct al_eth_meta_data *meta = &tx_ring->hal_meta;
1221 
1222 		if (mss != 0)
1223 			hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO |
1224 			    AL_ETH_TX_FLAGS_L4_CSUM);
1225 		else
1226 			hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM |
1227 			    AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM);
1228 
1229 		/*
1230 		 * Determine where frame payload starts.
1231 		 * Jump over vlan headers if already present,
1232 		 * helpful for QinQ too.
1233 		 */
1234 		eh = mtod(m, struct ether_vlan_header *);
1235 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1236 			etype = ntohs(eh->evl_proto);
1237 			ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1238 		} else {
1239 			etype = ntohs(eh->evl_encap_proto);
1240 			ehdrlen = ETHER_HDR_LEN;
1241 		}
1242 
1243 		switch (etype) {
1244 #ifdef INET
1245 		case ETHERTYPE_IP:
1246 			ip = (struct ip *)(m->m_data + ehdrlen);
1247 			ip_hlen = ip->ip_hl << 2;
1248 			ipproto = ip->ip_p;
1249 			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4;
1250 			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1251 			if (mss != 0)
1252 				hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM;
1253 			if (ipproto == IPPROTO_TCP)
1254 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
1255 			else
1256 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
1257 			break;
1258 #endif /* INET */
1259 #ifdef INET6
1260 		case ETHERTYPE_IPV6:
1261 			ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1262 			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6;
1263 			ip_hlen = sizeof(struct ip6_hdr);
1264 			th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1265 			ipproto = ip6->ip6_nxt;
1266 			if (ipproto == IPPROTO_TCP)
1267 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
1268 			else
1269 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
1270 			break;
1271 #endif /* INET6 */
1272 		default:
1273 			break;
1274 		}
1275 
1276 		meta->words_valid = 4;
1277 		meta->l3_header_len = ip_hlen;
1278 		meta->l3_header_offset = ehdrlen;
1279 		if (th != NULL)
1280 			meta->l4_header_len = th->th_off; /* this param needed only for TSO */
1281 		meta->mss_idx_sel = 0;			/* check how to select MSS */
1282 		meta->mss_val = mss;
1283 		hal_pkt->meta = meta;
1284 	} else
1285 		hal_pkt->meta = NULL;
1286 }
1287 
1288 #define	XMIT_QUEUE_TIMEOUT	100
1289 
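/*
 * al_eth_xmit_mbuf - transmit a single mbuf chain: wait (bounded) for a
 * stalled queue, DMA-map the chain (defragmenting once on EFBIG), fill the
 * HAL buffer descriptors and trigger the DMA engine; the mbuf is freed on
 * any failure.
 */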
1290 static void
1291 al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m)
1292 {
1293 	struct al_eth_tx_buffer *tx_info;
1294 	int error;
1295 	int nsegs, a;
1296 	uint16_t next_to_use;
1297 	bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1];
1298 	struct al_eth_pkt *hal_pkt;
1299 	struct al_buf *al_buf;
1300 	bool remap;
1301 
1302 	/* Check if queue is ready */
1303 	if (unlikely(tx_ring->stall != 0)) {
1304 		for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) {
1305 			if (al_udma_available_get(tx_ring->dma_q) >=
1306 			    (AL_ETH_DEFAULT_TX_HW_DESCS -
1307 			    AL_ETH_TX_WAKEUP_THRESH)) {
1308 				tx_ring->stall = 0;
1309 				break;
1310 			}
1311 			pause("stall", 1);
1312 		}
1313 		if (a == XMIT_QUEUE_TIMEOUT) {
1314 			device_printf(tx_ring->dev,
1315 			    "timeout waiting for queue %d ready!\n",
1316 			    tx_ring->ring_id);
			m_freem(m);	/* don't leak the mbuf on timeout */
1317 			return;
1318 		} else {
1319 			device_printf_dbg(tx_ring->dev,
1320 			    "queue %d is ready!\n", tx_ring->ring_id);
1321 		}
1322 	}
1323 
1324 	next_to_use = tx_ring->next_to_use;
1325 	tx_info = &tx_ring->tx_buffer_info[next_to_use];
1326 	tx_info->m = m;
1327 	hal_pkt = &tx_info->hal_pkt;
1328 
1329 	if (m == NULL) {
1330 		device_printf(tx_ring->dev, "mbuf is NULL\n");
1331 		return;
1332 	}
1333 
1334 	remap = true;
1335 	/* Map packets for DMA */
1336 retry:
1337 	error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map,
1338 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
1339 	if (__predict_false(error)) {
1340 		struct mbuf *m_new;
1341 
1342 		if (error == EFBIG) {
1343 			/* Retry once after defragmenting the mbuf chain */
1344 			if (remap == true) {
1345 				remap = false;
1346 				m_new = m_defrag(m, M_NOWAIT);
1347 				if (m_new == NULL) {
1348 					device_printf(tx_ring->dev,
1349 					    "failed to defrag mbuf\n");
1350 					goto exit;
1351 				}
1352 				m = m_new;
1353 				goto retry;
1354 			} else {
1355 				device_printf(tx_ring->dev,
1356 				    "failed to map mbuf, error %d\n", error);
1357 				goto exit;
1358 			}
1359 		} else {
1360 			device_printf(tx_ring->dev,
1361 			    "failed to map mbuf, error %d\n", error);
1362 			goto exit;
1363 		}
1364 	}
1365 
1366 	/* set flags and meta data */
1367 	hal_pkt->flags = AL_ETH_TX_FLAGS_INT;
1368 	al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m);
1369 
1370 	al_buf = hal_pkt->bufs;
1371 	for (a = 0; a < nsegs; a++) {
1372 		al_buf->addr = segs[a].ds_addr;
1373 		al_buf->len = segs[a].ds_len;
1374 
1375 		al_buf++;
1376 	}
1377 
1378 	hal_pkt->num_of_bufs = nsegs;
1379 
1380 	/* prepare the packet's descriptors to dma engine */
1381 	tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);
1382 
1383 	if (tx_info->tx_descs == 0)
1384 		goto exit;
1385 
1386 	/*
1387 	 * stop the queue when no more space available, the packet can have up
1388 	 * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor
1389 	 */
1390 	if (unlikely(al_udma_available_get(tx_ring->dma_q) <
1391 	    (AL_ETH_PKT_MAX_BUFS + 2))) {
1392 		tx_ring->stall = 1;
1393 		device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n",
1394 		    tx_ring->ring_id);
1395 		al_data_memory_barrier();
1396 	}
1397 
1398 	tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);
1399 
1400 	/* trigger the dma engine */
1401 	al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
1402 	return;
1403 
1404 exit:
1405 	m_freem(m);
1406 }
1407 
1408 static void
1409 al_eth_tx_cmpl_work(void *arg, int pending)
1410 {
1411 	struct al_eth_ring *tx_ring = arg;
1412 
1413 	if (napi != 0) {
1414 		tx_ring->cmpl_is_running = 1;
1415 		al_data_memory_barrier();
1416 	}
1417 
1418 	al_eth_tx_do_cleanup(tx_ring);
1419 
1420 	if (napi != 0) {
1421 		tx_ring->cmpl_is_running = 0;
1422 		al_data_memory_barrier();
1423 	}
1424 	/* all work done, enable IRQs */
1425 	al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
1426 }
1427 
1428 static int
1429 al_eth_tx_cmlp_irq_filter(void *arg)
1430 {
1431 	struct al_eth_ring *tx_ring = arg;
1432 
1433 	/* Interrupt should be auto-masked upon arrival */
1434 
1435 	device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
1436 	    tx_ring->ring_id);
1437 
1438 	/*
1439 	 * For napi, if work is not running, schedule it. Always schedule
1440 	 * for casual (non-napi) packet handling.
1441 	 */
1442 	if ((napi == 0) || ((napi != 0) && (tx_ring->cmpl_is_running == 0)))
1443 		taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
1444 
1445 	/* Do not run bottom half */
1446 	return (FILTER_HANDLED);
1447 }
1448 
1449 static int
1450 al_eth_rx_recv_irq_filter(void *arg)
1451 {
1452 	struct al_eth_ring *rx_ring = arg;
1453 
1454 	/* Interrupt should be auto-masked upon arrival */
1455 
1456 	device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__,
1457 	    rx_ring->ring_id);
1458 
1459 	/*
1460 	 * For napi, if work is not running, schedule it. Always schedule
1461 	 * for casual (non-napi) packet handling.
1462 	 */
1463 	if ((napi == 0) || ((napi != 0) && (rx_ring->enqueue_is_running == 0)))
1464 		taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
1465 
1466 	/* Do not run bottom half */
1467 	return (FILTER_HANDLED);
1468 }
1469 
1470 /*
1471  * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum
1472  * @adapter: structure containing adapter specific data
1473  * @hal_pkt: HAL structure for the packet
1474  * @mbuf: mbuf currently being received and modified
1475  */
1476 static inline void
1477 al_eth_rx_checksum(struct al_eth_adapter *adapter,
1478     struct al_eth_pkt *hal_pkt, struct mbuf *mbuf)
1479 {
1480 
1481 	/* if IPv4 and error */
1482 	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM) &&
1483 	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) &&
1484 	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
1485 		device_printf(adapter->dev,"rx ipv4 header checksum error\n");
1486 		return;
1487 	}
1488 
1489 	/* if IPv6 and error */
1490 	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM_IPV6) &&
1491 	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) &&
1492 	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
1493 		device_printf(adapter->dev,"rx ipv6 header checksum error\n");
1494 		return;
1495 	}
1496 
1497 	/* if TCP/UDP */
1498 	if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) ||
1499 	   (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) {
1500 		if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) {
1501 			device_printf_dbg(adapter->dev, "rx L4 checksum error\n");
1502 
1503 			/* TCP/UDP checksum error */
1504 			mbuf->m_pkthdr.csum_flags = 0;
1505 		} else {
1506 			device_printf_dbg(adapter->dev, "rx checksum correct\n");
1507 
1508 			/* IP Checksum Good */
1509 			mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1510 			mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1511 		}
1512 	}
1513 }
1514 
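/*
 * al_eth_rx_mbuf - turn a completed RX descriptor into an mbuf; packets of
 * up to small_copy_len bytes are copied into a fresh header mbuf so the
 * original cluster stays mapped and can be reused for receive.
 */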
1515 static struct mbuf *
1516 al_eth_rx_mbuf(struct al_eth_adapter *adapter,
1517     struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt,
1518     unsigned int descs, uint16_t *next_to_clean)
1519 {
1520 	struct mbuf *mbuf;
1521 	struct al_eth_rx_buffer *rx_info =
1522 	    &rx_ring->rx_buffer_info[*next_to_clean];
1523 	unsigned int len;
1524 
1525 	len = hal_pkt->bufs[0].len;
1526 	device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info,
1527 	   rx_info->m);
1528 
1529 	if (rx_info->m == NULL) {
1530 		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1531 		    *next_to_clean);
1532 		return (NULL);
1533 	}
1534 
1535 	mbuf = rx_info->m;
1536 	mbuf->m_pkthdr.len = len;
1537 	mbuf->m_len = len;
1538 	mbuf->m_pkthdr.rcvif = rx_ring->netdev;
1539 	mbuf->m_flags |= M_PKTHDR;
1540 
1541 	if (len <= adapter->small_copy_len) {
1542 		struct mbuf *smbuf;
1543 		device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len);
1544 
1545 		AL_RX_LOCK(adapter);
1546 		smbuf = m_gethdr(M_NOWAIT, MT_DATA);
1547 		AL_RX_UNLOCK(adapter);
1548 		if (__predict_false(smbuf == NULL)) {
1549 			device_printf(adapter->dev, "smbuf is NULL\n");
1550 			return (NULL);
1551 		}
1552 
1553 		smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1554 		memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len);
1555 
1556 		smbuf->m_len = len;
1557 		smbuf->m_pkthdr.rcvif = rx_ring->netdev;
1558 
1559 		/* first desc of a non-ps chain */
1560 		smbuf->m_flags |= M_PKTHDR;
1561 		smbuf->m_pkthdr.len = smbuf->m_len;
1562 
1563 		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1564 		    *next_to_clean);
1565 
1566 		return (smbuf);
1567 	}
1568 	mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1569 
1570 	/* Unmap the buffer */
1571 	bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map);
1572 
1573 	rx_info->m = NULL;
1574 	*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean);
1575 
1576 	return (mbuf);
1577 }
1578 
1579 static void
1580 al_eth_rx_recv_work(void *arg, int pending)
1581 {
1582 	struct al_eth_ring *rx_ring = arg;
1583 	struct mbuf *mbuf;
1584 	struct lro_entry *queued;
1585 	unsigned int qid = rx_ring->ring_id;
1586 	struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
1587 	uint16_t next_to_clean = rx_ring->next_to_clean;
1588 	uint32_t refill_required;
1589 	uint32_t refill_actual;
1590 	uint32_t do_if_input;
1591 
1592 	if (napi != 0) {
1593 		rx_ring->enqueue_is_running = 1;
1594 		al_data_memory_barrier();
1595 	}
1596 
1597 	do {
1598 		unsigned int descs;
1599 
1600 		descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt);
1601 		if (unlikely(descs == 0))
1602 			break;
1603 
1604 		device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet "
1605 		    "from hal. descs %d\n", qid, descs);
1606 		device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. "
1607 		    "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags,
1608 		    hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx);
1609 
1610 		/* ignore if detected dma or eth controller errors */
1611 		if ((hal_pkt->flags & (AL_ETH_RX_ERROR |
1612 		    AL_UDMA_CDESC_ERROR)) != 0) {
1613 			device_printf(rx_ring->dev, "receive packet with error. "
1614 			    "flags = 0x%x\n", hal_pkt->flags);
1615 			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1616 			    next_to_clean, descs);
1617 			continue;
1618 		}
1619 
1620 		/* allocate mbuf and fill it */
1621 		mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs,
1622 		    &next_to_clean);
1623 
1624 		/* exit if we failed to retrieve a buffer */
1625 		if (unlikely(mbuf == NULL)) {
1626 			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1627 			    next_to_clean, descs);
1628 			break;
1629 		}
1630 
1631 		if (__predict_true(if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM ||
1632 		    if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM_IPV6)) {
1633 			al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf);
1634 		}
1635 
1636 		mbuf->m_pkthdr.flowid = qid;
1637 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
1638 
1639 		/*
1640 		 * LRO is only for IP/TCP packets and TCP checksum of the packet
1641 		 * should be computed by hardware.
1642 		 */
1643 		do_if_input = 1;
1644 		if ((rx_ring->lro_enabled != 0) &&
1645 		    ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
1646 		    hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) {
1647 			/*
1648 			 * Send to the stack if:
1649 			 *  - LRO not enabled, or
1650 			 *  - no LRO resources, or
1651 			 *  - lro enqueue fails
1652 			 */
1653 			if (rx_ring->lro.lro_cnt != 0) {
1654 				if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
1655 					do_if_input = 0;
1656 			}
1657 		}
1658 
1659 		if (do_if_input)
1660 			if_input(rx_ring->netdev, mbuf);
1661 
1662 	} while (1);
1663 
1664 	rx_ring->next_to_clean = next_to_clean;
1665 
1666 	refill_required = al_udma_available_get(rx_ring->dma_q);
1667 	refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid,
1668 	    refill_required);
1669 
1670 	if (unlikely(refill_actual < refill_required)) {
1671 		device_printf_dbg(rx_ring->dev,
1672 		    "%s: not filling rx queue %d\n", __func__, qid);
1673 	}
1674 
1675 	while ((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL) {
1676 		LIST_REMOVE(queued, next);
1677 		tcp_lro_flush(&rx_ring->lro, queued);
1678 	}
1679 
1680 	if (napi != 0) {
1681 		rx_ring->enqueue_is_running = 0;
1682 		al_data_memory_barrier();
1683 	}
1684 	/* unmask irq */
1685 	al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val);
1686 }
1687 
1688 static void
1689 al_eth_start_xmit(void *arg, int pending)
1690 {
1691 	struct al_eth_ring *tx_ring = arg;
1692 	struct mbuf *mbuf;
1693 
1694 	if (napi != 0) {
1695 		tx_ring->enqueue_is_running = 1;
1696 		al_data_memory_barrier();
1697 	}
1698 
1699 	while (1) {
1700 		mtx_lock(&tx_ring->br_mtx);
1701 		mbuf = drbr_dequeue(NULL, tx_ring->br);
1702 		mtx_unlock(&tx_ring->br_mtx);
1703 
1704 		if (mbuf == NULL)
1705 			break;
1706 
1707 		al_eth_xmit_mbuf(tx_ring, mbuf);
1708 	}
1709 
1710 	if (napi != 0) {
1711 		tx_ring->enqueue_is_running = 0;
1712 		al_data_memory_barrier();
1713 		while (1) {
1714 			mtx_lock(&tx_ring->br_mtx);
1715 			mbuf = drbr_dequeue(NULL, tx_ring->br);
1716 			mtx_unlock(&tx_ring->br_mtx);
1717 			if (mbuf == NULL)
1718 				break;
1719 			al_eth_xmit_mbuf(tx_ring, mbuf);
1720 		}
1721 	}
1722 }
1723 
1724 static int
1725 al_mq_start(if_t ifp, struct mbuf *m)
1726 {
1727 	struct al_eth_adapter *adapter = if_getsoftc(ifp);
1728 	struct al_eth_ring *tx_ring;
1729 	int i;
1730 	int ret;
1731 
1732 	/* Which queue to use */
1733 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1734 		i = m->m_pkthdr.flowid % adapter->num_tx_queues;
1735 	else
1736 		i = curcpu % adapter->num_tx_queues;
1737 
1738 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1739 	    IFF_DRV_RUNNING) {
1740 		return (EFAULT);
1741 	}
1742 
1743 	tx_ring = &adapter->tx_ring[i];
1744 
1745 	device_printf_dbg(adapter->dev, "dgb start() - assuming link is active, "
1746 	    "sending packet to queue %d\n", i);
1747 
1748 	ret = drbr_enqueue(ifp, tx_ring->br, m);
1749 
1750 	/*
1751 	 * For napi, if work is not running, schedule it. Always schedule
1752 	 * for casual (non-napi) packet handling.
1753 	 */
1754 	if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0)))
1755 		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
1756 
1757 	return (ret);
1758 }
1759 
1760 static void
1761 al_qflush(if_t ifp)
1762 {
1763 
1764 	/* unused */
1765 }
1766 
1767 static inline void
1768 al_eth_flow_ctrl_init(struct al_eth_adapter *adapter)
1769 {
1770 	uint8_t default_flow_ctrl;
1771 
1772 	default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE;
1773 	default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE;
1774 
1775 	adapter->link_config.flow_ctrl_supported = default_flow_ctrl;
1776 }
1777 
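/*
 * al_eth_flow_ctrl_config - translate the active link-pause settings into
 * HAL flow-control parameters (RX FIFO thresholds, pause quanta) and map
 * priorities pairwise onto queues (queue = priority / 2).
 */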
1778 static int
1779 al_eth_flow_ctrl_config(struct al_eth_adapter *adapter)
1780 {
1781 	struct al_eth_flow_control_params *flow_ctrl_params;
1782 	uint8_t active = adapter->link_config.flow_ctrl_active;
1783 	int i;
1784 
1785 	flow_ctrl_params = &adapter->flow_ctrl_params;
1786 
1787 	flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
1788 	flow_ctrl_params->obay_enable =
1789 	    ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
1790 	flow_ctrl_params->gen_enable =
1791 	    ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);
1792 
1793 	flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH;
1794 	flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW;
1795 	flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA;
1796 	flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH;
1797 
1798 	/* map priority to queue index, queue id = priority/2 */
1799 	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
1800 		flow_ctrl_params->prio_q_map[0][i] = 1 << (i >> 1);
1801 
1802 	al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params);
1803 
1804 	return (0);
1805 }
1806 
1807 static void
1808 al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter)
1809 {
1810 
1811 	/*
1812 	 * change the active configuration to the default / force by ethtool
1813 	 * and call to configure
1814 	 */
1815 	adapter->link_config.flow_ctrl_active =
1816 	    adapter->link_config.flow_ctrl_supported;
1817 
1818 	al_eth_flow_ctrl_config(adapter);
1819 }
1820 
1821 static void
1822 al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter)
1823 {
1824 
1825 	adapter->link_config.flow_ctrl_active = 0;
1826 	al_eth_flow_ctrl_config(adapter);
1827 }
1828 
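/*
 * al_eth_hw_init - initialize the adapter, MAC, link parameters and
 * MDIO interface; called on every interface bring-up.
 */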
1829 static int
1830 al_eth_hw_init(struct al_eth_adapter *adapter)
1831 {
1832 	int rc;
1833 
1834 	rc = al_eth_hw_init_adapter(adapter);
1835 	if (rc != 0)
1836 		return (rc);
1837 
1838 	rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
1839 	if (rc < 0) {
1840 		device_printf(adapter->dev, "%s failed to configure mac!\n",
1841 		    __func__);
1842 		return (rc);
1843 	}
1844 
1845 	if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
1846 	    (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
1847 	     adapter->phy_exist == false)) {
1848 		rc = al_eth_mac_link_config(&adapter->hal_adapter,
1849 		    adapter->link_config.force_1000_base_x,
1850 		    adapter->link_config.autoneg,
1851 		    adapter->link_config.active_speed,
1852 		    adapter->link_config.active_duplex);
1853 		if (rc != 0) {
1854 			device_printf(adapter->dev,
1855 			    "%s failed to configure link parameters!\n",
1856 			    __func__);
1857 			return (rc);
1858 		}
1859 	}
1860 
1861 	rc = al_eth_mdio_config(&adapter->hal_adapter,
1862 	    AL_ETH_MDIO_TYPE_CLAUSE_22, AL_TRUE /* shared_mdio_if */,
1863 	    adapter->ref_clk_freq, adapter->mdio_freq);
1864 	if (rc != 0) {
1865 		device_printf(adapter->dev, "%s failed at mdio config!\n",
1866 		    __func__);
1867 		return (rc);
1868 	}
1869 
1870 	al_eth_flow_ctrl_init(adapter);
1871 
1872 	return (rc);
1873 }
1874 
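/*
 * al_eth_hw_stop - stop the MAC and the UDMA, disable flow control and
 * mark the adapter as requiring a function reset on the next bring-up.
 */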
1875 static int
1876 al_eth_hw_stop(struct al_eth_adapter *adapter)
1877 {
1878 
1879 	al_eth_mac_stop(&adapter->hal_adapter);
1880 
1881 	/*
1882 	 * Wait until pending rx packets are written and the UDMA becomes
1883 	 * idle. The MAC has a ~10KB FIFO; 10us should be enough time for
1884 	 * the UDMA to write everything out to memory.
1885 	 */
1886 	DELAY(10);
1887 
1888 	al_eth_adapter_stop(&adapter->hal_adapter);
1889 
1890 	adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED;
1891 
1892 	/* disable flow ctrl to avoid pause packets */
1893 	al_eth_flow_ctrl_disable(adapter);
1894 
1895 	return (0);
1896 }
1897 
1898 /*
1899  * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts
1900  * @data: pointer to the adapter's private structure
1902  */
1903 static int
1904 al_eth_intr_intx_all(void *data)
1905 {
1906 	struct al_eth_adapter *adapter = data;
1907 
1908 	struct unit_regs __iomem *regs_base =
1909 	    (struct unit_regs __iomem *)adapter->udma_base;
1910 	uint32_t reg;
1911 
1912 	reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
1913 	    AL_INT_GROUP_A);
1914 	if (likely(reg))
1915 		device_printf_dbg(adapter->dev, "%s group A cause %x\n",
1916 		    __func__, reg);
1917 
1918 	if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) {
1919 		struct al_iofic_grp_ctrl __iomem *sec_ints_base;
1920 		uint32_t cause_d =  al_udma_iofic_read_cause(regs_base,
1921 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);
1922 
1923 		sec_ints_base =
1924 		    &regs_base->gen.interrupt_regs.secondary_iofic_ctrl[0];
1925 		if (cause_d != 0) {
1926 			device_printf_dbg(adapter->dev,
1927 			    "got interrupt from group D. cause %x\n", cause_d);
1928 
1929 			cause_d = al_iofic_read_cause(sec_ints_base,
1930 			    AL_INT_GROUP_A);
1931 			device_printf(adapter->dev,
1932 			    "secondary A cause %x\n", cause_d);
1933 
1934 			cause_d = al_iofic_read_cause(sec_ints_base,
1935 			    AL_INT_GROUP_B);
1936 
1937 			device_printf_dbg(adapter->dev,
1938 			    "secondary B cause %x\n", cause_d);
1939 		}
1940 	}
1941 	if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0) {
1942 		uint32_t cause_b = al_udma_iofic_read_cause(regs_base,
1943 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1944 		int qid;
1945 		device_printf_dbg(adapter->dev, "group B cause %x\n",
1946 		    cause_b);
1947 		for (qid = 0; qid < adapter->num_rx_queues; qid++) {
1948 			if (cause_b & (1 << qid)) {
1949 				/* mask */
1950 				al_udma_iofic_mask(
1951 				    (struct unit_regs __iomem *)adapter->udma_base,
1952 				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
1953 				    AL_INT_GROUP_B, 1 << qid);
1954 			}
1955 		}
1956 	}
1957 	if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) {
1958 		uint32_t cause_c = al_udma_iofic_read_cause(regs_base,
1959 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
1960 		int qid;
1961 		device_printf_dbg(adapter->dev, "group C cause %x\n", cause_c);
1962 		for (qid = 0; qid < adapter->num_tx_queues; qid++) {
1963 			if ((cause_c & (1 << qid)) != 0) {
1964 				al_udma_iofic_mask(
1965 				    (struct unit_regs __iomem *)adapter->udma_base,
1966 				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
1967 				    AL_INT_GROUP_C, 1 << qid);
1968 			}
1969 		}
1970 	}
1971 
1972 	al_eth_tx_cmlp_irq_filter(adapter->tx_ring);
1973 
1974 	return (0);
1975 }
1976 
1977 static int
1978 al_eth_intr_msix_all(void *data)
1979 {
1980 	struct al_eth_adapter *adapter = data;
1981 
1982 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1983 	return (0);
1984 }
1985 
1986 static int
1987 al_eth_intr_msix_mgmt(void *data)
1988 {
1989 	struct al_eth_adapter *adapter = data;
1990 
1991 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1992 	return (0);
1993 }
1994 
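/*
 * al_eth_enable_msix - allocate MSI-X vectors
 *
 * The device's MSI-X table layout is fixed: entry 2 is the management
 * (group A) vector, rx queue entries start at 3 and tx queue entries
 * at 3 + AL_ETH_MAX_HW_QUEUES.
 */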
1995 static int
1996 al_eth_enable_msix(struct al_eth_adapter *adapter)
1997 {
1998 	int i, msix_vecs, rc, count;
1999 
2000 	device_printf_dbg(adapter->dev, "%s\n", __func__);
2001 	msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues;
2002 
2003 	device_printf_dbg(adapter->dev,
2004 	    "Try to enable MSIX, vector numbers = %d\n", msix_vecs);
2005 
2006 	adapter->msix_entries = malloc(msix_vecs * sizeof(*adapter->msix_entries),
2007 	    M_IFAL, M_ZERO | M_WAITOK);
2008 
2009 	if (adapter->msix_entries == NULL) {
2010 		device_printf_dbg(adapter->dev, "failed to allocate"
2011 		    " msix_entries %d\n", msix_vecs);
2012 		rc = ENOMEM;
2013 		goto exit;
2014 	}
2015 
2016 	/* management vector (GROUP_A) @2 */
2017 	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
2018 	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2019 
2020 	/* rx queues start @3 */
2021 	for (i = 0; i < adapter->num_rx_queues; i++) {
2022 		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2023 
2024 		adapter->msix_entries[irq_idx].entry = 3 + i;
2025 		adapter->msix_entries[irq_idx].vector = 0;
2026 	}
2027 	/* tx queues start @7 */
2028 	for (i = 0; i < adapter->num_tx_queues; i++) {
2029 		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2030 
2031 		adapter->msix_entries[irq_idx].entry = 3 +
2032 		    AL_ETH_MAX_HW_QUEUES + i;
2033 		adapter->msix_entries[irq_idx].vector = 0;
2034 	}
2035 
2036 	count = msix_vecs + 2; /* entries start from 2 */
2037 	rc = pci_alloc_msix(adapter->dev, &count);
2038 
2039 	if (rc != 0) {
2040 		device_printf_dbg(adapter->dev, "failed to allocate MSIX "
2041 		    "vectors %d\n", msix_vecs + 2);
2042 		device_printf_dbg(adapter->dev, "ret = %d\n", rc);
2043 		goto msix_entries_exit;
2044 	}
2045 
2046 	if (count != msix_vecs + 2) {
2047 		device_printf_dbg(adapter->dev, "failed to allocate all MSIX "
2048 		    "vectors %d, allocated %d\n", msix_vecs + 2, count);
2049 		rc = ENOSPC;
2050 		goto msix_entries_exit;
2051 	}
2052 
2053 	for (i = 0; i < msix_vecs; i++)
2054 		adapter->msix_entries[i].vector = 2 + 1 + i;
2055 
2056 	device_printf_dbg(adapter->dev, "successfully enabled MSIX,"
2057 	    " vectors %d\n", msix_vecs);
2058 
2059 	adapter->msix_vecs = msix_vecs;
2060 	adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED;
2061 	goto exit;
2062 
2063 msix_entries_exit:
2064 	adapter->msix_vecs = 0;
2065 	free(adapter->msix_entries, M_IFAL);
2066 	adapter->msix_entries = NULL;
2067 
2068 exit:
2069 	return (rc);
2070 }
2071 
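/*
 * al_eth_setup_int_mode - fill the irq table according to the number
 * of MSI-X vectors obtained: a single INTx handler, a single MSI-X
 * handler, or per-queue MSI-X vectors plus a management vector.
 */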
2072 static int
2073 al_eth_setup_int_mode(struct al_eth_adapter *adapter)
2074 {
2075 	int i, rc;
2076 
2077 	rc = al_eth_enable_msix(adapter);
2078 	if (rc != 0) {
2079 		device_printf(adapter->dev, "Failed to enable MSIX mode.\n");
2080 		return (rc);
2081 	}
2082 
2083 	adapter->irq_vecs = max(1, adapter->msix_vecs);
2084 	/* single INTX mode */
2085 	if (adapter->msix_vecs == 0) {
2086 		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2087 		    AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s",
2088 		    device_get_name(adapter->dev));
2089 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2090 		    al_eth_intr_intx_all;
2091 		/* IRQ vector will be resolved from device resources */
2092 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2093 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2094 
2095 		device_printf(adapter->dev, "%s: using INTx, vector %d\n", __func__,
2096 		    adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector);
2097 
2098 		return (0);
2099 	}
2100 	/* single MSI-X mode */
2101 	if (adapter->msix_vecs == 1) {
2102 		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2103 		    AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s",
2104 		    device_get_name(adapter->dev));
2105 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2106 		    al_eth_intr_msix_all;
2107 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2108 		    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2109 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2110 
2111 		return (0);
2112 	}
2113 	/* MSI-X per queue */
2114 	snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE,
2115 	    "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev));
2116 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt;
2117 
2118 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2119 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2120 	    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2121 
2122 	for (i = 0; i < adapter->num_rx_queues; i++) {
2123 		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2124 
2125 		snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE,
2126 		    "al-eth-rx-comp-%d@pci:%s", i,
2127 		    device_get_name(adapter->dev));
2128 		adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter;
2129 		adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i];
2130 		adapter->irq_tbl[irq_idx].vector =
2131 		    adapter->msix_entries[irq_idx].vector;
2132 	}
2133 
2134 	for (i = 0; i < adapter->num_tx_queues; i++) {
2135 		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2136 
2137 		snprintf(adapter->irq_tbl[irq_idx].name,
2138 		    AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i,
2139 		    device_get_name(adapter->dev));
2140 		adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter;
2141 		adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i];
2142 		adapter->irq_tbl[irq_idx].vector =
2143 		    adapter->msix_entries[irq_idx].vector;
2144 	}
2145 
2146 	return (0);
2147 }
2148 
2149 static void
2150 __al_eth_free_irq(struct al_eth_adapter *adapter)
2151 {
2152 	struct al_eth_irq *irq;
2153 	int i, rc;
2154 
2155 	for (i = 0; i < adapter->irq_vecs; i++) {
2156 		irq = &adapter->irq_tbl[i];
2157 		if (irq->requested != 0) {
2158 			device_printf_dbg(adapter->dev, "tear down irq: %d\n",
2159 			    irq->vector);
2160 			rc = bus_teardown_intr(adapter->dev, irq->res,
2161 			    irq->cookie);
2162 			if (rc != 0)
2163 				device_printf(adapter->dev, "failed to tear "
2164 				    "down irq: %d\n", irq->vector);
2165 		}
2166 		irq->requested = 0;
2167 	}
2168 }
2169 
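/*
 * al_eth_free_irq - tear down the installed interrupt handlers,
 * release the IRQ bus resources and the MSI-X allocation.
 */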
2170 static void
2171 al_eth_free_irq(struct al_eth_adapter *adapter)
2172 {
2173 	struct al_eth_irq *irq;
2174 	int i, rc;
2175 #ifdef CONFIG_RFS_ACCEL
2176 	if (adapter->msix_vecs >= 1) {
2177 		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2178 		adapter->netdev->rx_cpu_rmap = NULL;
2179 	}
2180 #endif
2181 
2182 	__al_eth_free_irq(adapter);
2183 
2184 	for (i = 0; i < adapter->irq_vecs; i++) {
2185 		irq = &adapter->irq_tbl[i];
2186 		if (irq->res == NULL)
2187 			continue;
2188 		device_printf_dbg(adapter->dev, "release resource irq: %d\n",
2189 		    irq->vector);
2190 		rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector,
2191 		    irq->res);
2192 		irq->res = NULL;
2193 		if (rc != 0)
2194 			device_printf(adapter->dev, "dev has no parent while "
2195 			    "releasing res for irq: %d\n", irq->vector);
2196 	}
2197 
2198 	pci_release_msi(adapter->dev);
2199 
2200 	adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;
2201 
2202 	adapter->msix_vecs = 0;
2203 	free(adapter->msix_entries, M_IFAL);
2204 	adapter->msix_entries = NULL;
2205 }
2206 
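/*
 * al_eth_request_irq - allocate the bus IRQ resources and install the
 * handlers recorded in the irq table; on failure, handlers and
 * resources set up so far are unwound.
 */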
2207 static int
2208 al_eth_request_irq(struct al_eth_adapter *adapter)
2209 {
2210 	unsigned long flags;
2211 	struct al_eth_irq *irq;
2212 	int rc = 0, i, v;
2213 
2214 	if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0)
2215 		flags = RF_ACTIVE;
2216 	else
2217 		flags = RF_ACTIVE | RF_SHAREABLE;
2218 
2219 	for (i = 0; i < adapter->irq_vecs; i++) {
2220 		irq = &adapter->irq_tbl[i];
2221 
2222 		if (irq->requested != 0)
2223 			continue;
2224 
2225 		irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ,
2226 		    &irq->vector, flags);
2227 		if (irq->res == NULL) {
2228 			device_printf(adapter->dev, "could not allocate "
2229 			    "irq vector=%d\n", irq->vector);
2230 			rc = ENXIO;
2231 			goto exit_res;
2232 		}
2233 
2234 		if ((rc = bus_setup_intr(adapter->dev, irq->res,
2235 		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler,
2236 		    NULL, irq->data, &irq->cookie)) != 0) {
2237 			device_printf(adapter->dev, "failed to register "
2238 			    "interrupt handler for irq %ju: %d\n",
2239 			    (uintmax_t)rman_get_start(irq->res), rc);
2240 			goto exit_intr;
2241 		}
2242 		irq->requested = 1;
2243 	}
2244 	goto exit;
2245 
2246 exit_intr:
2247 	v = i - 1; /* -1 because we omit the operation that failed */
2248 	for (; v >= 0; v--) {
2249 		int bti;
2250 		irq = &adapter->irq_tbl[v];
2251 		bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie);
2252 		if (bti != 0) {
2253 			device_printf(adapter->dev, "failed to tear "
2254 			    "down irq: %d\n", irq->vector);
2255 		}
2256 
2257 		irq->requested = 0;
2258 		device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n",
2259 		    irq->vector);
2260 	}
2261 
2262 exit_res:
2263 	v = i - 1; /* -1 because we omit the operation that failed */
2264 	for (; v >= 0; v--) {
2265 		int brr;
2266 		irq = &adapter->irq_tbl[v];
2267 		device_printf_dbg(adapter->dev, "exit_res: releasing resource"
2268 		    " for irq %d\n", irq->vector);
2269 		brr = bus_release_resource(adapter->dev, SYS_RES_IRQ,
2270 		    irq->vector, irq->res);
2271 		if (brr != 0)
2272 			device_printf(adapter->dev, "dev has no parent while "
2273 			    "releasing res for irq: %d\n", irq->vector);
2274 		irq->res = NULL;
2275 	}
2276 
2277 exit:
2278 	return (rc);
2279 }
2280 
2281 /**
2282  * al_eth_setup_tx_resources - allocate Tx resources (Descriptors)
2283  * @adapter: network interface device structure
2284  * @qid: queue index
2285  *
2286  * Return 0 on success, errno otherwise.
2287  **/
2288 static int
2289 al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
2290 {
2291 	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2292 	device_t dev = tx_ring->dev;
2293 	struct al_udma_q_params *q_params = &tx_ring->q_params;
2294 	int size;
2295 	int ret;
2296 
2297 	if (adapter->up)
2298 		return (0);
2299 
2300 	size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;
2301 
2302 	tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2303 	if (tx_ring->tx_buffer_info == NULL)
2304 		return (ENOMEM);
2305 
2306 	tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
2307 	q_params->size = tx_ring->hw_count;
2308 
2309 	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2310 	    (bus_dmamap_t *)&q_params->desc_phy_base_map,
2311 	    (bus_addr_t *)&q_params->desc_phy_base,
2312 	    (void**)&q_params->desc_base, tx_ring->descs_size);
2313 	if (ret != 0) {
2314 		device_printf(dev, "failed to al_dma_alloc_coherent,"
2315 		    " ret = %d\n", ret);
2316 		return (ENOMEM);
2317 	}
2318 
2319 	if (q_params->desc_base == NULL)
2320 		return (ENOMEM);
2321 
2322 	device_printf_dbg(dev, "Initializing ring queues %d\n", qid);
2323 
2324 	/* Allocate Ring Queue */
2325 	mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
2326 	tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
2327 	    &tx_ring->br_mtx);
2328 	if (tx_ring->br == NULL) {
2329 		device_printf(dev, "Critical Failure setting up buf ring\n");
2330 		return (ENOMEM);
2331 	}
2332 
2333 	/* Allocate taskqueues */
2334 	TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
2335 	tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT,
2336 	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
2337 	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq",
2338 	    device_get_nameunit(adapter->dev));
2339 	TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring);
2340 	tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT,
2341 	    taskqueue_thread_enqueue, &tx_ring->cmpl_tq);
2342 	taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq",
2343 	    device_get_nameunit(adapter->dev));
2344 
2345 	/* Setup DMA descriptor areas. */
2346 	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2347 	    1, 0,			/* alignment, bounds */
2348 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2349 	    BUS_SPACE_MAXADDR,		/* highaddr */
2350 	    NULL, NULL,			/* filter, filterarg */
2351 	    AL_TSO_SIZE,		/* maxsize */
2352 	    AL_ETH_PKT_MAX_BUFS,	/* nsegments */
2353 	    PAGE_SIZE,			/* maxsegsize */
2354 	    0,				/* flags */
2355 	    NULL,			/* lockfunc */
2356 	    NULL,			/* lockfuncarg */
2357 	    &tx_ring->dma_buf_tag);
2358 
2359 	if (ret != 0) {
2360 		device_printf(dev, "Unable to allocate dma_buf_tag, ret = %d\n",
2361 		    ret);
2362 		return (ret);
2363 	}
2364 
2365 	for (size = 0; size < tx_ring->sw_count; size++) {
2366 		ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0,
2367 		    &tx_ring->tx_buffer_info[size].dma_map);
2368 		if (ret != 0) {
2369 			device_printf(dev, "Unable to map DMA TX "
2370 			    "buffer memory [iter=%d]\n", size);
2371 			return (ret);
2372 		}
2373 	}
2374 
2375 	/* completion queue not used for tx */
2376 	q_params->cdesc_base = NULL;
2377 	/* size in bytes of the udma completion ring descriptor */
2378 	q_params->cdesc_size = 8;
2379 	tx_ring->next_to_use = 0;
2380 	tx_ring->next_to_clean = 0;
2381 
2382 	return (0);
2383 }
2384 
2385 /*
2386  * al_eth_free_tx_resources - Free Tx Resources per Queue
2387  * @adapter: network interface device structure
2388  * @qid: queue index
2389  *
2390  * Free all transmit software resources
2391  */
2392 static void
2393 al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid)
2394 {
2395 	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2396 	struct al_udma_q_params *q_params = &tx_ring->q_params;
2397 	int size;
2398 
2399 	/* At this point interrupts' handlers must be deactivated */
2400 	while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL))
2401 		taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
2402 
2403 	taskqueue_free(tx_ring->cmpl_tq);
2404 	while (taskqueue_cancel(tx_ring->enqueue_tq,
2405 	    &tx_ring->enqueue_task, NULL)) {
2406 		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2407 	}
2408 
2409 	taskqueue_free(tx_ring->enqueue_tq);
2410 
2411 	if (tx_ring->br != NULL) {
2412 		drbr_flush(adapter->netdev, tx_ring->br);
2413 		buf_ring_free(tx_ring->br, M_DEVBUF);
2414 	}
2415 
2416 	for (size = 0; size < tx_ring->sw_count; size++) {
2417 		m_freem(tx_ring->tx_buffer_info[size].m);
2418 		tx_ring->tx_buffer_info[size].m = NULL;
2419 
2420 		bus_dmamap_unload(tx_ring->dma_buf_tag,
2421 		    tx_ring->tx_buffer_info[size].dma_map);
2422 		bus_dmamap_destroy(tx_ring->dma_buf_tag,
2423 		    tx_ring->tx_buffer_info[size].dma_map);
2424 	}
2425 	bus_dma_tag_destroy(tx_ring->dma_buf_tag);
2426 
2427 	free(tx_ring->tx_buffer_info, M_IFAL);
2428 	tx_ring->tx_buffer_info = NULL;
2429 
2430 	mtx_destroy(&tx_ring->br_mtx);
2431 
2432 	/* if not set, then don't free */
2433 	if (q_params->desc_base == NULL)
2434 		return;
2435 
2436 	al_dma_free_coherent(q_params->desc_phy_base_tag,
2437 	    q_params->desc_phy_base_map, q_params->desc_base);
2438 
2439 	q_params->desc_base = NULL;
2440 }
2441 
2442 /*
2443  * al_eth_free_all_tx_resources - Free Tx Resources for All Queues
2444  * @adapter: board private structure
2445  *
2446  * Free all transmit software resources
2447  */
2448 static void
2449 al_eth_free_all_tx_resources(struct al_eth_adapter *adapter)
2450 {
2451 	int i;
2452 
2453 	for (i = 0; i < adapter->num_tx_queues; i++)
2454 		if (adapter->tx_ring[i].q_params.desc_base)
2455 			al_eth_free_tx_resources(adapter, i);
2456 }
2457 
2458 /*
2459  * al_eth_setup_rx_resources - allocate Rx resources (Descriptors)
2460  * @adapter: network interface device structure
2461  * @qid: queue index
2462  *
2463  * Returns 0 on success, errno otherwise.
2464  */
2465 static int
2466 al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2467 {
2468 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2469 	device_t dev = rx_ring->dev;
2470 	struct al_udma_q_params *q_params = &rx_ring->q_params;
2471 	int size;
2472 	int ret;
2473 
2474 	size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count;
2475 
2476 	/* alloc an extra element so the rx path can always prefetch rx_info + 1 */
2477 	size += sizeof(struct al_eth_rx_buffer);
2478 
2479 	rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2480 	if (rx_ring->rx_buffer_info == NULL)
2481 		return (ENOMEM);
2482 
2483 	rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
2484 	q_params->size = rx_ring->hw_count;
2485 
2486 	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2487 	    &q_params->desc_phy_base_map,
2488 	    (bus_addr_t *)&q_params->desc_phy_base,
2489 	    (void**)&q_params->desc_base, rx_ring->descs_size);
2490 
2491 	if ((q_params->desc_base == NULL) || (ret != 0))
2492 		return (ENOMEM);
2493 
2494 	/* size in bytes of the udma completion ring descriptor */
2495 	q_params->cdesc_size = 16;
2496 	rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size;
2497 	ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag,
2498 	    &q_params->cdesc_phy_base_map,
2499 	    (bus_addr_t *)&q_params->cdesc_phy_base,
2500 	    (void**)&q_params->cdesc_base, rx_ring->cdescs_size);
2501 
2502 	if ((q_params->cdesc_base == NULL) || (ret != 0))
2503 		return (ENOMEM);
2504 
2505 	/* Allocate taskqueues */
2506 	NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
2507 	rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
2508 	    taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
2509 	taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
2510 	    device_get_nameunit(adapter->dev));
2511 
2512 	/* Setup DMA descriptor areas. */
2513 	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2514 	    1, 0,			/* alignment, bounds */
2515 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2516 	    BUS_SPACE_MAXADDR,		/* highaddr */
2517 	    NULL, NULL,			/* filter, filterarg */
2518 	    AL_TSO_SIZE,		/* maxsize */
2519 	    1,				/* nsegments */
2520 	    AL_TSO_SIZE,		/* maxsegsize */
2521 	    0,				/* flags */
2522 	    NULL,			/* lockfunc */
2523 	    NULL,			/* lockfuncarg */
2524 	    &rx_ring->dma_buf_tag);
2525 
2526 	if (ret != 0) {
2527 		device_printf(dev, "Unable to allocate RX dma_buf_tag\n");
2528 		return (ret);
2529 	}
2530 
2531 	for (size = 0; size < rx_ring->sw_count; size++) {
2532 		ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0,
2533 		    &rx_ring->rx_buffer_info[size].dma_map);
2534 		if (ret != 0) {
2535 			device_printf(dev, "Unable to map DMA RX buffer memory\n");
2536 			return (ret);
2537 		}
2538 	}
2539 
2540 	/* Zero out the descriptor ring */
2541 	memset(q_params->cdesc_base, 0, rx_ring->cdescs_size);
2542 
2543 	/* Create LRO for the ring */
2544 	if ((if_getcapenable(adapter->netdev) & IFCAP_LRO) != 0) {
2545 		int err = tcp_lro_init(&rx_ring->lro);
2546 		if (err != 0) {
2547 			device_printf(adapter->dev,
2548 			    "LRO[%d] Initialization failed!\n", qid);
2549 		} else {
2550 			device_printf_dbg(adapter->dev,
2551 			    "RX Soft LRO[%d] Initialized\n", qid);
2552 			rx_ring->lro_enabled = true;
2553 			rx_ring->lro.ifp = adapter->netdev;
2554 		}
2555 	}
2556 
2557 	rx_ring->next_to_clean = 0;
2558 	rx_ring->next_to_use = 0;
2559 
2560 	return (0);
2561 }
2562 
2563 /*
2564  * al_eth_free_rx_resources - Free Rx Resources
2565  * @adapter: network interface device structure
2566  * @qid: queue index
2567  *
2568  * Free all receive software resources
2569  */
2570 static void
2571 al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2572 {
2573 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2574 	struct al_udma_q_params *q_params = &rx_ring->q_params;
2575 	int size;
2576 
2577 	/* At this point interrupts' handlers must be deactivated */
2578 	while (taskqueue_cancel(rx_ring->enqueue_tq,
2579 	    &rx_ring->enqueue_task, NULL)) {
2580 		taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
2581 	}
2582 
2583 	taskqueue_free(rx_ring->enqueue_tq);
2584 
2585 	for (size = 0; size < rx_ring->sw_count; size++) {
2586 		m_freem(rx_ring->rx_buffer_info[size].m);
2587 		rx_ring->rx_buffer_info[size].m = NULL;
2588 		bus_dmamap_unload(rx_ring->dma_buf_tag,
2589 		    rx_ring->rx_buffer_info[size].dma_map);
2590 		bus_dmamap_destroy(rx_ring->dma_buf_tag,
2591 		    rx_ring->rx_buffer_info[size].dma_map);
2592 	}
2593 	bus_dma_tag_destroy(rx_ring->dma_buf_tag);
2594 
2595 	free(rx_ring->rx_buffer_info, M_IFAL);
2596 	rx_ring->rx_buffer_info = NULL;
2597 
2598 	/* if not set, then don't free */
2599 	if (q_params->desc_base == NULL)
2600 		return;
2601 
2602 	al_dma_free_coherent(q_params->desc_phy_base_tag,
2603 	    q_params->desc_phy_base_map, q_params->desc_base);
2604 
2605 	q_params->desc_base = NULL;
2606 
2607 	/* if not set, then don't free */
2608 	if (q_params->cdesc_base == NULL)
2609 		return;
2610 
2611 	al_dma_free_coherent(q_params->cdesc_phy_base_tag,
2612 	    q_params->cdesc_phy_base_map, q_params->cdesc_base);
2613 
2614 	q_params->cdesc_phy_base = 0;
2615 
2616 	/* Free LRO resources */
2617 	tcp_lro_free(&rx_ring->lro);
2618 }
2619 
2620 /*
2621  * al_eth_free_all_rx_resources - Free Rx Resources for All Queues
2622  * @adapter: board private structure
2623  *
2624  * Free all receive software resources
2625  */
2626 static void
2627 al_eth_free_all_rx_resources(struct al_eth_adapter *adapter)
2628 {
2629 	int i;
2630 
2631 	for (i = 0; i < adapter->num_rx_queues; i++)
2632 		if (adapter->rx_ring[i].q_params.desc_base != 0)
2633 			al_eth_free_rx_resources(adapter, i);
2634 }
2635 
2636 /*
2637  * al_eth_setup_all_rx_resources - allocate all queues Rx resources
2638  * @adapter: board private structure
2639  *
2640  * Return 0 on success, errno otherwise.
2641  */
2642 static int
2643 al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter)
2644 {
2645 	int i, rc = 0;
2646 
2647 	for (i = 0; i < adapter->num_rx_queues; i++) {
2648 		rc = al_eth_setup_rx_resources(adapter, i);
2649 		if (rc == 0)
2650 			continue;
2651 
2652 		device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i);
2653 		goto err_setup_rx;
2654 	}
2655 	return (0);
2656 
2657 err_setup_rx:
2658 	/* rewind the index freeing the rings as we go */
2659 	while (i--)
2660 		al_eth_free_rx_resources(adapter, i);
2661 	return (rc);
2662 }
2663 
2664 /*
2665  * al_eth_setup_all_tx_resources - allocate all queues Tx resources
2666  * @adapter: private structure
2667  *
2668  * Return 0 on success, errno otherwise.
2669  */
2670 static int
2671 al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
2672 {
2673 	int i, rc = 0;
2674 
2675 	for (i = 0; i < adapter->num_tx_queues; i++) {
2676 		rc = al_eth_setup_tx_resources(adapter, i);
2677 		if (rc == 0)
2678 			continue;
2679 
2680 		device_printf(adapter->dev,
2681 		    "Allocation for Tx Queue %u failed\n", i);
2682 		goto err_setup_tx;
2683 	}
2684 
2685 	return (0);
2686 
2687 err_setup_tx:
2688 	/* rewind the index freeing the rings as we go */
2689 	while (i--)
2690 		al_eth_free_tx_resources(adapter, i);
2691 
2692 	return (rc);
2693 }
2694 
2695 static void
2696 al_eth_disable_int_sync(struct al_eth_adapter *adapter)
2697 {
2698 
2699 	/* disable forwarding interrupts from eth through pci end point */
2700 	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
2701 	    (adapter->board_type == ALPINE_NIC)) {
2702 		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
2703 		    AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR);
2704 	}
2705 
2706 	/* mask hw interrupts */
2707 	al_eth_interrupts_mask(adapter);
2708 }
2709 
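/*
 * al_eth_interrupts_unmask - unmask the primary iofic interrupts:
 * the group D summary in group A (plus the group B/C summaries in
 * legacy INTx mode) and one bit per rx/tx queue in groups B and C.
 */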
2710 static void
2711 al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
2712 {
2713 	uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */
2714 	uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1; /* bit per Rx q */
2715 	uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1; /* bit per Tx q */
2716 	uint32_t group_d_mask = 3 << 8;
2717 	struct unit_regs __iomem *regs_base =
2718 	    (struct unit_regs __iomem *)adapter->udma_base;
2719 
2720 	if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
2721 		group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
2722 		    AL_INT_GROUP_A_GROUP_C_SUM |
2723 		    AL_INT_GROUP_A_GROUP_D_SUM;
2724 
2725 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2726 	    AL_INT_GROUP_A, group_a_mask);
2727 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2728 	    AL_INT_GROUP_B, group_b_mask);
2729 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2730 	    AL_INT_GROUP_C, group_c_mask);
2731 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2732 	    AL_INT_GROUP_D, group_d_mask);
2733 }
2734 
2735 static void
2736 al_eth_interrupts_mask(struct al_eth_adapter *adapter)
2737 {
2738 	struct unit_regs __iomem *regs_base =
2739 	    (struct unit_regs __iomem *)adapter->udma_base;
2740 
2741 	/* mask all interrupts */
2742 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2743 	    AL_INT_GROUP_A, AL_MASK_GROUP_A_INT);
2744 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2745 	    AL_INT_GROUP_B, AL_MASK_GROUP_B_INT);
2746 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2747 	    AL_INT_GROUP_C, AL_MASK_GROUP_C_INT);
2748 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2749 	    AL_INT_GROUP_D, AL_MASK_GROUP_D_INT);
2750 }
2751 
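/*
 * al_eth_configure_int_mode - configure the UDMA iofic for legacy INTx
 * or per-queue MSI-X operation and set the interrupt moderation
 * resolution; single MSI-X is rejected as unsupported by the UDMA.
 */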
2752 static int
2753 al_eth_configure_int_mode(struct al_eth_adapter *adapter)
2754 {
2755 	enum al_iofic_mode int_mode;
2756 	uint32_t m2s_errors_disable = AL_M2S_MASK_INIT;
2757 	uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT;
2758 	uint32_t s2m_errors_disable = AL_S2M_MASK_INIT;
2759 	uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT;
2760 
2761 	/* single INTX mode */
2762 	if (adapter->msix_vecs == 0)
2763 		int_mode = AL_IOFIC_MODE_LEGACY;
2764 	else if (adapter->msix_vecs > 1)
2765 		int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
2766 	else {
2767 		device_printf(adapter->dev,
2768 		    "udma doesn't support single MSI-X mode yet.\n");
2769 		return (EIO);
2770 	}
2771 
2772 	if (adapter->board_type != ALPINE_INTEGRATED) {
2773 		m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
2774 		m2s_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2775 		s2m_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
2776 		s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2777 	}
2778 
2779 	if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
2780 	    int_mode, m2s_errors_disable, m2s_aborts_disable,
2781 	    s2m_errors_disable, s2m_aborts_disable)) {
2782 		device_printf(adapter->dev,
2783 		    "al_udma_iofic_config failed!\n");
2784 		return (EIO);
2785 	}
2786 	adapter->int_mode = int_mode;
2787 	device_printf_dbg(adapter->dev, "using %s interrupt mode\n",
2788 	    int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" :
2789 	    int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown");
2790 	/* set interrupt moderation resolution to 15us */
2791 	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15);
2792 	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15);
2793 	/* by default interrupt coalescing is disabled */
2794 	adapter->tx_usecs = 0;
2795 	adapter->rx_usecs = 0;
2796 
2797 	return (0);
2798 }
2799 
2800 /*
2801  * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
2802  * @index: Index in RX flow hash indirection table
2803  * @n_rx_rings: Number of RX rings to use
2804  *
2805  * This function provides the default policy for RX flow hash indirection.
2806  */
2807 static inline uint32_t
2808 ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
2809 {
2810 
2811 	return (index % n_rx_rings);
2812 }
2813 
2814 static void*
2815 al_eth_update_stats(struct al_eth_adapter *adapter)
2816 {
2817 	struct al_eth_mac_stats *mac_stats = &adapter->mac_stats;
2818 
2819 	if (adapter->up == 0)
2820 		return (NULL);
2821 
2822 	al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats);
2823 
2824 	return (NULL);
2825 }
2826 
2827 static uint64_t
2828 al_get_counter(if_t ifp, ift_counter cnt)
2829 {
2830 	struct al_eth_adapter *adapter;
2831 	struct al_eth_mac_stats *mac_stats;
2832 	uint64_t rv;
2833 
2834 	adapter = if_getsoftc(ifp);
2835 	mac_stats = &adapter->mac_stats;
2836 
2837 	switch (cnt) {
2838 	case IFCOUNTER_IPACKETS:
2839 		return (mac_stats->aFramesReceivedOK); /* including pause frames */
2840 	case IFCOUNTER_OPACKETS:
2841 		return (mac_stats->aFramesTransmittedOK);
2842 	case IFCOUNTER_IBYTES:
2843 		return (mac_stats->aOctetsReceivedOK);
2844 	case IFCOUNTER_OBYTES:
2845 		return (mac_stats->aOctetsTransmittedOK);
2846 	case IFCOUNTER_IMCASTS:
2847 		return (mac_stats->ifInMulticastPkts);
2848 	case IFCOUNTER_OMCASTS:
2849 		return (mac_stats->ifOutMulticastPkts);
2850 	case IFCOUNTER_COLLISIONS:
2851 		return (0);
2852 	case IFCOUNTER_IQDROPS:
2853 		return (mac_stats->etherStatsDropEvents);
2854 	case IFCOUNTER_IERRORS:
2855 		rv = mac_stats->ifInErrors +
2856 		    mac_stats->etherStatsUndersizePkts + /* good but short */
2857 		    mac_stats->etherStatsFragments + /* short and bad*/
2858 		    mac_stats->etherStatsJabbers + /* with crc errors */
2859 		    mac_stats->etherStatsOversizePkts +
2860 		    mac_stats->aFrameCheckSequenceErrors +
2861 		    mac_stats->aAlignmentErrors;
2862 		return (rv);
2863 	case IFCOUNTER_OERRORS:
2864 		return (mac_stats->ifOutErrors);
2865 	default:
2866 		return (if_get_counter_default(ifp, cnt));
2867 	}
2868 }
2869 
2870 static u_int
2871 al_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2872 {
2873 	unsigned char *mac;
2874 
2875 	mac = LLADDR(sdl);
2876 	/* default mc address inside mac address */
2877 	/* count all entries except the default mc address (xx:xx:xx:00:00:01) */
2878 	if (mac[3] != 0 || mac[4] != 0 || mac[5] != 1)
2879 	else
2880 		return (0);
2881 }
2882 
2883 static u_int
2884 al_program_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2885 {
2886 	struct al_eth_adapter *adapter = arg;
2887 
2888 	al_eth_mac_table_unicast_add(adapter,
2889 	    AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 + cnt, 1);
2890 
2891 	return (1);
2892 }
2893 
2894 /*
2895  *  Unicast, Multicast and Promiscuous mode set
2896  *
2897  *  The set_rx_mode entry point is called whenever the unicast or multicast
2898  *  address lists or the network interface flags are updated.  This routine is
2899  *  responsible for configuring the hardware for proper unicast, multicast,
2900  *  promiscuous mode, and all-multi behavior.
2901  */
2902 static void
2903 al_eth_set_rx_mode(struct al_eth_adapter *adapter)
2904 {
2905 	if_t ifp = adapter->netdev;
2906 	int mc, uc;
2907 	uint8_t i;
2908 
2909 	/* XXXGL: why generic count won't work? */
2910 	mc = if_foreach_llmaddr(ifp, al_count_maddr, NULL);
2911 	uc = if_lladdr_count(ifp);
2912 
2913 	if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
2914 		al_eth_mac_table_promiscuous_set(adapter, true);
2915 	} else {
2916 		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
2917 			/* This interface is in all-multicasts mode (used by multicast routers). */
2918 			al_eth_mac_table_all_multicast_add(adapter,
2919 			    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2920 		} else {
2921 			if (mc == 0) {
2922 				al_eth_mac_table_entry_clear(adapter,
2923 				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
2924 			} else {
2925 				al_eth_mac_table_all_multicast_add(adapter,
2926 				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2927 			}
2928 		}
2929 		if (uc != 0) {
2930 			i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
2931 			if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) {
2932 				/*
2933 				 * In this case there are more addresses than
2934 				 * entries in the mac table - set promiscuous
2935 				 */
2936 				al_eth_mac_table_promiscuous_set(adapter, true);
2937 				return;
2938 			}
2939 
2940 			/* clear the last configuration */
2941 			while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE +
2942 				    AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
2943 				al_eth_mac_table_entry_clear(adapter, i);
2944 				i++;
2945 			}
2946 
2947 			/* set new addresses */
2948 			if_foreach_lladdr(ifp, al_program_addr, adapter);
2949 		}
2950 		al_eth_mac_table_promiscuous_set(adapter, false);
2951 	}
2952 }
2953 
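/*
 * al_eth_config_rx_fwd - set the default rx forwarding configuration:
 * pbits/priority mappings, control table defaults, unicast/broadcast
 * mac table entries, a random toeplitz hash key and a default RSS
 * indirection table.
 */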
2954 static void
2955 al_eth_config_rx_fwd(struct al_eth_adapter *adapter)
2956 {
2957 	struct al_eth_fwd_ctrl_table_entry entry;
2958 	int i;
2959 
2960 	/* let priority be equal to pbits */
2961 	for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++)
2962 		al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i);
2963 
2964 	/* map priority to queue index, queue id = priority/2 */
2965 	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
2966 		al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1);
2967 
2968 	entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0;
2969 	entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE;
2970 	entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO;
2971 	entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE;
2972 	entry.filter = false;
2973 
2974 	al_eth_ctrl_table_def_set(&adapter->hal_adapter, AL_FALSE, &entry);
2975 
2976 	/*
2977 	 * By default set the mac table to forward all unicast packets to our
2978 	 * MAC address and all broadcast packets. All the rest will be dropped.
2979 	 */
2980 	al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
2981 	    1);
2982 	al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1);
2983 	al_eth_mac_table_promiscuous_set(adapter, false);
2984 
2985 	/* set toeplitz hash keys */
2986 	for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++)
2987 		*((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random();
2988 
2989 	for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++)
2990 		al_eth_hash_key_set(&adapter->hal_adapter, i,
2991 		    htonl(adapter->toeplitz_hash_key[i]));
2992 
2993 	for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) {
2994 		adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i,
2995 		    AL_ETH_NUM_QUEUES);
2996 		al_eth_set_thash_table_entry(adapter, i, 0,
2997 		    adapter->rss_ind_tbl[i]);
2998 	}
2999 
3000 	al_eth_fsm_table_init(adapter);
3001 }
3002 
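/*
 * al_eth_req_rx_buff_size - choose the smallest mbuf cluster size that
 * satisfies the requested rx buffer size, bounded by the adapter's
 * maximum rx buffer allocation size.
 */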
3003 static void
3004 al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size)
3005 {
3006 
3007 	/*
3008 	 * Determine the correct mbuf pool for doing jumbo frames.
3009 	 * Try from the smallest size up to the maximum supported.
3010 	 */
3012 	adapter->rx_mbuf_sz = MCLBYTES;
3013 	if (size > 2048) {
3014 		if (adapter->max_rx_buff_alloc_size > 2048)
3015 			adapter->rx_mbuf_sz = MJUMPAGESIZE;
3016 		else
3017 			return;
3018 	}
3019 	if (size > 4096) {
3020 		if (adapter->max_rx_buff_alloc_size > 4096)
3021 			adapter->rx_mbuf_sz = MJUM9BYTES;
3022 		else
3023 			return;
3024 	}
3025 	if (size > 9216) {
3026 		if (adapter->max_rx_buff_alloc_size > 9216)
3027 			adapter->rx_mbuf_sz = MJUM16BYTES;
3028 		else
3029 			return;
3030 	}
3031 }
3032 
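/*
 * al_eth_change_mtu - apply a new MTU: pick the rx buffer size and
 * program the hardware rx frame length limits and the TSO MSS.
 */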
3033 static int
3034 al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu)
3035 {
3036 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
3037 	    ETHER_VLAN_ENCAP_LEN;
3038 
3039 	al_eth_req_rx_buff_size(adapter, new_mtu);
3040 
3041 	device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu);
3042 	al_eth_rx_pkt_limit_config(&adapter->hal_adapter,
3043 	    AL_ETH_MIN_FRAME_LEN, max_frame);
3044 
3045 	al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100);
3046 
3047 	return (0);
3048 }
3049 
3050 static int
3051 al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu)
3052 {
3053 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
3054 
3055 	if ((new_mtu < AL_ETH_MIN_FRAME_LEN) ||
3056 	    (max_frame > AL_ETH_MAX_FRAME_LEN)) {
3057 		return (EINVAL);
3058 	}
3059 
3060 	return (0);
3061 }
3062 
3063 static int
3064 al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type,
3065     int qid)
3066 {
3067 	int rc = 0;
3068 	char *name = (type == UDMA_TX) ? "Tx" : "Rx";
3069 	struct al_udma_q_params *q_params;
3070 
3071 	if (type == UDMA_TX)
3072 		q_params = &adapter->tx_ring[qid].q_params;
3073 	else
3074 		q_params = &adapter->rx_ring[qid].q_params;
3075 
3076 	rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params);
3077 	if (rc < 0) {
3078 		device_printf(adapter->dev, "config %s queue %u failed\n", name,
3079 		    qid);
3080 		return (rc);
3081 	}
3082 	return (rc);
3083 }
3084 
3085 static int
3086 al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter)
3087 {
3088 	int i;
3089 
3090 	for (i = 0; i < adapter->num_tx_queues; i++)
3091 		al_eth_udma_queue_enable(adapter, UDMA_TX, i);
3092 
3093 	for (i = 0; i < adapter->num_rx_queues; i++)
3094 		al_eth_udma_queue_enable(adapter, UDMA_RX, i);
3095 
3096 	return (0);
3097 }
3098 
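/*
 * al_eth_up_complete - final stage of interface bring-up: configure
 * the interrupt mode and rx forwarding, enable the UDMA queues, refill
 * the rx rings, unmask interrupts and start the MAC.
 */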
3099 static void
3100 al_eth_up_complete(struct al_eth_adapter *adapter)
3101 {
3102 
3103 	al_eth_configure_int_mode(adapter);
3104 	al_eth_config_rx_fwd(adapter);
3105 	al_eth_change_mtu(adapter, if_getmtu(adapter->netdev));
3106 	al_eth_udma_queues_enable_all(adapter);
3107 	al_eth_refill_all_rx_bufs(adapter);
3108 	al_eth_interrupts_unmask(adapter);
3109 
3110 	/* enable forwarding interrupts from eth through pci end point */
3111 	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
3112 	    (adapter->board_type == ALPINE_NIC)) {
3113 		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
3114 		    AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR);
3115 	}
3116 
3117 	al_eth_flow_ctrl_enable(adapter);
3118 
3119 	mtx_lock(&adapter->stats_mtx);
3120 	callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter);
3121 	mtx_unlock(&adapter->stats_mtx);
3122 
3123 	al_eth_mac_start(&adapter->hal_adapter);
3124 }
3125 
3126 static int
3127 al_media_update(if_t ifp)
3128 {
3129 	struct al_eth_adapter *adapter = if_getsoftc(ifp);
3130 
3131 	if ((if_getflags(ifp) & IFF_UP) != 0)
3132 		mii_mediachg(adapter->mii);
3133 
3134 	return (0);
3135 }
3136 
3137 static void
3138 al_media_status(if_t ifp, struct ifmediareq *ifmr)
3139 {
3140 	struct al_eth_adapter *sc = if_getsoftc(ifp);
3141 	struct mii_data *mii;
3142 
3143 	if (sc->mii == NULL) {
3144 		ifmr->ifm_active = IFM_ETHER | IFM_NONE;
3145 		ifmr->ifm_status = 0;
3146 
3147 		return;
3148 	}
3149 
3150 	mii = sc->mii;
3151 	mii_pollstat(mii);
3152 
3153 	ifmr->ifm_active = mii->mii_media_active;
3154 	ifmr->ifm_status = mii->mii_media_status;
3155 }
3156 
3157 static void
3158 al_tick(void *arg)
3159 {
3160 	struct al_eth_adapter *adapter = arg;
3161 
3162 	mii_tick(adapter->mii);
3163 
3164 	/* Schedule another timeout one second from now */
3165 	callout_schedule(&adapter->wd_callout, hz);
3166 }
3167 
3168 static void
3169 al_tick_stats(void *arg)
3170 {
3171 	struct al_eth_adapter *adapter = arg;
3172 
3173 	al_eth_update_stats(adapter);
3174 
3175 	callout_schedule(&adapter->stats_callout, hz);
3176 }
3177 
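/*
 * al_eth_up - bring the interface up: reset the function if requested,
 * initialize the serdes and the hardware, set up interrupts and queue
 * resources, then start the MAC and, for RGMII, the MII tick callout.
 */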
3178 static int
3179 al_eth_up(struct al_eth_adapter *adapter)
3180 {
3181 	if_t ifp = adapter->netdev;
3182 	int rc;
3183 
3184 	if (adapter->up)
3185 		return (0);
3186 
3187 	if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) {
3188 		al_eth_function_reset(adapter);
3189 		adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED;
3190 	}
3191 
3192 	if_sethwassist(ifp, 0);
3193 	if ((if_getcapenable(ifp) & IFCAP_TSO) != 0)
3194 		if_sethwassistbits(ifp, CSUM_TSO, 0);
3195 	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
3196 		if_sethwassistbits(ifp, (CSUM_TCP | CSUM_UDP), 0);
3197 	if ((if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) != 0)
3198 		if_sethwassistbits(ifp, (CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0);
3199 
3200 	al_eth_serdes_init(adapter);
3201 
3202 	rc = al_eth_hw_init(adapter);
3203 	if (rc != 0)
3204 		goto err_hw_init_open;
3205 
3206 	rc = al_eth_setup_int_mode(adapter);
3207 	if (rc != 0) {
3208 		device_printf(adapter->dev,
3209 		    "%s failed at setup interrupt mode!\n", __func__);
3210 		goto err_setup_int;
3211 	}
3212 
3213 	/* allocate transmit descriptors */
3214 	rc = al_eth_setup_all_tx_resources(adapter);
3215 	if (rc != 0)
3216 		goto err_setup_tx;
3217 
3218 	/* allocate receive descriptors */
3219 	rc = al_eth_setup_all_rx_resources(adapter);
3220 	if (rc != 0)
3221 		goto err_setup_rx;
3222 
3223 	rc = al_eth_request_irq(adapter);
3224 	if (rc != 0)
3225 		goto err_req_irq;
3226 
3227 	al_eth_up_complete(adapter);
3228 
3229 	adapter->up = true;
3230 
3231 	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial)
3232 		if_link_state_change(adapter->netdev, LINK_STATE_UP);
3233 
3234 	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
3235 		mii_mediachg(adapter->mii);
3236 
3237 		/* Schedule watchdog timeout */
3238 		mtx_lock(&adapter->wd_mtx);
3239 		callout_reset(&adapter->wd_callout, hz, al_tick, adapter);
3240 		mtx_unlock(&adapter->wd_mtx);
3241 
3242 		mii_pollstat(adapter->mii);
3243 	}
3244 
3245 	return (rc);
3246 
3247 err_req_irq:
3248 	al_eth_free_all_rx_resources(adapter);
3249 err_setup_rx:
3250 	al_eth_free_all_tx_resources(adapter);
3251 err_setup_tx:
3252 	al_eth_free_irq(adapter);
3253 err_setup_int:
3254 	al_eth_hw_stop(adapter);
3255 err_hw_init_open:
3256 	al_eth_function_reset(adapter);
3257 
3258 	return (rc);
3259 }
3260 
3261 static int
3262 al_shutdown(device_t dev)
3263 {
3264 	struct al_eth_adapter *adapter = device_get_softc(dev);
3265 
3266 	al_eth_down(adapter);
3267 
3268 	return (0);
3269 }
3270 
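/*
 * al_eth_down - bring the interface down: stop the callouts, disable
 * and free interrupts, stop the hardware and free all tx/rx queue
 * resources.
 */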
3271 static void
3272 al_eth_down(struct al_eth_adapter *adapter)
3273 {
3274 
3275 	device_printf_dbg(adapter->dev, "al_eth_down: begin\n");
3276 
3277 	adapter->up = false;
3278 
3279 	mtx_lock(&adapter->wd_mtx);
3280 	callout_stop(&adapter->wd_callout);
3281 	mtx_unlock(&adapter->wd_mtx);
3282 
3283 	al_eth_disable_int_sync(adapter);
3284 
3285 	mtx_lock(&adapter->stats_mtx);
3286 	callout_stop(&adapter->stats_callout);
3287 	mtx_unlock(&adapter->stats_mtx);
3288 
3289 	al_eth_free_irq(adapter);
3290 	al_eth_hw_stop(adapter);
3291 
3292 	al_eth_free_all_tx_resources(adapter);
3293 	al_eth_free_all_rx_resources(adapter);
3294 }
3295 
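/*
 * al_ioctl - ioctl entry point: handles MTU, interface flag, multicast
 * list, media and capability changes, and passes everything else to
 * ether_ioctl().
 */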
3296 static int
3297 al_ioctl(if_t ifp, u_long command, caddr_t data)
3298 {
3299 	struct al_eth_adapter	*adapter = if_getsoftc(ifp);
3300 	struct ifreq		*ifr = (struct ifreq *)data;
3301 	int			error = 0;
3302 
3303 	switch (command) {
3304 	case SIOCSIFMTU:
3305 	{
3306 		error = al_eth_check_mtu(adapter, ifr->ifr_mtu);
3307 		if (error != 0) {
3308 			device_printf(adapter->dev, "ioctl wrong mtu %u\n",
3309 			    ifr->ifr_mtu);
3310 			break;
3311 		}
3312 
3313 		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3314 		if_setmtu(adapter->netdev, ifr->ifr_mtu);
3315 		al_init(adapter);
3316 		break;
3317 	}
3318 	case SIOCSIFFLAGS:
3319 		if ((if_getflags(ifp) & IFF_UP) != 0) {
3320 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3321 				if (((if_getflags(ifp) ^ adapter->if_flags) &
3322 				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3323 					device_printf_dbg(adapter->dev,
3324 					    "ioctl promisc/allmulti\n");
3325 					al_eth_set_rx_mode(adapter);
3326 				}
3327 			} else {
3328 				error = al_eth_up(adapter);
3329 				if (error == 0)
3330 					if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
3331 			}
3332 		} else {
3333 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3334 				al_eth_down(adapter);
3335 				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3336 			}
3337 		}
3338 
3339 		adapter->if_flags = if_getflags(ifp);
3340 		break;
3341 
3342 	case SIOCADDMULTI:
3343 	case SIOCDELMULTI:
3344 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3345 			device_printf_dbg(adapter->dev,
3346 			    "ioctl add/del multi before\n");
3347 			al_eth_set_rx_mode(adapter);
3351 		}
3352 		break;
3353 	case SIOCSIFMEDIA:
3354 	case SIOCGIFMEDIA:
3355 		if (adapter->mii != NULL)
3356 			error = ifmedia_ioctl(ifp, ifr,
3357 			    &adapter->mii->mii_media, command);
3358 		else
3359 			error = ifmedia_ioctl(ifp, ifr,
3360 			    &adapter->media, command);
3361 		break;
3362 	case SIOCSIFCAP:
3363 	    {
3364 		int mask, reinit;
3365 
3366 		reinit = 0;
3367 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
3368 #ifdef DEVICE_POLLING
3369 		if ((mask & IFCAP_POLLING) != 0) {
3370 			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
3371 				/* no poll handler; just latch the bit */
3372 				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
3374 			} else {
3375 				error = ether_poll_deregister(ifp);
3376 				/* Enable interrupt even in error case */
3377 				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
3378 			}
3379 		}
3380 #endif
3381 		if ((mask & IFCAP_HWCSUM) != 0) {
3382 			/* apply to both rx and tx */
3383 			if_togglecapenable(ifp, IFCAP_HWCSUM);
3384 			reinit = 1;
3385 		}
3386 		if ((mask & IFCAP_HWCSUM_IPV6) != 0) {
3387 			if_togglecapenable(ifp, IFCAP_HWCSUM_IPV6);
3388 			reinit = 1;
3389 		}
3390 		if ((mask & IFCAP_TSO) != 0) {
3391 			if_togglecapenable(ifp, IFCAP_TSO);
3392 			reinit = 1;
3393 		}
3394 		if ((mask & IFCAP_LRO) != 0) {
3395 			if_togglecapenable(ifp, IFCAP_LRO);
3396 		}
3397 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
3398 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
3399 			reinit = 1;
3400 		}
3401 		if ((mask & IFCAP_VLAN_HWFILTER) != 0) {
3402 			if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
3403 			reinit = 1;
3404 		}
3405 		if ((mask & IFCAP_VLAN_HWTSO) != 0) {
3406 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
3407 			reinit = 1;
3408 		}
3409 		if ((reinit != 0) &&
3410 		    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
3412 			al_init(adapter);
3413 		}
3414 		break;
3415 	    }
3416 
3417 	default:
3418 		error = ether_ioctl(ifp, command, data);
3419 		break;
3420 	}
3421 
3422 	return (error);
3423 }
3424 
3425 static int
3426 al_is_device_supported(device_t dev)
3427 {
3428 	uint16_t pci_vendor_id = pci_get_vendor(dev);
3429 	uint16_t pci_device_id = pci_get_device(dev);
3430 
3431 	return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS &&
3432 	    (pci_device_id == PCI_DEVICE_ID_AL_ETH ||
3433 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED ||
3434 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC ||
3435 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC));
3436 }
3437 
3438 /* Time in mSec to keep trying to read / write from MDIO in case of error */
3439 #define	MDIO_TIMEOUT_MSEC	100
3440 #define	MDIO_PAUSE_MSEC		10
3441 
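/*
 * al_miibus_readreg - read a PHY register over the shared MDIO
 * interface, retrying for up to MDIO_TIMEOUT_MSEC before giving up.
 */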
3442 static int
3443 al_miibus_readreg(device_t dev, int phy, int reg)
3444 {
3445 	struct al_eth_adapter *adapter = device_get_softc(dev);
3446 	uint16_t value = 0;
3447 	int rc;
3448 	int timeout = MDIO_TIMEOUT_MSEC;
3449 
3450 	while (timeout > 0) {
3451 		rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr,
3452 		    -1, reg, &value);
3453 
3454 		if (rc == 0)
3455 			return (value);
3456 
3457 		device_printf_dbg(adapter->dev,
3458 		    "mdio read failed, trying again in 10 msec\n");
3459 
3460 		timeout -= MDIO_PAUSE_MSEC;
3461 		pause("readreg pause", MDIO_PAUSE_MSEC);
3462 	}
3463 
3464 	if (rc != 0)
3465 		device_printf(adapter->dev, "MDIO read failed on timeout\n");
3466 
3467 	return (value);
3468 }
3469 
3470 static int
3471 al_miibus_writereg(device_t dev, int phy, int reg, int value)
3472 {
3473 	struct al_eth_adapter *adapter = device_get_softc(dev);
3474 	int rc;
3475 	int timeout = MDIO_TIMEOUT_MSEC;
3476 
3477 	while (timeout > 0) {
3478 		rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr,
3479 		    -1, reg, value);
3480 
3481 		if (rc == 0)
3482 			return (0);
3483 
3484 		device_printf(adapter->dev,
3485 		    "mdio write failed, trying again in 10 msec\n");
3486 
3487 		timeout -= MDIO_PAUSE_MSEC;
3488 		pause("miibus writereg", MDIO_PAUSE_MSEC);
3489 	}
3490 
3491 	if (rc != 0)
3492 		device_printf(adapter->dev, "MDIO write failed on timeout\n");
3493 
3494 	return (rc);
3495 }
3496 
3497 static void
3498 al_miibus_statchg(device_t dev)
3499 {
3500 	struct al_eth_adapter *adapter = device_get_softc(dev);
3501 
3502 	device_printf_dbg(adapter->dev,
3503 	    "al_miibus_statchg: state has changed!\n");
3504 	device_printf_dbg(adapter->dev,
3505 	    "al_miibus_statchg: active = 0x%x status = 0x%x\n",
3506 	    adapter->mii->mii_media_active, adapter->mii->mii_media_status);
3507 
3508 	if (adapter->up == 0)
3509 		return;
3510 
3511 	if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) {
3512 		if (adapter->mii->mii_media_status & IFM_ACTIVE) {
3513 			device_printf(adapter->dev, "link is UP\n");
3514 			if_link_state_change(adapter->netdev, LINK_STATE_UP);
3515 		} else {
3516 			device_printf(adapter->dev, "link is DOWN\n");
3517 			if_link_state_change(adapter->netdev, LINK_STATE_DOWN);
3518 		}
3519 	}
3520 }
3521 
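/*
 * al_miibus_linkchg - propagate the PHY's resolved speed and duplex
 * to the MAC whenever a valid, active link is reported.
 */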
3522 static void
3523 al_miibus_linkchg(device_t dev)
3524 {
3525 	struct al_eth_adapter *adapter = device_get_softc(dev);
3526 	uint8_t duplex = 0;
3527 	uint8_t speed = 0;
3528 
3529 	if (adapter->mii == NULL)
3530 		return;
3531 
3532 	if ((if_getflags(adapter->netdev) & IFF_UP) == 0)
3533 		return;
3534 
3535 	/* Ignore link changes when link is not ready */
3536 	if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
3537 	    (IFM_AVALID | IFM_ACTIVE)) {
3538 		return;
3539 	}
3540 
3541 	if ((adapter->mii->mii_media_active & IFM_FDX) != 0)
3542 		duplex = 1;
3543 
3544 	speed = IFM_SUBTYPE(adapter->mii->mii_media_active);
3545 
3546 	if (speed == IFM_10_T) {
3547 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3548 		    AL_10BASE_T_SPEED, duplex);
3549 		return;
3550 	}
3551 
3552 	if (speed == IFM_100_TX) {
3553 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3554 		    AL_100BASE_TX_SPEED, duplex);
3555 		return;
3556 	}
3557 
3558 	if (speed == IFM_1000_T) {
3559 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3560 		    AL_1000BASE_T_SPEED, duplex);
3561 		return;
3562 	}
3563 
3564 	device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n",
3565 	    adapter->mii->mii_media_active);
3566 }
3567