xref: /freebsd/sys/dev/al_eth/al_eth.c (revision 792bbaba989533a1fc93823df1720c8c4aaf0442)
/*-
 * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <machine/atomic.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <net/if_vlan_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <al_hal_common.h>
#include <al_hal_plat_services.h>
#include <al_hal_udma_config.h>
#include <al_hal_udma_iofic.h>
#include <al_hal_udma_debug.h>
#include <al_hal_eth.h>

#include "al_eth.h"
#include "al_init_eth_lm.h"
#include "arm/annapurna/alpine/alpine_serdes.h"

#include "miibus_if.h"

#define	device_printf_dbg(dev, ...) do {				\
	if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK();		\
	    device_printf(dev, __VA_ARGS__); AL_DBG_UNLOCK();}		\
	} while (0)

MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");

/* TODO: move these PCI IDs out to a common pci header file */
#define	PCI_VENDOR_ID_ANNAPURNA_LABS	0x1c36
#define	PCI_DEVICE_ID_AL_ETH		0x0001
#define	PCI_DEVICE_ID_AL_ETH_ADVANCED	0x0002
#define	PCI_DEVICE_ID_AL_ETH_NIC	0x0003
#define	PCI_DEVICE_ID_AL_ETH_FPGA_NIC	0x0030
#define	PCI_DEVICE_ID_AL_CRYPTO		0x0011
#define	PCI_DEVICE_ID_AL_CRYPTO_VF	0x8011
#define	PCI_DEVICE_ID_AL_RAID_DMA	0x0021
#define	PCI_DEVICE_ID_AL_RAID_DMA_VF	0x8021
#define	PCI_DEVICE_ID_AL_USB		0x0041

#define	MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
#define	MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]

#define	AL_ETH_MAC_TABLE_UNICAST_IDX_BASE	0
#define	AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT	4
#define	AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX	(AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
						 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)

#define	AL_ETH_MAC_TABLE_DROP_IDX		(AL_ETH_FWD_MAC_NUM - 1)
#define	AL_ETH_MAC_TABLE_BROADCAST_IDX		(AL_ETH_MAC_TABLE_DROP_IDX - 1)
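/*
 * Resulting layout of the forwarding MAC table: unicast filters occupy
 * entries 0-3, the all-multicast entry follows at index 4, the broadcast
 * entry sits just below the last entry, and the last entry itself is the
 * catch-all used for promiscuous mode / drop.
 */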

#define	AL_ETH_THASH_UDMA_SHIFT		0
#define	AL_ETH_THASH_UDMA_MASK		(0xF << AL_ETH_THASH_UDMA_SHIFT)

#define	AL_ETH_THASH_Q_SHIFT		4
#define	AL_ETH_THASH_Q_MASK		(0x3 << AL_ETH_THASH_Q_SHIFT)
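/*
 * A RX hash (thash) redirection entry packs the target UDMA in bits 0-3 and
 * the target queue in bits 4-5, i.e. roughly:
 *	entry = (udma << AL_ETH_THASH_UDMA_SHIFT) | (queue << AL_ETH_THASH_Q_SHIFT)
 */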

/* the following defines should be moved to hal */
#define	AL_ETH_FSM_ENTRY_IPV4_TCP		0
#define	AL_ETH_FSM_ENTRY_IPV4_UDP		1
#define	AL_ETH_FSM_ENTRY_IPV6_TCP		2
#define	AL_ETH_FSM_ENTRY_IPV6_UDP		3
#define	AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP	4
#define	AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP	5

/* FSM DATA format */
#define	AL_ETH_FSM_DATA_OUTER_2_TUPLE	0
#define	AL_ETH_FSM_DATA_OUTER_4_TUPLE	1
#define	AL_ETH_FSM_DATA_INNER_2_TUPLE	2
#define	AL_ETH_FSM_DATA_INNER_4_TUPLE	3

#define	AL_ETH_FSM_DATA_HASH_SEL	(1 << 2)

#define	AL_ETH_FSM_DATA_DEFAULT_Q	0
#define	AL_ETH_FSM_DATA_DEFAULT_UDMA	0

#define	AL_BR_SIZE	512
#define	AL_TSO_SIZE	65500
#define	AL_DEFAULT_MTU	1500

#define	CSUM_OFFLOAD		(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)

#define	AL_IP_ALIGNMENT_OFFSET	2
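/*
 * Receive buffers are offset by 2 bytes so that, after the 14-byte Ethernet
 * header, the IP header starts on a 4-byte boundary (the usual ETHER_ALIGN
 * trick).
 */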

#define	SFP_I2C_ADDR		0x50

#define	AL_MASK_GROUP_A_INT	0x7
#define	AL_MASK_GROUP_B_INT	0xF
#define	AL_MASK_GROUP_C_INT	0xF
#define	AL_MASK_GROUP_D_INT	0xFFFFFFFF

#define	AL_REG_OFFSET_FORWARD_INTR	(0x1800000 + 0x1210)
#define	AL_EN_FORWARD_INTR	0x1FFFF
#define	AL_DIS_FORWARD_INTR	0

#define	AL_M2S_MASK_INIT	0x480
#define	AL_S2M_MASK_INIT	0x1E0
#define	AL_M2S_S2M_MASK_NOT_INT	(0x3f << 25)

#define	AL_10BASE_T_SPEED	10
#define	AL_100BASE_TX_SPEED	100
#define	AL_1000BASE_T_SPEED	1000

static devclass_t al_devclass;

#define	AL_RX_LOCK_INIT(_sc)	mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
#define	AL_RX_LOCK(_sc)		mtx_lock(&((_sc)->if_rx_lock))
#define	AL_RX_UNLOCK(_sc)	mtx_unlock(&((_sc)->if_rx_lock))

/* helper functions */
static int al_is_device_supported(device_t);

static void al_eth_init_rings(struct al_eth_adapter *);
static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
int al_eth_fpga_write_pci_config(void *, int, uint32_t);
int al_eth_read_pci_config(void *, int, uint32_t *);
int al_eth_write_pci_config(void *, int, uint32_t);
void al_eth_irq_config(uint32_t *, uint32_t);
void al_eth_forward_int_config(uint32_t *, uint32_t);
static void al_eth_start_xmit(void *, int);
static void al_eth_rx_recv_work(void *, int);
static int al_eth_up(struct al_eth_adapter *);
static void al_eth_down(struct al_eth_adapter *);
static void al_eth_interrupts_unmask(struct al_eth_adapter *);
static void al_eth_interrupts_mask(struct al_eth_adapter *);
static int al_eth_check_mtu(struct al_eth_adapter *, int);
static uint64_t al_get_counter(struct ifnet *, ift_counter);
static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
static int al_eth_board_params_init(struct al_eth_adapter *);
static int al_media_update(struct ifnet *);
static void al_media_status(struct ifnet *, struct ifmediareq *);
static int al_eth_function_reset(struct al_eth_adapter *);
static int al_eth_hw_init_adapter(struct al_eth_adapter *);
static void al_eth_serdes_init(struct al_eth_adapter *);
static void al_eth_lm_config(struct al_eth_adapter *);
static int al_eth_hw_init(struct al_eth_adapter *);

static void al_tick_stats(void *);

/* ifnet entry points */
static void al_init(void *);
static int al_mq_start(struct ifnet *, struct mbuf *);
static void al_qflush(struct ifnet *);
static int al_ioctl(struct ifnet *, u_long, caddr_t);

/* bus entry points */
static int al_probe(device_t);
static int al_attach(device_t);
static int al_detach(device_t);
static int al_shutdown(device_t);

/* mii bus support routines */
static int al_miibus_readreg(device_t, int, int);
static int al_miibus_writereg(device_t, int, int, int);
static void al_miibus_statchg(device_t);
static void al_miibus_linkchg(device_t);

struct al_eth_adapter* g_adapters[16];
uint32_t g_adapters_count;

/* flag for napi-like mbuf processing, controlled from sysctl */
static int napi = 0;

static device_method_t al_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		al_probe),
	DEVMETHOD(device_attach,	al_attach),
	DEVMETHOD(device_detach,	al_detach),
	DEVMETHOD(device_shutdown,	al_shutdown),

	DEVMETHOD(miibus_readreg,	al_miibus_readreg),
	DEVMETHOD(miibus_writereg,	al_miibus_writereg),
	DEVMETHOD(miibus_statchg,	al_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	al_miibus_linkchg),
	{ 0, 0 }
};

static driver_t al_driver = {
	"al",
	al_methods,
	sizeof(struct al_eth_adapter),
};

DRIVER_MODULE(al, pci, al_driver, al_devclass, 0, 0);
DRIVER_MODULE(miibus, al, miibus_driver, miibus_devclass, 0, 0);

static int
al_probe(device_t dev)
{
	if (al_is_device_supported(dev) != 0) {
		device_set_desc(dev, "al");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

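/*
 * Attach maps three PCI BARs: the UDMA BAR with the DMA queue registers, the
 * Ethernet MAC BAR, and the EC (Ethernet controller) BAR; their virtual
 * addresses are handed to the HAL as register bases for the rest of the
 * driver's life.
 */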
static int
al_attach(device_t dev)
{
	struct al_eth_lm_context *lm_context;
	struct al_eth_adapter *adapter;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct ifnet *ifp;
	uint32_t dev_id;
	uint32_t rev_id;
	int bar_udma;
	int bar_mac;
	int bar_ec;
	int err;

	err = 0;
	ifp = NULL;
	dev_id = rev_id = 0;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
	child = SYSCTL_CHILDREN(tree);

	if (g_adapters_count == 0) {
		SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi",
		    CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism");
	}
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	adapter->board_type = ALPINE_INTEGRATED;
	snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
	    device_get_nameunit(dev));
	AL_RX_LOCK_INIT(adapter);

	g_adapters[g_adapters_count] = adapter;

	lm_context = &adapter->lm_context;

	bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
	adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar_udma, RF_ACTIVE);
	if (adapter->udma_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for DMA.\n");
		err = ENOMEM;
		goto err_res_dma;
	}
	adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
	    rman_get_bushandle(adapter->udma_res));
	bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
	adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar_mac, RF_ACTIVE);
	if (adapter->mac_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for MAC.\n");
		err = ENOMEM;
		goto err_res_mac;
	}
	adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
	    rman_get_bushandle(adapter->mac_res));

	bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
	adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
	    RF_ACTIVE);
	if (adapter->ec_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for EC.\n");
		err = ENOMEM;
		goto err_res_ec;
	}
	adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
	    rman_get_bushandle(adapter->ec_res));

	adapter->netdev = ifp = if_alloc(IFT_ETHER);

	adapter->netdev->if_link_state = LINK_STATE_DOWN;

	ifp->if_softc = adapter;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_flags = ifp->if_drv_flags;
	ifp->if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI;
	ifp->if_transmit = al_mq_start;
	ifp->if_qflush = al_qflush;
	ifp->if_ioctl = al_ioctl;
	ifp->if_init = al_init;
	ifp->if_get_counter = al_get_counter;
	ifp->if_mtu = AL_DEFAULT_MTU;

	adapter->if_flags = ifp->if_flags;

	ifp->if_capabilities = ifp->if_capenable = 0;

	ifp->if_capabilities |= IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
	    IFCAP_LRO | IFCAP_JUMBO_MTU;

	ifp->if_capenable = ifp->if_capabilities;

	adapter->id_number = g_adapters_count;

	if (adapter->board_type == ALPINE_INTEGRATED) {
		dev_id = pci_get_device(adapter->dev);
		rev_id = pci_get_revid(adapter->dev);
	} else {
		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
		    PCIR_DEVICE, &dev_id);
		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
		    PCIR_REVID, &rev_id);
	}

	adapter->dev_id = dev_id;
	adapter->rev_id = rev_id;

	/* set default ring sizes */
	adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
	adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
	adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
	adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;

	adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
	adapter->num_rx_queues = AL_ETH_NUM_QUEUES;

	adapter->small_copy_len	= AL_ETH_DEFAULT_SMALL_PACKET_LEN;
	adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
	adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;

	al_eth_req_rx_buff_size(adapter, adapter->netdev->if_mtu);

	adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;

	err = al_eth_board_params_init(adapter);
	if (err != 0)
		goto err;

	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
		ifmedia_init(&adapter->media, IFM_IMASK,
		    al_media_update, al_media_status);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	}

	al_eth_function_reset(adapter);

	err = al_eth_hw_init_adapter(adapter);
	if (err != 0)
		goto err;

	al_eth_init_rings(adapter);
	g_adapters_count++;

	al_eth_lm_config(adapter);
	mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
	mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
	callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
	callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);

	ether_ifattach(ifp, adapter->mac_addr);
	ifp->if_mtu = AL_DEFAULT_MTU;

	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
		al_eth_hw_init(adapter);

		/* Attach PHY(s) */
		err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
		    al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
		    MII_OFFSET_ANY, 0);
		if (err != 0) {
			device_printf(adapter->dev, "attaching PHYs failed\n");
			return (err);
		}

		adapter->mii = device_get_softc(adapter->miibus);
	}

	return (err);

err:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
err_res_ec:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
err_res_mac:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
err_res_dma:
	return (err);
}

static int
al_detach(device_t dev)
{
	struct al_eth_adapter *adapter;

	adapter = device_get_softc(dev);
	ether_ifdetach(adapter->netdev);

	mtx_destroy(&adapter->stats_mtx);
	mtx_destroy(&adapter->wd_mtx);

	al_eth_down(adapter);

	bus_release_resource(dev, SYS_RES_IRQ,    0, adapter->irq_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);

	return (0);
}

int
al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
{

	/* handle is the base address of the adapter */
	*val = al_reg_read32((void*)((u_long)handle + where));

	return (0);
}

int
al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
{

	/* handle is the base address of the adapter */
	al_reg_write32((void*)((u_long)handle + where), val);
	return (0);
}

int
al_eth_read_pci_config(void *handle, int where, uint32_t *val)
{

	/* handle is a pci_dev */
	*val = pci_read_config((device_t)handle, where, sizeof(*val));
	return (0);
}

int
al_eth_write_pci_config(void *handle, int where, uint32_t val)
{

	/* handle is a pci_dev */
	pci_write_config((device_t)handle, where, val, sizeof(val));
	return (0);
}

void
al_eth_irq_config(uint32_t *offset, uint32_t value)
{

	al_reg_write32_relaxed(offset, value);
}

void
al_eth_forward_int_config(uint32_t *offset, uint32_t value)
{

	al_reg_write32(offset, value);
}

static void
al_eth_serdes_init(struct al_eth_adapter *adapter)
{
	void __iomem	*serdes_base;

	adapter->serdes_init = false;

	serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
	if (serdes_base == NULL) {
		device_printf(adapter->dev, "serdes_base get failed!\n");
		return;
	}

	serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);

	al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
	    &adapter->serdes_obj);

	adapter->serdes_init = true;
}

static void
al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	/* On a failed load there is no valid segment to record. */
	if (error != 0)
		return;

	paddr = arg;
	*paddr = segs->ds_addr;
}

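/*
 * FreeBSD analogue of a coherent DMA allocation: create a tag sized to the
 * request (rounded up to a full page), allocate zeroed coherent memory, then
 * load the map once so al_dma_map_addr() can capture the bus address.
 */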
static int
al_dma_alloc_coherent(struct device *dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
    bus_addr_t *baddr, void **vaddr, uint32_t size)
{
	int ret;
	uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag);
	if (ret != 0) {
		device_printf(dev,
		    "failed to create bus tag, ret = %d\n", ret);
		return (ret);
	}

	ret = bus_dmamem_alloc(*tag, vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (ret != 0) {
		device_printf(dev,
		    "failed to allocate dmamem, ret = %d\n", ret);
		return (ret);
	}

	ret = bus_dmamap_load(*tag, *map, *vaddr,
	    size, al_dma_map_addr, baddr, 0);
	if (ret != 0) {
		device_printf(dev,
		    "failed to load bus dmamap, ret = %d\n", ret);
		return (ret);
	}

	return (0);
}

static void
al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
{

	bus_dmamap_unload(tag, map);
	bus_dmamem_free(tag, vaddr, map);
	bus_dma_tag_destroy(tag);
}

static void
al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter,
    uint8_t idx, uint8_t *addr, uint8_t udma_mask)
{
	struct al_eth_fwd_mac_table_entry entry = { { 0 } };

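	/*
	 * Note: the entry is always populated from adapter->mac_addr; the
	 * addr argument is currently unused by this helper.
	 */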
	memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr));

	memset(entry.mask, 0xff, sizeof(entry.mask));
	entry.rx_valid = true;
	entry.tx_valid = false;
	entry.udma_mask = udma_mask;
	entry.filter = false;

	device_printf_dbg(adapter->dev,
	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));

	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

static void
al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx,
    uint8_t udma_mask)
{
	struct al_eth_fwd_mac_table_entry entry = { { 0 } };

	memset(entry.addr, 0x00, sizeof(entry.addr));
	memset(entry.mask, 0x00, sizeof(entry.mask));
	entry.mask[0] |= 1;
	entry.addr[0] |= 1;

	entry.rx_valid = true;
	entry.tx_valid = false;
	entry.udma_mask = udma_mask;
	entry.filter = false;

	device_printf_dbg(adapter->dev,
	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));

	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

static void
al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter,
    uint8_t idx, uint8_t udma_mask)
{
	struct al_eth_fwd_mac_table_entry entry = { { 0 } };

	memset(entry.addr, 0xff, sizeof(entry.addr));
	memset(entry.mask, 0xff, sizeof(entry.mask));

	entry.rx_valid = true;
	entry.tx_valid = false;
	entry.udma_mask = udma_mask;
	entry.filter = false;

	device_printf_dbg(adapter->dev,
	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));

	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

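/*
 * The catch-all entry: an all-zero mask matches every destination address.
 * In promiscuous mode the match is forwarded to UDMA 0, otherwise it is
 * marked as filtered so non-matching traffic is dropped.
 */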
static void
al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter,
    boolean_t promiscuous)
{
	struct al_eth_fwd_mac_table_entry entry = { { 0 } };

	memset(entry.addr, 0x00, sizeof(entry.addr));
	memset(entry.mask, 0x00, sizeof(entry.mask));

	entry.rx_valid = true;
	entry.tx_valid = false;
	entry.udma_mask = (promiscuous) ? 1 : 0;
	entry.filter = (promiscuous) ? false : true;

	device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n",
	    __func__, (promiscuous) ? "enter" : "exit");

	al_eth_fwd_mac_table_set(&adapter->hal_adapter,
	    AL_ETH_MAC_TABLE_DROP_IDX, &entry);
}

static void
al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx,
    uint8_t udma, uint32_t queue)
{

	if (udma != 0)
		panic("only UDMA0 is supported");

	if (queue >= AL_ETH_NUM_QUEUES)
		panic("invalid queue number");

	al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue);
}

/*
 * Initialize the RX FSM table. Tunneling is not supported yet: TCP/UDP over
 * IPv4/IPv6 is hashed on the outer 4-tuple, other IP traffic on the outer
 * 2-tuple, and anything else falls back to the default queue/UDMA.
 */
static void
al_eth_fsm_table_init(struct al_eth_adapter *adapter)
{
	uint32_t val;
	int i;

	for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) {
		uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i);

		switch (outer_type) {
		case AL_ETH_FSM_ENTRY_IPV4_TCP:
		case AL_ETH_FSM_ENTRY_IPV4_UDP:
		case AL_ETH_FSM_ENTRY_IPV6_TCP:
		case AL_ETH_FSM_ENTRY_IPV6_UDP:
			val = AL_ETH_FSM_DATA_OUTER_4_TUPLE |
			    AL_ETH_FSM_DATA_HASH_SEL;
			break;
		case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP:
		case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP:
			val = AL_ETH_FSM_DATA_OUTER_2_TUPLE |
			    AL_ETH_FSM_DATA_HASH_SEL;
			break;
		default:
			val = AL_ETH_FSM_DATA_DEFAULT_Q |
			    AL_ETH_FSM_DATA_DEFAULT_UDMA;
		}
		al_eth_fsm_table_set(&adapter->hal_adapter, i, val);
	}
}

static void
al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter,
    uint8_t idx)
{
	struct al_eth_fwd_mac_table_entry entry = { { 0 } };

	device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx);

	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

static int
al_eth_hw_init_adapter(struct al_eth_adapter *adapter)
{
	struct al_eth_adapter_params *params = &adapter->eth_hal_params;
	int rc;

	/* params->dev_id = adapter->dev_id; */
	params->rev_id = adapter->rev_id;
	params->udma_id = 0;
	params->enable_rx_parser = 1; /* enable rx epe parser */
	params->udma_regs_base = adapter->udma_base; /* UDMA register base address */
	params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */
	params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */
	params->name = adapter->name;
	params->serdes_lane = adapter->serdes_lane;

	rc = al_eth_adapter_init(&adapter->hal_adapter, params);
	if (rc != 0)
		device_printf(adapter->dev, "%s failed at hal init!\n",
		    __func__);

	if ((adapter->board_type == ALPINE_NIC) ||
	    (adapter->board_type == ALPINE_FPGA_NIC)) {
		/* in PCIe NIC mode, force the eth UDMA to access PCIE0 using the TGTID */
		struct al_udma_gen_tgtid_conf conf;
		int i;

		for (i = 0; i < DMA_MAX_Q; i++) {
			conf.tx_q_conf[i].queue_en = AL_TRUE;
			conf.tx_q_conf[i].desc_en = AL_FALSE;
			conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
			conf.rx_q_conf[i].queue_en = AL_TRUE;
			conf.rx_q_conf[i].desc_en = AL_FALSE;
			conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
		}
		al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf);
	}

	return (rc);
}

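/*
 * Populate the HAL link-management context: which serdes lane to drive, how
 * to detect an SFP module over I2C, the default link mode to use when no
 * detection is possible, and any retimer sitting in front of the port.
 */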
static void
al_eth_lm_config(struct al_eth_adapter *adapter)
{
	struct al_eth_lm_init_params params = {0};

	params.adapter = &adapter->hal_adapter;
	params.serdes_obj = &adapter->serdes_obj;
	params.lane = adapter->serdes_lane;
	params.sfp_detection = adapter->sfp_detection_needed;
	if (adapter->sfp_detection_needed == true) {
		params.sfp_bus_id = adapter->i2c_adapter_id;
		params.sfp_i2c_addr = SFP_I2C_ADDR;
	}

	if (adapter->sfp_detection_needed == false) {
		switch (adapter->mac_mode) {
		case AL_ETH_MAC_MODE_10GbE_Serial:
			if ((adapter->lt_en != 0) && (adapter->an_en != 0))
				params.default_mode = AL_ETH_LM_MODE_10G_DA;
			else
				params.default_mode = AL_ETH_LM_MODE_10G_OPTIC;
			break;
		case AL_ETH_MAC_MODE_SGMII:
			params.default_mode = AL_ETH_LM_MODE_1G;
			break;
		default:
			params.default_mode = AL_ETH_LM_MODE_10G_DA;
		}
	} else
		params.default_mode = AL_ETH_LM_MODE_10G_DA;

	params.link_training = adapter->lt_en;
	params.rx_equal = true;
	params.static_values = !adapter->dont_override_serdes;
	params.i2c_context = adapter;
	params.kr_fec_enable = false;

	params.retimer_exist = adapter->retimer.exist;
	params.retimer_bus_id = adapter->retimer.bus_id;
	params.retimer_i2c_addr = adapter->retimer.i2c_addr;
	params.retimer_channel = adapter->retimer.channel;

	al_eth_lm_init(&adapter->lm_context, &params);
}

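/*
 * Board parameters: NIC and FPGA-NIC boards use fixed settings, while the
 * integrated Alpine flavor reads the configuration (presumably stored by the
 * bootloader) back from the MAC scratch area via the HAL.
 */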
static int
al_eth_board_params_init(struct al_eth_adapter *adapter)
{

	if (adapter->board_type == ALPINE_NIC) {
		adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
		adapter->sfp_detection_needed = false;
		adapter->phy_exist = false;
		adapter->an_en = false;
		adapter->lt_en = false;
		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
	} else if (adapter->board_type == ALPINE_FPGA_NIC) {
		adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
		adapter->sfp_detection_needed = false;
		adapter->phy_exist = false;
		adapter->an_en = false;
		adapter->lt_en = false;
		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
	} else {
		struct al_eth_board_params params;
		int rc;

		adapter->auto_speed = false;

		rc = al_eth_board_params_get(adapter->mac_base, &params);
		if (rc != 0) {
			device_printf(adapter->dev,
			    "board info not available\n");
			return (-1);
		}

		adapter->phy_exist = params.phy_exist == TRUE;
		adapter->phy_addr = params.phy_mdio_addr;
		adapter->an_en = params.autoneg_enable;
		adapter->lt_en = params.kr_lt_enable;
		adapter->serdes_grp = params.serdes_grp;
		adapter->serdes_lane = params.serdes_lane;
		adapter->sfp_detection_needed = params.sfp_plus_module_exist;
		adapter->i2c_adapter_id = params.i2c_adapter_id;
		adapter->ref_clk_freq = params.ref_clk_freq;
		adapter->dont_override_serdes = params.dont_override_serdes;
		adapter->link_config.active_duplex = !params.half_duplex;
		adapter->link_config.autoneg = !params.an_disable;
		adapter->link_config.force_1000_base_x = params.force_1000_base_x;
		adapter->retimer.exist = params.retimer_exist;
		adapter->retimer.bus_id = params.retimer_bus_id;
		adapter->retimer.i2c_addr = params.retimer_i2c_addr;
		adapter->retimer.channel = params.retimer_channel;

		switch (params.speed) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid speed (%d)\n", __func__, params.speed);
			/* FALLTHROUGH */
		case AL_ETH_BOARD_1G_SPEED_1000M:
			adapter->link_config.active_speed = 1000;
			break;
		case AL_ETH_BOARD_1G_SPEED_100M:
			adapter->link_config.active_speed = 100;
			break;
		case AL_ETH_BOARD_1G_SPEED_10M:
			adapter->link_config.active_speed = 10;
			break;
		}

		switch (params.mdio_freq) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid mdio freq (%d)\n", __func__,
			    params.mdio_freq);
			/* FALLTHROUGH */
		case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ:
			adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
			break;
		case AL_ETH_BOARD_MDIO_FREQ_1_MHZ:
			adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ;
			break;
		}

		switch (params.media_type) {
		case AL_ETH_BOARD_MEDIA_TYPE_RGMII:
			if (params.sfp_plus_module_exist == TRUE)
				/* Backward compatibility */
				adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
			else
				adapter->mac_mode = AL_ETH_MAC_MODE_RGMII;

			adapter->use_lm = false;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_SGMII:
			adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR:
			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT:
			adapter->sfp_detection_needed = TRUE;
			adapter->auto_speed = false;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED:
			adapter->sfp_detection_needed = TRUE;
			adapter->auto_speed = true;
			adapter->mac_mode_set = false;
			adapter->use_lm = true;

			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
			break;
		default:
			device_printf(adapter->dev,
			    "%s: unsupported media type %d\n",
			    __func__, params.media_type);
			return (-1);
		}

		device_printf(adapter->dev,
		    "Board info: phy exist %s. phy addr %d. mdio freq %u Khz. "
		    "SFP connected %s. media %d\n",
		    params.phy_exist == TRUE ? "Yes" : "No",
		    params.phy_mdio_addr, adapter->mdio_freq,
		    params.sfp_plus_module_exist == TRUE ? "Yes" : "No",
		    params.media_type);
	}

	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);

	return (0);
}

static int
al_eth_function_reset(struct al_eth_adapter *adapter)
{
	struct al_eth_board_params params;
	int rc;

	/* save board params so we can restore them after reset */
	al_eth_board_params_get(adapter->mac_base, &params);
	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
	if (adapter->board_type == ALPINE_INTEGRATED)
		rc = al_eth_flr_rmn(&al_eth_read_pci_config,
		    &al_eth_write_pci_config,
		    adapter->dev, adapter->mac_base);
	else
		rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config,
		    &al_eth_fpga_write_pci_config,
		    adapter->internal_pcie_base, adapter->mac_base);

	/* restore params */
	al_eth_board_params_set(adapter->mac_base, &params);
	al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr);

	return (rc);
}

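/*
 * Bind each software ring to its UDMA queue handle and precompute the IOFIC
 * unmask register/value used to re-enable that queue's interrupt once the
 * taskqueue has drained it; TX completions live in interrupt group C, RX in
 * group B.
 */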
static void
al_eth_init_rings(struct al_eth_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct al_eth_ring *ring = &adapter->tx_ring[i];

		ring->ring_id = i;
		ring->dev = adapter->dev;
		ring->adapter = adapter;
		ring->netdev = adapter->netdev;
		al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
		    &ring->dma_q);
		ring->sw_count = adapter->tx_ring_count;
		ring->hw_count = adapter->tx_descs_count;
		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
		    (struct unit_regs *)adapter->udma_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
		ring->unmask_val = ~(1 << i);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct al_eth_ring *ring = &adapter->rx_ring[i];

		ring->ring_id = i;
		ring->dev = adapter->dev;
		ring->adapter = adapter;
		ring->netdev = adapter->netdev;
		al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q);
		ring->sw_count = adapter->rx_ring_count;
		ring->hw_count = adapter->rx_descs_count;
		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
		    (struct unit_regs *)adapter->udma_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
		ring->unmask_val = ~(1 << i);
	}
}

static void
al_init_locked(void *arg)
{
	struct al_eth_adapter *adapter = arg;
	if_t ifp = adapter->netdev;
	int rc = 0;

	al_eth_down(adapter);
	rc = al_eth_up(adapter);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if (rc == 0)
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
}

static void
al_init(void *arg)
{
	struct al_eth_adapter *adapter = arg;

	al_init_locked(adapter);
}

static inline int
al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
    struct al_eth_ring *rx_ring,
    struct al_eth_rx_buffer *rx_info)
{
	struct al_buf *al_buf;
	bus_dma_segment_t segs[2];
	int error;
	int nsegs;

	if (rx_info->m != NULL)
		return (0);

	rx_info->data_size = adapter->rx_mbuf_sz;

	AL_RX_LOCK(adapter);

	/* Get mbuf using UMA allocator */
	rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    rx_info->data_size);
	AL_RX_UNLOCK(adapter);

	if (rx_info->m == NULL)
		return (ENOMEM);

	rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;

	/* Map packets for DMA */
	error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
	    rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (__predict_false(error)) {
		device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
		    error);
		m_freem(rx_info->m);
		rx_info->m = NULL;
		return (EFAULT);
	}

	al_buf = &rx_info->al_buf;
	al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
	al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;

	return (0);
}

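/*
 * Allocate and DMA-map up to "num" receive mbufs for queue "qid" and hand
 * them to the UDMA; returns how many buffers were actually added. The
 * al_eth_rx_buffer_action() call below presumably publishes the whole batch
 * to the hardware in one go.
 */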
static int
al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
    unsigned int num)
{
	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
	uint16_t next_to_use;
	unsigned int i;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		int rc;
		struct al_eth_rx_buffer *rx_info =
		    &rx_ring->rx_buffer_info[next_to_use];

		if (__predict_false(al_eth_alloc_rx_buf(adapter,
		    rx_ring, rx_info) < 0)) {
			device_printf(adapter->dev,
			    "failed to alloc buffer for rx queue %d\n", qid);
			break;
		}

		rc = al_eth_rx_buffer_add(rx_ring->dma_q,
		    &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
		if (__predict_false(rc != 0)) {
			device_printf(adapter->dev,
			    "failed to add buffer for rx queue %d\n", qid);
			break;
		}

		next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
	}

	if (__predict_false(i < num))
		device_printf(adapter->dev,
		    "refilled rx queue %d with %d buffers only - available %d\n",
		    qid, i, al_udma_available_get(rx_ring->dma_q));

	if (__predict_true(i != 0))
		al_eth_rx_buffer_action(rx_ring->dma_q, i);

	rx_ring->next_to_use = next_to_use;

	return (i);
}

/*
 * al_eth_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void
al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
}

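/*
 * Reclaim TX descriptors that the hardware has completed: the HAL reports
 * the total number of completed descriptors, and we walk the software ring
 * unmapping and freeing mbufs until that budget is consumed. A packet is
 * only reclaimed once all of its descriptors have completed.
 */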
static void
al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
{
	unsigned int total_done;
	uint16_t next_to_clean;
	int qid = tx_ring->ring_id;

	total_done = al_eth_comp_tx_get(tx_ring->dma_q);
	device_printf_dbg(tx_ring->dev,
	    "tx_poll: q %d total completed descs %x\n", qid, total_done);
	next_to_clean = tx_ring->next_to_clean;

	while (total_done != 0) {
		struct al_eth_tx_buffer *tx_info;
		struct mbuf *mbuf;

		tx_info = &tx_ring->tx_buffer_info[next_to_clean];
		/* stop if not all descriptors of the packet are completed */
		if (tx_info->tx_descs > total_done)
			break;

		mbuf = tx_info->m;

		tx_info->m = NULL;

		device_printf_dbg(tx_ring->dev,
		    "tx_poll: q %d mbuf %p completed\n", qid, mbuf);

		/* map is no longer required */
		bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);

		m_freem(mbuf);
		total_done -= tx_info->tx_descs;
		next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
	}

	tx_ring->next_to_clean = next_to_clean;

	device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
	    qid, next_to_clean);

	/*
	 * need to make the ring's circular update visible to
	 * al_eth_xmit_mbuf() before it checks whether the queue is stalled.
	 */
	al_smp_data_memory_barrier();
}

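/*
 * Translate the mbuf's checksum/TSO requests into HAL packet flags and meta
 * words: L3/L4 protocol indices, header lengths and offsets, and the MSS for
 * TSO. Offload is engaged when the mbuf carries TSO or any CSUM_OFFLOAD
 * flag.
 */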
static void
al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
    struct al_eth_pkt *hal_pkt, struct mbuf *m)
{
	uint32_t mss = m->m_pkthdr.tso_segsz;
	struct ether_vlan_header *eh;
	uint16_t etype;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th = NULL;
	int	ehdrlen, ip_hlen = 0;
	uint8_t	ipproto = 0;
	uint32_t offload = 0;

	if (mss != 0)
		offload = 1;

	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0)
		offload = 1;

	if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
		offload = 1;

	if (offload != 0) {
		struct al_eth_meta_data *meta = &tx_ring->hal_meta;

		if (mss != 0)
			hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO |
			    AL_ETH_TX_FLAGS_L4_CSUM);
		else
			hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM |
			    AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM);

		/*
		 * Determine where frame payload starts.
		 * Jump over vlan headers if already present,
		 * helpful for QinQ too.
		 */
		eh = mtod(m, struct ether_vlan_header *);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehdrlen = ETHER_HDR_LEN;
		}

		switch (etype) {
		case ETHERTYPE_IP:
			ip = (struct ip *)(m->m_data + ehdrlen);
			ip_hlen = ip->ip_hl << 2;
			ipproto = ip->ip_p;
			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4;
			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
			if (mss != 0)
				hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM;
			if (ipproto == IPPROTO_TCP)
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
			else
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
			break;
		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6;
			ip_hlen = sizeof(struct ip6_hdr);
			th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
			ipproto = ip6->ip6_nxt;
			if (ipproto == IPPROTO_TCP)
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
			else
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
			break;
		default:
			break;
		}

		meta->words_valid = 4;
		meta->l3_header_len = ip_hlen;
		meta->l3_header_offset = ehdrlen;
		if (th != NULL)
			meta->l4_header_len = th->th_off; /* this param needed only for TSO */
		meta->mss_idx_sel = 0;			/* check how to select MSS */
		meta->mss_val = mss;
		hal_pkt->meta = meta;
	} else
		hal_pkt->meta = NULL;
}

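/*
 * When the TX ring has stalled (descriptors exhausted), the transmit path
 * below polls for completions for up to this many ticks before giving up
 * and dropping the packet.
 */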
#define	XMIT_QUEUE_TIMEOUT	100

static void
al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m)
{
	struct al_eth_tx_buffer *tx_info;
	int error;
	int nsegs, a;
	uint16_t next_to_use;
	bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1];
	struct al_eth_pkt *hal_pkt;
	struct al_buf *al_buf;
	boolean_t remap;

	if (m == NULL) {
		device_printf(tx_ring->dev, "mbuf is NULL\n");
		return;
	}

	/* Check if queue is ready */
	if (unlikely(tx_ring->stall != 0)) {
		for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) {
			if (al_udma_available_get(tx_ring->dma_q) >=
			    (AL_ETH_DEFAULT_TX_HW_DESCS -
			    AL_ETH_TX_WAKEUP_THRESH)) {
				tx_ring->stall = 0;
				break;
			}
			pause("stall", 1);
		}
		if (a == XMIT_QUEUE_TIMEOUT) {
			device_printf(tx_ring->dev,
			    "timeout waiting for queue %d ready!\n",
			    tx_ring->ring_id);
			m_freem(m);
			return;
		} else {
			device_printf_dbg(tx_ring->dev,
			    "queue %d is ready!\n", tx_ring->ring_id);
		}
	}

	next_to_use = tx_ring->next_to_use;
	tx_info = &tx_ring->tx_buffer_info[next_to_use];
	hal_pkt = &tx_info->hal_pkt;

	remap = TRUE;
	/* Map packets for DMA */
retry:
	error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (__predict_false(error)) {
		struct mbuf *m_new;

		if (error == EFBIG) {
			/* Try it again? - one try */
			if (remap == TRUE) {
				remap = FALSE;
				m_new = m_defrag(m, M_NOWAIT);
				if (m_new == NULL) {
					device_printf(tx_ring->dev,
					    "failed to defrag mbuf\n");
					goto exit;
				}
				m = m_new;
				goto retry;
			} else {
				device_printf(tx_ring->dev,
				    "failed to map mbuf, error %d\n", error);
				goto exit;
			}
		} else {
			device_printf(tx_ring->dev,
			    "failed to map mbuf, error %d\n", error);
			goto exit;
		}
	}

	/*
	 * Record the (possibly defragged) mbuf only after the mapping
	 * succeeded, so the completion path never frees a stale pointer.
	 */
	tx_info->m = m;

	/* set flags and meta data */
	hal_pkt->flags = AL_ETH_TX_FLAGS_INT;
	al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m);

	al_buf = hal_pkt->bufs;
	for (a = 0; a < nsegs; a++) {
		al_buf->addr = segs[a].ds_addr;
		al_buf->len = segs[a].ds_len;

		al_buf++;
	}

	hal_pkt->num_of_bufs = nsegs;

	/* prepare the packet's descriptors to dma engine */
	tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);

	if (tx_info->tx_descs == 0)
		goto exit;

	/*
	 * stop the queue when no more space available, the packet can have up
	 * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor
	 */
	if (unlikely(al_udma_available_get(tx_ring->dma_q) <
	    (AL_ETH_PKT_MAX_BUFS + 2))) {
		tx_ring->stall = 1;
		device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n",
		    tx_ring->ring_id);
		al_data_memory_barrier();
	}

	tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);

	/* trigger the dma engine */
	al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
	return;

exit:
	m_freem(m);
}

static void
al_eth_tx_cmpl_work(void *arg, int pending)
{
	struct al_eth_ring *tx_ring = arg;

	if (napi != 0) {
		tx_ring->cmpl_is_running = 1;
		al_data_memory_barrier();
	}

	al_eth_tx_do_cleanup(tx_ring);

	if (napi != 0) {
		tx_ring->cmpl_is_running = 0;
		al_data_memory_barrier();
	}
	/* all work done, enable IRQs */
	al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
}

static int
al_eth_tx_cmlp_irq_filter(void *arg)
{
	struct al_eth_ring *tx_ring = arg;

	/* Interrupt should be auto-masked upon arrival */

	device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
	    tx_ring->ring_id);

	/*
	 * For napi, if work is not running, schedule it. Always schedule
	 * for casual (non-napi) packet handling.
	 */
	if ((napi == 0) || (napi != 0 && tx_ring->cmpl_is_running == 0))
		taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);

	/* Do not run bottom half */
	return (FILTER_HANDLED);
}

static int
al_eth_rx_recv_irq_filter(void *arg)
{
	struct al_eth_ring *rx_ring = arg;

	/* Interrupt should be auto-masked upon arrival */

	device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__,
	    rx_ring->ring_id);

	/*
	 * For napi, if work is not running, schedule it. Always schedule
	 * for casual (non-napi) packet handling.
	 */
	if ((napi == 0) || (napi != 0 && rx_ring->enqueue_is_running == 0))
		taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task);

	/* Do not run bottom half */
	return (FILTER_HANDLED);
}

/*
 * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @hal_pkt: HAL structure for the packet
 * @mbuf: mbuf currently being received and modified
 */
static inline void
al_eth_rx_checksum(struct al_eth_adapter *adapter,
    struct al_eth_pkt *hal_pkt, struct mbuf *mbuf)
{

	/* if IPv4 and error */
	if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM) &&
	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) &&
	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
		device_printf(adapter->dev, "rx ipv4 header checksum error\n");
		return;
	}

	/* if IPv6 and error */
	if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM_IPV6) &&
	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) &&
	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
		device_printf(adapter->dev, "rx ipv6 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) ||
	   (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) {
		if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) {
			device_printf_dbg(adapter->dev, "rx L4 checksum error\n");

			/* TCP/UDP checksum error */
			mbuf->m_pkthdr.csum_flags = 0;
		} else {
			device_printf_dbg(adapter->dev, "rx checksum correct\n");

			/* IP Checksum Good */
			mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		}
	}
}

static struct mbuf*
al_eth_rx_mbuf(struct al_eth_adapter *adapter,
    struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt,
    unsigned int descs, uint16_t *next_to_clean)
{
	struct mbuf *mbuf;
	struct al_eth_rx_buffer *rx_info =
	    &rx_ring->rx_buffer_info[*next_to_clean];
	unsigned int len;

	len = hal_pkt->bufs[0].len;
	device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info,
	   rx_info->m);

	if (rx_info->m == NULL) {
		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
		    *next_to_clean);
		return (NULL);
	}

	mbuf = rx_info->m;
	mbuf->m_pkthdr.len = len;
	mbuf->m_len = len;
	mbuf->m_pkthdr.rcvif = rx_ring->netdev;
	mbuf->m_flags |= M_PKTHDR;

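	/*
	 * Small frames are copied into a fresh header mbuf so the original
	 * receive buffer can stay mapped in the ring and be handed straight
	 * back to the hardware, avoiding an unmap/alloc/map cycle per packet.
	 */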
	if (len <= adapter->small_copy_len) {
		struct mbuf *smbuf;

		device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len);

		AL_RX_LOCK(adapter);
		smbuf = m_gethdr(M_NOWAIT, MT_DATA);
		AL_RX_UNLOCK(adapter);
		if (__predict_false(smbuf == NULL)) {
			device_printf(adapter->dev, "smbuf is NULL\n");
			return (NULL);
		}

		smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
		memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len);

		smbuf->m_len = len;
		smbuf->m_pkthdr.rcvif = rx_ring->netdev;

		/* first desc of a non-ps chain */
		smbuf->m_flags |= M_PKTHDR;
		smbuf->m_pkthdr.len = smbuf->m_len;

		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
		    *next_to_clean);

		return (smbuf);
	}
	mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET;

	/* Unmap the buffer */
	bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map);

	rx_info->m = NULL;
	*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean);

	return (mbuf);
}

static void
al_eth_rx_recv_work(void *arg, int pending)
{
	struct al_eth_ring *rx_ring = arg;
	struct mbuf *mbuf;
	struct lro_entry *queued;
	unsigned int qid = rx_ring->ring_id;
	struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
	uint16_t next_to_clean = rx_ring->next_to_clean;
	uint32_t refill_required;
	uint32_t refill_actual;
	uint32_t do_if_input;

	if (napi != 0) {
		rx_ring->enqueue_is_running = 1;
		al_data_memory_barrier();
	}

	do {
		unsigned int descs;

		descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt);
		if (unlikely(descs == 0))
			break;

		device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet "
		    "from hal. descs %d\n", qid, descs);
		device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. "
		    "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags,
		    hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx);

		/* ignore if detected dma or eth controller errors */
		if ((hal_pkt->flags & (AL_ETH_RX_ERROR |
		    AL_UDMA_CDESC_ERROR)) != 0) {
			device_printf(rx_ring->dev, "receive packet with error. "
			    "flags = 0x%x\n", hal_pkt->flags);
			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
			    next_to_clean, descs);
			continue;
		}

		/* allocate mbuf and fill it */
		mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs,
		    &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(mbuf == NULL)) {
			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
			    next_to_clean, descs);
			break;
		}

		if (__predict_true(rx_ring->netdev->if_capenable & IFCAP_RXCSUM ||
		    rx_ring->netdev->if_capenable & IFCAP_RXCSUM_IPV6)) {
			al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf);
		}

#if __FreeBSD_version >= 800000
		mbuf->m_pkthdr.flowid = qid;
		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
#endif

		/*
		 * LRO is only for IP/TCP packets and TCP checksum of the packet
		 * should be computed by hardware.
		 */
		do_if_input = 1;
		if ((rx_ring->lro_enabled != 0) &&
		    ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
		    hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) {
			/*
			 * Send to the stack if:
			 *  - LRO not enabled, or
			 *  - no LRO resources, or
			 *  - lro enqueue fails
			 */
			if (rx_ring->lro.lro_cnt != 0) {
				if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
					do_if_input = 0;
			}
		}

		if (do_if_input != 0)
			(*rx_ring->netdev->if_input)(rx_ring->netdev, mbuf);

	} while (1);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = al_udma_available_get(rx_ring->dma_q);
	refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid,
	    refill_required);

	if (unlikely(refill_actual < refill_required)) {
		device_printf_dbg(rx_ring->dev,
		    "%s: not filling rx queue %d\n", __func__, qid);
	}

	while ((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL) {
		LIST_REMOVE(queued, next);
		tcp_lro_flush(&rx_ring->lro, queued);
	}

	if (napi != 0) {
		rx_ring->enqueue_is_running = 0;
		al_data_memory_barrier();
	}
	/* unmask irq */
	al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val);
}

static void
al_eth_start_xmit(void *arg, int pending)
{
	struct al_eth_ring *tx_ring = arg;
	struct mbuf *mbuf;

	if (napi != 0) {
		tx_ring->enqueue_is_running = 1;
		al_data_memory_barrier();
	}

	while (1) {
		mtx_lock(&tx_ring->br_mtx);
		mbuf = drbr_dequeue(NULL, tx_ring->br);
		mtx_unlock(&tx_ring->br_mtx);

		if (mbuf == NULL)
			break;

		al_eth_xmit_mbuf(tx_ring, mbuf);
	}

	if (napi != 0) {
		tx_ring->enqueue_is_running = 0;
		al_data_memory_barrier();
		while (1) {
			mtx_lock(&tx_ring->br_mtx);
			mbuf = drbr_dequeue(NULL, tx_ring->br);
			mtx_unlock(&tx_ring->br_mtx);
			if (mbuf == NULL)
				break;
			al_eth_xmit_mbuf(tx_ring, mbuf);
		}
	}
}

static int
al_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct al_eth_adapter *adapter = ifp->if_softc;
	struct al_eth_ring *tx_ring;
	int i;
	int ret;

	/* Which queue to use */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % adapter->num_tx_queues;
	else
		i = curcpu % adapter->num_tx_queues;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) {
		return (EFAULT);
	}

	tx_ring = &adapter->tx_ring[i];

	device_printf_dbg(adapter->dev, "dbg start() - assuming link is active, "
	    "sending packet to queue %d\n", i);

	ret = drbr_enqueue(ifp, tx_ring->br, m);

	/*
	 * For napi, if work is not running, schedule it. Always schedule
	 * for casual (non-napi) packet handling.
	 */
	if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0)))
		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	return (ret);
}

static void
al_qflush(struct ifnet *ifp)
{

	/* unused */
}

static inline void
al_eth_flow_ctrl_init(struct al_eth_adapter *adapter)
{
	uint8_t default_flow_ctrl;

	default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE;
	default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE;

	adapter->link_config.flow_ctrl_supported = default_flow_ctrl;
}

1781 static int
1782 al_eth_flow_ctrl_config(struct al_eth_adapter *adapter)
1783 {
1784 	struct al_eth_flow_control_params *flow_ctrl_params;
1785 	uint8_t active = adapter->link_config.flow_ctrl_active;
1786 	int i;
1787 
1788 	flow_ctrl_params = &adapter->flow_ctrl_params;
1789 
1790 	flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
1791 	flow_ctrl_params->obay_enable =
1792 	    ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
1793 	flow_ctrl_params->gen_enable =
1794 	    ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);
1795 
1796 	flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH;
1797 	flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW;
1798 	flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA;
1799 	flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH;
1800 
1801 	/* map priority to queue index, queue id = priority/2 */
1802 	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
1803 		flow_ctrl_params->prio_q_map[0][i] = 1 << (i >> 1);
1804 
1805 	al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params);
1806 
1807 	return (0);
1808 }
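
/*
 * The prio_q_map initialization above encodes "queue id = priority / 2" as a
 * per-priority queue bitmask; worked out assuming 8 priorities
 * (AL_ETH_FWD_PRIO_TABLE_NUM == 8):
 *
 *	prio 0,1 -> 1 << 0 = 0x1 (queue 0)
 *	prio 2,3 -> 1 << 1 = 0x2 (queue 1)
 *	prio 4,5 -> 1 << 2 = 0x4 (queue 2)
 *	prio 6,7 -> 1 << 3 = 0x8 (queue 3)
 */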
1809 
1810 static void
1811 al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter)
1812 {
1813 
1814 	/*
1815 	 * change the active configuration to the default / force by ethtool
1816 	 * and call to configure
1817 	 */
1818 	adapter->link_config.flow_ctrl_active =
1819 	    adapter->link_config.flow_ctrl_supported;
1820 
1821 	al_eth_flow_ctrl_config(adapter);
1822 }
1823 
1824 static void
1825 al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter)
1826 {
1827 
1828 	adapter->link_config.flow_ctrl_active = 0;
1829 	al_eth_flow_ctrl_config(adapter);
1830 }
1831 
1832 static int
1833 al_eth_hw_init(struct al_eth_adapter *adapter)
1834 {
1835 	int rc;
1836 
1837 	rc = al_eth_hw_init_adapter(adapter);
1838 	if (rc != 0)
1839 		return (rc);
1840 
1841 	rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
1842 	if (rc < 0) {
1843 		device_printf(adapter->dev, "%s failed to configure mac!\n",
1844 		    __func__);
1845 		return (rc);
1846 	}
1847 
1848 	if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
1849 	    (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
1850 	     adapter->phy_exist == FALSE)) {
1851 		rc = al_eth_mac_link_config(&adapter->hal_adapter,
1852 		    adapter->link_config.force_1000_base_x,
1853 		    adapter->link_config.autoneg,
1854 		    adapter->link_config.active_speed,
1855 		    adapter->link_config.active_duplex);
1856 		if (rc != 0) {
1857 			device_printf(adapter->dev,
1858 			    "%s failed to configure link parameters!\n",
1859 			    __func__);
1860 			return (rc);
1861 		}
1862 	}
1863 
1864 	rc = al_eth_mdio_config(&adapter->hal_adapter,
1865 	    AL_ETH_MDIO_TYPE_CLAUSE_22, TRUE /* shared_mdio_if */,
1866 	    adapter->ref_clk_freq, adapter->mdio_freq);
1867 	if (rc != 0) {
1868 		device_printf(adapter->dev, "%s failed at mdio config!\n",
1869 		    __func__);
1870 		return (rc);
1871 	}
1872 
1873 	al_eth_flow_ctrl_init(adapter);
1874 
1875 	return (rc);
1876 }
1877 
1878 static int
1879 al_eth_hw_stop(struct al_eth_adapter *adapter)
1880 {
1881 
1882 	al_eth_mac_stop(&adapter->hal_adapter);
1883 
1884 	/*
1885 	 * Wait until pending RX packets are written out and the UDMA becomes
1886 	 * idle. The MAC has a ~10KB FIFO, so 10us should be enough time for
1887 	 * the UDMA to write everything to memory.
1888 	 */
1889 	DELAY(10);
1890 
1891 	al_eth_adapter_stop(&adapter->hal_adapter);
1892 
1893 	adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED;
1894 
1895 	/* disable flow ctrl to avoid pause packets */
1896 	al_eth_flow_ctrl_disable(adapter);
1897 
1898 	return (0);
1899 }
1900 
1901 /*
1902  * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts
1903  * @data: pointer to the driver's al_eth_adapter structure
1905  */
1906 static int
1907 al_eth_intr_intx_all(void *data)
1908 {
1909 	struct al_eth_adapter *adapter = data;
1910 
1911 	struct unit_regs __iomem *regs_base =
1912 	    (struct unit_regs __iomem *)adapter->udma_base;
1913 	uint32_t reg;
1914 
1915 	reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
1916 	    AL_INT_GROUP_A);
1917 	if (likely(reg))
1918 		device_printf_dbg(adapter->dev, "%s group A cause %x\n",
1919 		    __func__, reg);
1920 
1921 	if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) {
1922 		struct al_iofic_grp_ctrl __iomem *sec_ints_base;
1923 		uint32_t cause_d =  al_udma_iofic_read_cause(regs_base,
1924 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);
1925 
1926 		sec_ints_base =
1927 		    &regs_base->gen.interrupt_regs.secondary_iofic_ctrl[0];
1928 		if (cause_d != 0) {
1929 			device_printf_dbg(adapter->dev,
1930 			    "got interrupt from group D. cause %x\n", cause_d);
1931 
1932 			cause_d = al_iofic_read_cause(sec_ints_base,
1933 			    AL_INT_GROUP_A);
1934 			device_printf(adapter->dev,
1935 			    "secondary A cause %x\n", cause_d);
1936 
1937 			cause_d = al_iofic_read_cause(sec_ints_base,
1938 			    AL_INT_GROUP_B);
1939 
1940 			device_printf_dbg(adapter->dev,
1941 			    "secondary B cause %x\n", cause_d);
1942 		}
1943 	}
1944 	if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0) {
1945 		uint32_t cause_b = al_udma_iofic_read_cause(regs_base,
1946 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1947 		int qid;
1948 		device_printf_dbg(adapter->dev, "group B cause %x\n",
1949 		    cause_b);
1950 		for (qid = 0; qid < adapter->num_rx_queues; qid++) {
1951 			if (cause_b & (1 << qid)) {
1952 				/* mask */
1953 				al_udma_iofic_mask(
1954 				    (struct unit_regs __iomem *)adapter->udma_base,
1955 				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
1956 				    AL_INT_GROUP_B, 1 << qid);
1957 			}
1958 		}
1959 	}
1960 	if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) {
1961 		uint32_t cause_c = al_udma_iofic_read_cause(regs_base,
1962 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
1963 		int qid;
1964 		device_printf_dbg(adapter->dev, "group C cause %x\n", cause_c);
1965 		for (qid = 0; qid < adapter->num_tx_queues; qid++) {
1966 			if ((cause_c & (1 << qid)) != 0) {
1967 				al_udma_iofic_mask(
1968 				    (struct unit_regs __iomem *)adapter->udma_base,
1969 				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
1970 				    AL_INT_GROUP_C, 1 << qid);
1971 			}
1972 		}
1973 	}
1974 
1975 	al_eth_tx_cmlp_irq_filter(adapter->tx_ring);
1976 
1977 	return (0);
1978 }
1979 
1980 static int
1981 al_eth_intr_msix_all(void *data)
1982 {
1983 	struct al_eth_adapter *adapter = data;
1984 
1985 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1986 	return (0);
1987 }
1988 
1989 static int
1990 al_eth_intr_msix_mgmt(void *data)
1991 {
1992 	struct al_eth_adapter *adapter = data;
1993 
1994 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1995 	return (0);
1996 }
1997 
1998 static int
1999 al_eth_enable_msix(struct al_eth_adapter *adapter)
2000 {
2001 	int i, msix_vecs, rc, count;
2002 
2003 	device_printf_dbg(adapter->dev, "%s\n", __func__);
2004 	msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues;
2005 
2006 	device_printf_dbg(adapter->dev,
2007 	    "Try to enable MSIX, vector numbers = %d\n", msix_vecs);
2008 
2009 	adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries),
2010 	    M_IFAL, M_ZERO | M_WAITOK);
2011 
2012 	if (adapter->msix_entries == NULL) {
2013 		device_printf_dbg(adapter->dev, "failed to allocate"
2014 		    " msix_entries %d\n", msix_vecs);
2015 		rc = ENOMEM;
2016 		goto exit;
2017 	}
2018 
2019 	/* management vector (GROUP_A) @2 */
2020 	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
2021 	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2022 
2023 	/* rx queues start @3 */
2024 	for (i = 0; i < adapter->num_rx_queues; i++) {
2025 		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2026 
2027 		adapter->msix_entries[irq_idx].entry = 3 + i;
2028 		adapter->msix_entries[irq_idx].vector = 0;
2029 	}
2030 	/* tx queues start @7 */
2031 	for (i = 0; i < adapter->num_tx_queues; i++) {
2032 		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2033 
2034 		adapter->msix_entries[irq_idx].entry = 3 +
2035 		    AL_ETH_MAX_HW_QUEUES + i;
2036 		adapter->msix_entries[irq_idx].vector = 0;
2037 	}
2038 
2039 	count = msix_vecs + 2; /* entries start from 2 */
2040 	rc = pci_alloc_msix(adapter->dev, &count);
2041 
2042 	if (rc != 0) {
2043 		device_printf_dbg(adapter->dev, "failed to allocate MSIX "
2044 		    "vectors %d\n", msix_vecs+2);
2045 		device_printf_dbg(adapter->dev, "ret = %d\n", rc);
2046 		goto msix_entries_exit;
2047 	}
2048 
2049 	if (count != msix_vecs + 2) {
2050 		device_printf_dbg(adapter->dev, "failed to allocate all MSIX "
2051 		    "vectors %d, allocated %d\n", msix_vecs+2, count);
2052 		rc = ENOSPC;
2053 		goto msix_entries_exit;
2054 	}
2055 
2056 	for (i = 0; i < msix_vecs; i++)
2057 		adapter->msix_entries[i].vector = 2 + 1 + i;
2058 
2059 	device_printf_dbg(adapter->dev, "successfully enabled MSIX,"
2060 	    " vectors %d\n", msix_vecs);
2061 
2062 	adapter->msix_vecs = msix_vecs;
2063 	adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED;
2064 	goto exit;
2065 
2066 msix_entries_exit:
2067 	adapter->msix_vecs = 0;
2068 	free(adapter->msix_entries, M_IFAL);
2069 	adapter->msix_entries = NULL;
2070 
2071 exit:
2072 	return (rc);
2073 }
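
/*
 * Resulting msix_entries[] layout, assuming (hypothetically) 4 RX and 4 TX
 * queues with AL_ETH_MAX_HW_QUEUES == 4:
 *
 *	msix_entries[0]      mgmt    entry 2
 *	msix_entries[1..4]   rx 0-3  entries 3..6
 *	msix_entries[5..8]   tx 0-3  entries 7..10
 *
 * which is what the "rx queues start @3" / "tx queues start @7" comments
 * above refer to.
 */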
2074 
2075 static int
2076 al_eth_setup_int_mode(struct al_eth_adapter *adapter)
2077 {
2078 	int i, rc;
2079 
2080 	rc = al_eth_enable_msix(adapter);
2081 	if (rc != 0) {
2082 		device_printf(adapter->dev, "Failed to enable MSIX mode.\n");
2083 		return (rc);
2084 	}
2085 
2086 	adapter->irq_vecs = max(1, adapter->msix_vecs);
2087 	/* single INTX mode */
2088 	if (adapter->msix_vecs == 0) {
2089 		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2090 		    AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s",
2091 		    device_get_name(adapter->dev));
2092 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2093 		    al_eth_intr_intx_all;
2094 		/* IRQ vector will be resolved from device resources */
2095 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2096 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2097 
2098 		device_printf(adapter->dev, "%s: using INTx, vector %d\n", __func__,
2099 		    adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector);
2100 
2101 		return (0);
2102 	}
2103 	/* single MSI-X mode */
2104 	if (adapter->msix_vecs == 1) {
2105 		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2106 		    AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s",
2107 		    device_get_name(adapter->dev));
2108 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2109 		    al_eth_intr_msix_all;
2110 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2111 		    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2112 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2113 
2114 		return (0);
2115 	}
2116 	/* MSI-X per queue */
2117 	snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE,
2118 	    "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev));
2119 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt;
2120 
2121 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2122 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2123 	    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2124 
2125 	for (i = 0; i < adapter->num_rx_queues; i++) {
2126 		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2127 
2128 		snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE,
2129 		    "al-eth-rx-comp-%d@pci:%s", i,
2130 		    device_get_name(adapter->dev));
2131 		adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter;
2132 		adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i];
2133 		adapter->irq_tbl[irq_idx].vector =
2134 		    adapter->msix_entries[irq_idx].vector;
2135 	}
2136 
2137 	for (i = 0; i < adapter->num_tx_queues; i++) {
2138 		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2139 
2140 		snprintf(adapter->irq_tbl[irq_idx].name,
2141 		    AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i,
2142 		    device_get_name(adapter->dev));
2143 		adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter;
2144 		adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i];
2145 		adapter->irq_tbl[irq_idx].vector =
2146 		    adapter->msix_entries[irq_idx].vector;
2147 	}
2148 
2149 	return (0);
2150 }
2151 
2152 static void
2153 __al_eth_free_irq(struct al_eth_adapter *adapter)
2154 {
2155 	struct al_eth_irq *irq;
2156 	int i, rc;
2157 
2158 	for (i = 0; i < adapter->irq_vecs; i++) {
2159 		irq = &adapter->irq_tbl[i];
2160 		if (irq->requested != 0) {
2161 			device_printf_dbg(adapter->dev, "tear down irq: %d\n",
2162 			    irq->vector);
2163 			rc = bus_teardown_intr(adapter->dev, irq->res,
2164 			    irq->cookie);
2165 			if (rc != 0)
2166 				device_printf(adapter->dev, "failed to tear "
2167 				    "down irq: %d\n", irq->vector);
2168 
2169 		}
2170 		irq->requested = 0;
2171 	}
2172 }
2173 
2174 static void
2175 al_eth_free_irq(struct al_eth_adapter *adapter)
2176 {
2177 	struct al_eth_irq *irq;
2178 	int i, rc;
2179 #ifdef CONFIG_RFS_ACCEL
2180 	if (adapter->msix_vecs >= 1) {
2181 		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2182 		adapter->netdev->rx_cpu_rmap = NULL;
2183 	}
2184 #endif
2185 
2186 	__al_eth_free_irq(adapter);
2187 
2188 	for (i = 0; i < adapter->irq_vecs; i++) {
2189 		irq = &adapter->irq_tbl[i];
2190 		if (irq->res == NULL)
2191 			continue;
2192 		device_printf_dbg(adapter->dev, "release resource irq: %d\n",
2193 		    irq->vector);
2194 		rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector,
2195 		    irq->res);
2196 		irq->res = NULL;
2197 		if (rc != 0)
2198 			device_printf(adapter->dev, "dev has no parent while "
2199 			    "releasing res for irq: %d\n", irq->vector);
2200 	}
2201 
2202 	pci_release_msi(adapter->dev);
2203 
2204 	adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;
2205 
2206 	adapter->msix_vecs = 0;
2207 	free(adapter->msix_entries, M_IFAL);
2208 	adapter->msix_entries = NULL;
2209 }
2210 
2211 static int
2212 al_eth_request_irq(struct al_eth_adapter *adapter)
2213 {
2214 	unsigned long flags;
2215 	struct al_eth_irq *irq;
2216 	int rc = 0, i, v;
2217 
2218 	if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0)
2219 		flags = RF_ACTIVE;
2220 	else
2221 		flags = RF_ACTIVE | RF_SHAREABLE;
2222 
2223 	for (i = 0; i < adapter->irq_vecs; i++) {
2224 		irq = &adapter->irq_tbl[i];
2225 
2226 		if (irq->requested != 0)
2227 			continue;
2228 
2229 		irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ,
2230 		    &irq->vector, flags);
2231 		if (irq->res == NULL) {
2232 			device_printf(adapter->dev, "could not allocate "
2233 			    "irq vector=%d\n", irq->vector);
2234 			rc = ENXIO;
2235 			goto exit_res;
2236 		}
2237 
2238 		if ((rc = bus_setup_intr(adapter->dev, irq->res,
2239 		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler,
2240 		    NULL, irq->data, &irq->cookie)) != 0) {
2241 			device_printf(adapter->dev, "failed to register "
2242 			    "interrupt handler for irq %ju: %d\n",
2243 			    (uintmax_t)rman_get_start(irq->res), rc);
2244 			goto exit_intr;
2245 		}
2246 		irq->requested = 1;
2247 	}
2248 	goto exit;
2249 
2250 exit_intr:
2251 	v = i - 1; /* -1 because we omit the operation that failed */
2252 	for (; v >= 0; v--) {
2253 		int bti;
2254 		irq = &adapter->irq_tbl[v];
2255 		bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie);
2256 		if (bti != 0) {
2257 			device_printf(adapter->dev, "failed to tear "
2258 			    "down irq: %d\n", irq->vector);
2259 		}
2260 
2261 		irq->requested = 0;
2262 		device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n",
2263 		    irq->vector);
2264 	}
2265 
2266 exit_res:
2267 	v = i - 1; /* -1 because we omit the operation that failed */
2268 	for (; v >= 0; v--) {
2269 		int brr;
2270 		irq = &adapter->irq_tbl[v];
2271 		device_printf_dbg(adapter->dev, "exit_res: releasing resource"
2272 		    " for irq %d\n", irq->vector);
2273 		brr = bus_release_resource(adapter->dev, SYS_RES_IRQ,
2274 		    irq->vector, irq->res);
2275 		if (brr != 0)
2276 			device_printf(adapter->dev, "dev has no parent while "
2277 			    "releasing res for irq: %d\n", irq->vector);
2278 		irq->res = NULL;
2279 	}
2280 
2281 exit:
2282 	return (rc);
2283 }
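
/*
 * A minimal sketch of the newbus interrupt idiom used above, stripped of
 * this driver's bookkeeping (rid, my_filter and softc are illustrative
 * names only):
 *
 *	struct resource *res;
 *	void *cookie;
 *	int rid = 0;
 *
 *	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *	    RF_ACTIVE | RF_SHAREABLE);
 *	bus_setup_intr(dev, res, INTR_TYPE_NET | INTR_MPSAFE,
 *	    my_filter, NULL, softc, &cookie);
 *	...
 *	bus_teardown_intr(dev, res, cookie);
 *	bus_release_resource(dev, SYS_RES_IRQ, rid, res);
 */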
2284 
2285 /**
2286  * al_eth_setup_tx_resources - allocate Tx resources (Descriptors)
2287  * @adapter: network interface device structure
2288  * @qid: queue index
2289  *
2290  * Return 0 on success, errno on failure
2291  **/
2292 static int
2293 al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
2294 {
2295 	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2296 	struct device *dev = tx_ring->dev;
2297 	struct al_udma_q_params *q_params = &tx_ring->q_params;
2298 	int size;
2299 	int ret;
2300 
2301 	if (adapter->up)
2302 		return (0);
2303 
2304 	size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;
2305 
2306 	tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2307 	if (tx_ring->tx_buffer_info == NULL)
2308 		return (ENOMEM);
2309 
2310 	tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
2311 	q_params->size = tx_ring->hw_count;
2312 
2313 	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2314 	    (bus_dmamap_t *)&q_params->desc_phy_base_map,
2315 	    (bus_addr_t *)&q_params->desc_phy_base,
2316 	    (void**)&q_params->desc_base, tx_ring->descs_size);
2317 	if (ret != 0) {
2318 		device_printf(dev, "al_dma_alloc_coherent failed,"
2319 		    " ret = %d\n", ret);
2320 		return (ENOMEM);
2321 	}
2322 
2323 	if (q_params->desc_base == NULL)
2324 		return (ENOMEM);
2325 
2326 	device_printf_dbg(dev, "Initializing ring queues %d\n", qid);
2327 
2328 	/* Allocate Ring Queue */
2329 	mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
2330 	tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
2331 	    &tx_ring->br_mtx);
2332 	if (tx_ring->br == NULL) {
2333 		device_printf(dev, "Critical Failure setting up buf ring\n");
2334 		return (ENOMEM);
2335 	}
2336 
2337 	/* Allocate taskqueues */
2338 	TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
2339 	tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT,
2340 	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
2341 	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq",
2342 	    device_get_nameunit(adapter->dev));
2343 	TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring);
2344 	tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT,
2345 	    taskqueue_thread_enqueue, &tx_ring->cmpl_tq);
2346 	taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq",
2347 	    device_get_nameunit(adapter->dev));
2348 
2349 	/* Setup DMA descriptor areas. */
2350 	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2351 	    1, 0,			/* alignment, bounds */
2352 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2353 	    BUS_SPACE_MAXADDR,		/* highaddr */
2354 	    NULL, NULL,			/* filter, filterarg */
2355 	    AL_TSO_SIZE,		/* maxsize */
2356 	    AL_ETH_PKT_MAX_BUFS,	/* nsegments */
2357 	    PAGE_SIZE,			/* maxsegsize */
2358 	    0,				/* flags */
2359 	    NULL,			/* lockfunc */
2360 	    NULL,			/* lockfuncarg */
2361 	    &tx_ring->dma_buf_tag);
2362 
2363 	if (ret != 0) {
2364 		device_printf(dev, "Unable to allocate dma_buf_tag, ret = %d\n",
2365 		    ret);
2366 		return (ret);
2367 	}
2368 
2369 	for (size = 0; size < tx_ring->sw_count; size++) {
2370 		ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0,
2371 		    &tx_ring->tx_buffer_info[size].dma_map);
2372 		if (ret != 0) {
2373 			device_printf(dev, "Unable to map DMA TX "
2374 			    "buffer memory [iter=%d]\n", size);
2375 			return (ret);
2376 		}
2377 	}
2378 
2379 	/* completion queue not used for tx */
2380 	q_params->cdesc_base = NULL;
2381 	/* size in bytes of the udma completion ring descriptor */
2382 	q_params->cdesc_size = 8;
2383 	tx_ring->next_to_use = 0;
2384 	tx_ring->next_to_clean = 0;
2385 
2386 	return (0);
2387 }
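
/*
 * The per-buffer DMA objects created above follow the usual busdma
 * lifecycle; a sketch with "tag" and "map" standing in for the ring's
 * dma_buf_tag and one tx_buffer_info[] map:
 *
 *	bus_dma_tag_create(parent, ..., &tag);   (once per ring, here)
 *	bus_dmamap_create(tag, 0, &map);         (once per slot, here)
 *	bus_dmamap_load_mbuf_sg(tag, map, m,     (per packet, at xmit time)
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_unload(tag, map);             (at completion)
 *	bus_dmamap_destroy(tag, map);            (at ring teardown)
 */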
2388 
2389 /*
2390  * al_eth_free_tx_resources - Free Tx Resources per Queue
2391  * @adapter: network interface device structure
2392  * @qid: queue index
2393  *
2394  * Free all transmit software resources
2395  */
2396 static void
2397 al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid)
2398 {
2399 	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2400 	struct al_udma_q_params *q_params = &tx_ring->q_params;
2401 	int size;
2402 
2403 	/* At this point interrupts' handlers must be deactivated */
2404 	while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL))
2405 		taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
2406 
2407 	taskqueue_free(tx_ring->cmpl_tq);
2408 	while (taskqueue_cancel(tx_ring->enqueue_tq,
2409 	    &tx_ring->enqueue_task, NULL)) {
2410 		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2411 	}
2412 
2413 	taskqueue_free(tx_ring->enqueue_tq);
2414 
2415 	if (tx_ring->br != NULL) {
2416 		drbr_flush(adapter->netdev, tx_ring->br);
2417 		buf_ring_free(tx_ring->br, M_DEVBUF);
2418 	}
2419 
2420 	for (size = 0; size < tx_ring->sw_count; size++) {
2421 		m_freem(tx_ring->tx_buffer_info[size].m);
2422 		tx_ring->tx_buffer_info[size].m = NULL;
2423 
2424 		bus_dmamap_unload(tx_ring->dma_buf_tag,
2425 		    tx_ring->tx_buffer_info[size].dma_map);
2426 		bus_dmamap_destroy(tx_ring->dma_buf_tag,
2427 		    tx_ring->tx_buffer_info[size].dma_map);
2428 	}
2429 	bus_dma_tag_destroy(tx_ring->dma_buf_tag);
2430 
2431 	free(tx_ring->tx_buffer_info, M_IFAL);
2432 	tx_ring->tx_buffer_info = NULL;
2433 
2434 	mtx_destroy(&tx_ring->br_mtx);
2435 
2436 	/* if not set, then don't free */
2437 	if (q_params->desc_base == NULL)
2438 		return;
2439 
2440 	al_dma_free_coherent(q_params->desc_phy_base_tag,
2441 	    q_params->desc_phy_base_map, q_params->desc_base);
2442 
2443 	q_params->desc_base = NULL;
2444 }
2445 
2446 /*
2447  * al_eth_free_all_tx_resources - Free Tx Resources for All Queues
2448  * @adapter: board private structure
2449  *
2450  * Free all transmit software resources
2451  */
2452 static void
2453 al_eth_free_all_tx_resources(struct al_eth_adapter *adapter)
2454 {
2455 	int i;
2456 
2457 	for (i = 0; i < adapter->num_tx_queues; i++)
2458 		if (adapter->tx_ring[i].q_params.desc_base != NULL)
2459 			al_eth_free_tx_resources(adapter, i);
2460 }
2461 
2462 /*
2463  * al_eth_setup_rx_resources - allocate Rx resources (Descriptors)
2464  * @adapter: network interface device structure
2465  * @qid: queue index
2466  *
2467  * Returns 0 on success, errno on failure
2468  */
2469 static int
2470 al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2471 {
2472 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2473 	struct device *dev = rx_ring->dev;
2474 	struct al_udma_q_params *q_params = &rx_ring->q_params;
2475 	int size;
2476 	int ret;
2477 
2478 	size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count;
2479 
2480 	/* alloc an extra element so the rx path can always prefetch rx_info + 1 */
2481 	size += sizeof(struct al_eth_rx_buffer);
2482 
2483 	rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2484 	if (rx_ring->rx_buffer_info == NULL)
2485 		return (ENOMEM);
2486 
2487 	rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
2488 	q_params->size = rx_ring->hw_count;
2489 
2490 	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2491 	    &q_params->desc_phy_base_map,
2492 	    (bus_addr_t *)&q_params->desc_phy_base,
2493 	    (void**)&q_params->desc_base, rx_ring->descs_size);
2494 
2495 	if ((q_params->desc_base == NULL) || (ret != 0))
2496 		return (ENOMEM);
2497 
2498 	/* size in bytes of the udma completion ring descriptor */
2499 	q_params->cdesc_size = 16;
2500 	rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size;
2501 	ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag,
2502 	    &q_params->cdesc_phy_base_map,
2503 	    (bus_addr_t *)&q_params->cdesc_phy_base,
2504 	    (void**)&q_params->cdesc_base, rx_ring->cdescs_size);
2505 
2506 	if ((q_params->cdesc_base == NULL) || (ret != 0))
2507 		return (ENOMEM);
2508 
2509 	/* Allocate taskqueues */
2510 	TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
2511 	rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
2512 	    taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
2513 	taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
2514 	    device_get_nameunit(adapter->dev));
2515 
2516 	/* Setup DMA descriptor areas. */
2517 	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2518 	    1, 0,			/* alignment, bounds */
2519 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2520 	    BUS_SPACE_MAXADDR,		/* highaddr */
2521 	    NULL, NULL,			/* filter, filterarg */
2522 	    AL_TSO_SIZE,		/* maxsize */
2523 	    1,				/* nsegments */
2524 	    AL_TSO_SIZE,		/* maxsegsize */
2525 	    0,				/* flags */
2526 	    NULL,			/* lockfunc */
2527 	    NULL,			/* lockfuncarg */
2528 	    &rx_ring->dma_buf_tag);
2529 
2530 	if (ret != 0) {
2531 		device_printf(dev, "Unable to allocate RX dma_buf_tag\n");
2532 		return (ret);
2533 	}
2534 
2535 	for (size = 0; size < rx_ring->sw_count; size++) {
2536 		ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0,
2537 		    &rx_ring->rx_buffer_info[size].dma_map);
2538 		if (ret != 0) {
2539 			device_printf(dev, "Unable to map DMA RX buffer memory\n");
2540 			return (ret);
2541 		}
2542 	}
2543 
2544 	/* Zero out the descriptor ring */
2545 	memset(q_params->cdesc_base, 0, rx_ring->cdescs_size);
2546 
2547 	/* Create LRO for the ring */
2548 	if ((adapter->netdev->if_capenable & IFCAP_LRO) != 0) {
2549 		int err = tcp_lro_init(&rx_ring->lro);
2550 		if (err != 0) {
2551 			device_printf(adapter->dev,
2552 			    "LRO[%d] Initialization failed!\n", qid);
2553 		} else {
2554 			device_printf_dbg(adapter->dev,
2555 			    "RX Soft LRO[%d] Initialized\n", qid);
2556 			rx_ring->lro_enabled = TRUE;
2557 			rx_ring->lro.ifp = adapter->netdev;
2558 		}
2559 	}
2560 
2561 	rx_ring->next_to_clean = 0;
2562 	rx_ring->next_to_use = 0;
2563 
2564 	return (0);
2565 }
2566 
2567 /*
2568  * al_eth_free_rx_resources - Free Rx Resources
2569  * @adapter: network interface device structure
2570  * @qid: queue index
2571  *
2572  * Free all receive software resources
2573  */
2574 static void
2575 al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2576 {
2577 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2578 	struct al_udma_q_params *q_params = &rx_ring->q_params;
2579 	int size;
2580 
2581 	/* At this point interrupts' handlers must be deactivated */
2582 	while (taskqueue_cancel(rx_ring->enqueue_tq,
2583 	    &rx_ring->enqueue_task, NULL)) {
2584 		taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
2585 	}
2586 
2587 	taskqueue_free(rx_ring->enqueue_tq);
2588 
2589 	for (size = 0; size < rx_ring->sw_count; size++) {
2590 		m_freem(rx_ring->rx_buffer_info[size].m);
2591 		rx_ring->rx_buffer_info[size].m = NULL;
2592 		bus_dmamap_unload(rx_ring->dma_buf_tag,
2593 		    rx_ring->rx_buffer_info[size].dma_map);
2594 		bus_dmamap_destroy(rx_ring->dma_buf_tag,
2595 		    rx_ring->rx_buffer_info[size].dma_map);
2596 	}
2597 	bus_dma_tag_destroy(rx_ring->dma_buf_tag);
2598 
2599 	free(rx_ring->rx_buffer_info, M_IFAL);
2600 	rx_ring->rx_buffer_info = NULL;
2601 
2602 	/* if not set, then don't free */
2603 	if (q_params->desc_base == NULL)
2604 		return;
2605 
2606 	al_dma_free_coherent(q_params->desc_phy_base_tag,
2607 	    q_params->desc_phy_base_map, q_params->desc_base);
2608 
2609 	q_params->desc_base = NULL;
2610 
2611 	/* if not set, then don't free */
2612 	if (q_params->cdesc_base == NULL)
2613 		return;
2614 
2615 	al_dma_free_coherent(q_params->cdesc_phy_base_tag,
2616 	    q_params->cdesc_phy_base_map, q_params->cdesc_base);
2617 
2618 	q_params->cdesc_base = NULL;
2619 
2620 	/* Free LRO resources */
2621 	tcp_lro_free(&rx_ring->lro);
2622 }
2623 
2624 /*
2625  * al_eth_free_all_rx_resources - Free Rx Resources for All Queues
2626  * @adapter: board private structure
2627  *
2628  * Free all receive software resources
2629  */
2630 static void
2631 al_eth_free_all_rx_resources(struct al_eth_adapter *adapter)
2632 {
2633 	int i;
2634 
2635 	for (i = 0; i < adapter->num_rx_queues; i++)
2636 		if (adapter->rx_ring[i].q_params.desc_base != NULL)
2637 			al_eth_free_rx_resources(adapter, i);
2638 }
2639 
2640 /*
2641  * al_eth_setup_all_rx_resources - allocate all queues Rx resources
2642  * @adapter: board private structure
2643  *
2644  * Return 0 on success, errno on failure
2645  */
2646 static int
2647 al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter)
2648 {
2649 	int i, rc = 0;
2650 
2651 	for (i = 0; i < adapter->num_rx_queues; i++) {
2652 		rc = al_eth_setup_rx_resources(adapter, i);
2653 		if (rc == 0)
2654 			continue;
2655 
2656 		device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i);
2657 		goto err_setup_rx;
2658 	}
2659 	return (0);
2660 
2661 err_setup_rx:
2662 	/* rewind the index freeing the rings as we go */
2663 	while (i--)
2664 		al_eth_free_rx_resources(adapter, i);
2665 	return (rc);
2666 }
2667 
2668 /*
2669  * al_eth_setup_all_tx_resources - allocate all queues Tx resources
2670  * @adapter: private structure
2671  *
2672  * Return 0 on success, errno on failure
2673  */
2674 static int
2675 al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
2676 {
2677 	int i, rc = 0;
2678 
2679 	for (i = 0; i < adapter->num_tx_queues; i++) {
2680 		rc = al_eth_setup_tx_resources(adapter, i);
2681 		if (rc == 0)
2682 			continue;
2683 
2684 		device_printf(adapter->dev,
2685 		    "Allocation for Tx Queue %u failed\n", i);
2686 		goto err_setup_tx;
2687 	}
2688 
2689 	return (0);
2690 
2691 err_setup_tx:
2692 	/* rewind the index freeing the rings as we go */
2693 	while (i--)
2694 		al_eth_free_tx_resources(adapter, i);
2695 
2696 	return (rc);
2697 }
2698 
2699 static void
2700 al_eth_disable_int_sync(struct al_eth_adapter *adapter)
2701 {
2702 
2703 	/* disable forwarding interrupts from eth through pci end point */
2704 	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
2705 	    (adapter->board_type == ALPINE_NIC)) {
2706 		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
2707 		    AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR);
2708 	}
2709 
2710 	/* mask hw interrupts */
2711 	al_eth_interrupts_mask(adapter);
2712 }
2713 
2714 static void
2715 al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
2716 {
2717 	uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */
2718 	uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1; /* bit per Rx q */
2719 	uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1; /* bit per Tx q */
2720 	uint32_t group_d_mask = 3 << 8;
2721 	struct unit_regs __iomem *regs_base =
2722 	    (struct unit_regs __iomem *)adapter->udma_base;
2723 
2724 	if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
2725 		group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
2726 		    AL_INT_GROUP_A_GROUP_C_SUM |
2727 		    AL_INT_GROUP_A_GROUP_D_SUM;
2728 
2729 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2730 	    AL_INT_GROUP_A, group_a_mask);
2731 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2732 	    AL_INT_GROUP_B, group_b_mask);
2733 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2734 	    AL_INT_GROUP_C, group_c_mask);
2735 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2736 	    AL_INT_GROUP_D, group_d_mask);
2737 }
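
/*
 * Worked example of the masks computed above, assuming a hypothetical
 * configuration with 4 RX and 4 TX queues:
 *
 *	group_b_mask = (1 << 4) - 1 = 0xf   (one bit per Rx queue)
 *	group_c_mask = (1 << 4) - 1 = 0xf   (one bit per Tx queue)
 *	group_d_mask = 3 << 8       = 0x300
 */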
2738 
2739 static void
2740 al_eth_interrupts_mask(struct al_eth_adapter *adapter)
2741 {
2742 	struct unit_regs __iomem *regs_base =
2743 	    (struct unit_regs __iomem *)adapter->udma_base;
2744 
2745 	/* mask all interrupts */
2746 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2747 	    AL_INT_GROUP_A, AL_MASK_GROUP_A_INT);
2748 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2749 	    AL_INT_GROUP_B, AL_MASK_GROUP_B_INT);
2750 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2751 	    AL_INT_GROUP_C, AL_MASK_GROUP_C_INT);
2752 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2753 	    AL_INT_GROUP_D, AL_MASK_GROUP_D_INT);
2754 }
2755 
2756 static int
2757 al_eth_configure_int_mode(struct al_eth_adapter *adapter)
2758 {
2759 	enum al_iofic_mode int_mode;
2760 	uint32_t m2s_errors_disable = AL_M2S_MASK_INIT;
2761 	uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT;
2762 	uint32_t s2m_errors_disable = AL_S2M_MASK_INIT;
2763 	uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT;
2764 
2765 	/* single INTX mode */
2766 	if (adapter->msix_vecs == 0)
2767 		int_mode = AL_IOFIC_MODE_LEGACY;
2768 	else if (adapter->msix_vecs > 1)
2769 		int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
2770 	else {
2771 		device_printf(adapter->dev,
2772 		    "udma doesn't support single MSI-X mode yet.\n");
2773 		return (EIO);
2774 	}
2775 
2776 	if (adapter->board_type != ALPINE_INTEGRATED) {
2777 		m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
2778 		m2s_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2779 		s2m_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
2780 		s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2781 	}
2782 
2783 	if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
2784 	    int_mode, m2s_errors_disable, m2s_aborts_disable,
2785 	    s2m_errors_disable, s2m_aborts_disable)) {
2786 		device_printf(adapter->dev,
2787 		    "al_udma_iofic_config failed!\n");
2788 		return (EIO);
2789 	}
2790 	adapter->int_mode = int_mode;
2791 	device_printf_dbg(adapter->dev, "using %s interrupt mode\n",
2792 	    int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" :
2793 	    int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown");
2794 	/* set interrupt moderation resolution to 15us */
2795 	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15);
2796 	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15);
2797 	/* by default interrupt coalescing is disabled */
2798 	adapter->tx_usecs = 0;
2799 	adapter->rx_usecs = 0;
2800 
2801 	return (0);
2802 }
2803 
2804 /*
2805  * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
2806  * @index: Index in RX flow hash indirection table
2807  * @n_rx_rings: Number of RX rings to use
2808  *
2809  * This function provides the default policy for RX flow hash indirection.
2810  */
2811 static inline uint32_t
2812 ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
2813 {
2814 
2815 	return (index % n_rx_rings);
2816 }
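
/*
 * E.g. with n_rx_rings = 4 the default indirection table simply
 * round-robins the rings:
 *
 *	index: 0 1 2 3 4 5 6 7 ...
 *	ring:  0 1 2 3 0 1 2 3 ...
 */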
2817 
2818 static void *
2819 al_eth_update_stats(struct al_eth_adapter *adapter)
2820 {
2821 	struct al_eth_mac_stats *mac_stats = &adapter->mac_stats;
2822 
2823 	if (adapter->up == 0)
2824 		return (NULL);
2825 
2826 	al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats);
2827 
2828 	return (NULL);
2829 }
2830 
2831 static uint64_t
2832 al_get_counter(struct ifnet *ifp, ift_counter cnt)
2833 {
2834 	struct al_eth_adapter *adapter;
2835 	struct al_eth_mac_stats *mac_stats;
2836 	uint64_t rv;
2837 
2838 	adapter = if_getsoftc(ifp);
2839 	mac_stats = &adapter->mac_stats;
2840 
2841 	switch (cnt) {
2842 	case IFCOUNTER_IPACKETS:
2843 		return (mac_stats->aFramesReceivedOK); /* including pause frames */
2844 	case IFCOUNTER_OPACKETS:
2845 		return (mac_stats->aFramesTransmittedOK);
2846 	case IFCOUNTER_IBYTES:
2847 		return (mac_stats->aOctetsReceivedOK);
2848 	case IFCOUNTER_OBYTES:
2849 		return (mac_stats->aOctetsTransmittedOK);
2850 	case IFCOUNTER_IMCASTS:
2851 		return (mac_stats->ifInMulticastPkts);
2852 	case IFCOUNTER_OMCASTS:
2853 		return (mac_stats->ifOutMulticastPkts);
2854 	case IFCOUNTER_COLLISIONS:
2855 		return (0);
2856 	case IFCOUNTER_IQDROPS:
2857 		return (mac_stats->etherStatsDropEvents);
2858 	case IFCOUNTER_IERRORS:
2859 		rv = mac_stats->ifInErrors +
2860 		    mac_stats->etherStatsUndersizePkts + /* good but short */
2861 		    mac_stats->etherStatsFragments + /* short and bad*/
2862 		    mac_stats->etherStatsJabbers + /* with crc errors */
2863 		    mac_stats->etherStatsOversizePkts +
2864 		    mac_stats->aFrameCheckSequenceErrors +
2865 		    mac_stats->aAlignmentErrors;
2866 		return (rv);
2867 	case IFCOUNTER_OERRORS:
2868 		return (mac_stats->ifOutErrors);
2869 	default:
2870 		return (if_get_counter_default(ifp, cnt));
2871 	}
2872 }
2873 
2874 /*
2875  *  Unicast, Multicast and Promiscuous mode set
2876  *
2877  *  The set_rx_mode entry point is called whenever the unicast or multicast
2878  *  address lists or the network interface flags are updated.  This routine is
2879  *  responsible for configuring the hardware for proper unicast, multicast,
2880  *  promiscuous mode, and all-multi behavior.
2881  */
2882 #define	MAX_NUM_MULTICAST_ADDRESSES 32
2883 #define	MAX_NUM_ADDRESSES           32
2884 
2885 static void
2886 al_eth_set_rx_mode(struct al_eth_adapter *adapter)
2887 {
2888 	struct ifnet *ifp = adapter->netdev;
2889 	struct ifmultiaddr *ifma; /* multicast addresses configured */
2890 	struct ifaddr *ifua; /* unicast address */
2891 	int mc = 0;
2892 	int uc = 0;
2893 	uint8_t i;
2894 	unsigned char *mac;
2895 
2896 	if_maddr_rlock(ifp);
2897 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2898 		if (ifma->ifma_addr->sa_family != AF_LINK)
2899 			continue;
2900 		if (mc == MAX_NUM_MULTICAST_ADDRESSES)
2901 			break;
2902 
2903 		mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2904 		/* default mc address inside mac address */
2905 		if (mac[3] != 0 && mac[4] != 0 && mac[5] != 1)
2906 			mc++;
2907 	}
2908 	if_maddr_runlock(ifp);
2909 
2910 	if_addr_rlock(ifp);
2911 	TAILQ_FOREACH(ifua, &ifp->if_addrhead, ifa_link) {
2912 		if (ifua->ifa_addr->sa_family != AF_LINK)
2913 			continue;
2914 		if (uc == MAX_NUM_ADDRESSES)
2915 			break;
2916 		uc++;
2917 	}
2918 	if_addr_runlock(ifp);
2919 
2920 	if ((ifp->if_flags & IFF_PROMISC) != 0) {
2921 		al_eth_mac_table_promiscuous_set(adapter, true);
2922 	} else {
2923 		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
2924 			/* This interface is in all-multicast mode (used by multicast routers). */
2925 			al_eth_mac_table_all_multicast_add(adapter,
2926 			    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2927 		} else {
2928 			if (mc == 0) {
2929 				al_eth_mac_table_entry_clear(adapter,
2930 				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
2931 			} else {
2932 				al_eth_mac_table_all_multicast_add(adapter,
2933 				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2934 			}
2935 		}
2936 		if (uc != 0) {
2937 			i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
2938 			if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) {
2939 				/*
2940 				 * In this case there are more addresses than
2941 				 * entries in the mac table - set promiscuous
2942 				 */
2943 				al_eth_mac_table_promiscuous_set(adapter, true);
2944 				return;
2945 			}
2946 
2947 			/* clear the last configuration */
2948 			while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE +
2949 				    AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
2950 				al_eth_mac_table_entry_clear(adapter, i);
2951 				i++;
2952 			}
2953 
2954 			/* set new addresses */
2955 			i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
2956 			if_addr_rlock(ifp);
2957 			TAILQ_FOREACH(ifua, &ifp->if_addrhead, ifa_link) {
2958 				if (ifua->ifa_addr->sa_family != AF_LINK) {
2959 					continue;
2960 				}
2961 				al_eth_mac_table_unicast_add(adapter, i,
2962 				    (unsigned char *)LLADDR((struct sockaddr_dl *)ifua->ifa_addr), 1);
2963 				i++;
2964 			}
2965 			if_addr_runlock(ifp);
2966 
2967 		}
2968 		al_eth_mac_table_promiscuous_set(adapter, false);
2969 	}
2970 }
2971 
2972 static void
2973 al_eth_config_rx_fwd(struct al_eth_adapter *adapter)
2974 {
2975 	struct al_eth_fwd_ctrl_table_entry entry;
2976 	int i;
2977 
2978 	/* let priority be equal to pbits */
2979 	for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++)
2980 		al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i);
2981 
2982 	/* map priority to queue index, queue id = priority/2 */
2983 	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
2984 		al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1);
2985 
2986 	entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0;
2987 	entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE;
2988 	entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO;
2989 	entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE;
2990 	entry.filter = FALSE;
2991 
2992 	al_eth_ctrl_table_def_set(&adapter->hal_adapter, FALSE, &entry);
2993 
2994 	/*
2995 	 * By default set the mac table to forward all unicast packets to our
2996 	 * MAC address and all broadcast. all the rest will be dropped.
2997 	 */
2998 	al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
2999 	    adapter->mac_addr, 1);
3000 	al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1);
3001 	al_eth_mac_table_promiscuous_set(adapter, false);
3002 
3003 	/* set toeplitz hash keys */
3004 	for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++)
3005 		*((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random();
3006 
3007 	for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++)
3008 		al_eth_hash_key_set(&adapter->hal_adapter, i,
3009 		    htonl(adapter->toeplitz_hash_key[i]));
3010 
3011 	for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) {
3012 		adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i,
3013 		    AL_ETH_NUM_QUEUES);
3014 		al_eth_set_thash_table_entry(adapter, i, 0,
3015 		    adapter->rss_ind_tbl[i]);
3016 	}
3017 
3018 	al_eth_fsm_table_init(adapter);
3019 }
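
/*
 * Putting the pieces configured above together, RX steering of a unicast
 * TCP/UDP packet is roughly:
 *
 *	Toeplitz hash over the packet's 4-tuple (keys set via
 *	al_eth_hash_key_set)
 *	    -> index into the thash table
 *	    -> rss_ind_tbl[index] selects the RX queue,
 *	while the MAC table (unicast/broadcast entries) decides whether the
 *	packet is accepted at all.
 */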
3020 
3021 static void
3022 al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size)
3023 {
3024 
3025 	/*
3026 	 * Determine the correct mbuf pool
3027 	 * for doing jumbo frames.
3028 	 * Try from the smallest size up to the maximum supported.
3029 	 */
3030 	adapter->rx_mbuf_sz = MCLBYTES;
3031 	if (size > 2048) {
3032 		if (adapter->max_rx_buff_alloc_size > 2048)
3033 			adapter->rx_mbuf_sz = MJUMPAGESIZE;
3034 		else
3035 			return;
3036 	}
3037 	if (size > 4096) {
3038 		if (adapter->max_rx_buff_alloc_size > 4096)
3039 			adapter->rx_mbuf_sz = MJUM9BYTES;
3040 		else
3041 			return;
3042 	}
3043 	if (size > 9216) {
3044 		if (adapter->max_rx_buff_alloc_size > 9216)
3045 			adapter->rx_mbuf_sz = MJUM16BYTES;
3046 		else
3047 			return;
3048 	}
3049 }
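
/*
 * Worked example: for size = 9000 with max_rx_buff_alloc_size = 16384 the
 * checks above run in order:
 *
 *	9000 > 2048 && 16384 > 2048 -> rx_mbuf_sz = MJUMPAGESIZE
 *	9000 > 4096 && 16384 > 4096 -> rx_mbuf_sz = MJUM9BYTES
 *	9000 > 9216 is false        -> MJUM9BYTES is kept
 */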
3050 
3051 static int
3052 al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu)
3053 {
3054 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
3055 	    ETHER_VLAN_ENCAP_LEN;
3056 
3057 	al_eth_req_rx_buff_size(adapter, new_mtu);
3058 
3059 	device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu);
3060 	al_eth_rx_pkt_limit_config(&adapter->hal_adapter,
3061 	    AL_ETH_MIN_FRAME_LEN, max_frame);
3062 
3063 	al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100);
3064 
3065 	return (0);
3066 }
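
/*
 * E.g. new_mtu = 9000 gives
 *
 *	max_frame = 9000 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) +
 *	    ETHER_VLAN_ENCAP_LEN (4) = 9022
 *
 * and caps the TSO MSS at 9000 - 100 = 8900.
 */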
3067 
3068 static int
3069 al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu)
3070 {
3071 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
3072 
3073 	if ((new_mtu < AL_ETH_MIN_FRAME_LEN) ||
3074 	    (max_frame > AL_ETH_MAX_FRAME_LEN)) {
3075 		return (EINVAL);
3076 	}
3077 
3078 	return (0);
3079 }
3080 
3081 static int
3082 al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type,
3083     int qid)
3084 {
3085 	int rc = 0;
3086 	char *name = (type == UDMA_TX) ? "Tx" : "Rx";
3087 	struct al_udma_q_params *q_params;
3088 
3089 	if (type == UDMA_TX)
3090 		q_params = &adapter->tx_ring[qid].q_params;
3091 	else
3092 		q_params = &adapter->rx_ring[qid].q_params;
3093 
3094 	rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params);
3095 	if (rc < 0) {
3096 		device_printf(adapter->dev, "config %s queue %u failed\n", name,
3097 		    qid);
3098 		return (rc);
3099 	}
3100 	return (rc);
3101 }
3102 
3103 static int
3104 al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter)
3105 {
3106 	int i;
3107 
3108 	for (i = 0; i < adapter->num_tx_queues; i++)
3109 		al_eth_udma_queue_enable(adapter, UDMA_TX, i);
3110 
3111 	for (i = 0; i < adapter->num_rx_queues; i++)
3112 		al_eth_udma_queue_enable(adapter, UDMA_RX, i);
3113 
3114 	return (0);
3115 }
3116 
3117 static void
3118 al_eth_up_complete(struct al_eth_adapter *adapter)
3119 {
3120 
3121 	al_eth_configure_int_mode(adapter);
3122 	al_eth_config_rx_fwd(adapter);
3123 	al_eth_change_mtu(adapter, adapter->netdev->if_mtu);
3124 	al_eth_udma_queues_enable_all(adapter);
3125 	al_eth_refill_all_rx_bufs(adapter);
3126 	al_eth_interrupts_unmask(adapter);
3127 
3128 	/* enable forwarding interrupts from eth through pci end point */
3129 	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
3130 	    (adapter->board_type == ALPINE_NIC)) {
3131 		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
3132 		    AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR);
3133 	}
3134 
3135 	al_eth_flow_ctrl_enable(adapter);
3136 
3137 	mtx_lock(&adapter->stats_mtx);
3138 	callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter);
3139 	mtx_unlock(&adapter->stats_mtx);
3140 
3141 	al_eth_mac_start(&adapter->hal_adapter);
3142 }
3143 
3144 static int
3145 al_media_update(struct ifnet *ifp)
3146 {
3147 	struct al_eth_adapter *adapter = ifp->if_softc;
3148 
3149 	if ((ifp->if_flags & IFF_UP) != 0)
3150 		mii_mediachg(adapter->mii);
3151 
3152 	return (0);
3153 }
3154 
3155 static void
3156 al_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3157 {
3158 	struct al_eth_adapter *sc = ifp->if_softc;
3159 	struct mii_data *mii;
3160 
3161 	if (sc->mii == NULL) {
3162 		ifmr->ifm_active = IFM_ETHER | IFM_NONE;
3163 		ifmr->ifm_status = 0;
3164 
3165 		return;
3166 	}
3167 
3168 	mii = sc->mii;
3169 	mii_pollstat(mii);
3170 
3171 	ifmr->ifm_active = mii->mii_media_active;
3172 	ifmr->ifm_status = mii->mii_media_status;
3173 }
3174 
3175 static void
3176 al_tick(void *arg)
3177 {
3178 	struct al_eth_adapter *adapter = arg;
3179 
3180 	mii_tick(adapter->mii);
3181 
3182 	/* Schedule another timeout one second from now */
3183 	callout_schedule(&adapter->wd_callout, hz);
3184 }
3185 
3186 static void
3187 al_tick_stats(void *arg)
3188 {
3189 	struct al_eth_adapter *adapter = arg;
3190 
3191 	al_eth_update_stats(adapter);
3192 
3193 	callout_schedule(&adapter->stats_callout, hz);
3194 }
3195 
3196 static int
3197 al_eth_up(struct al_eth_adapter *adapter)
3198 {
3199 	struct ifnet *ifp = adapter->netdev;
3200 	int rc;
3201 
3202 	if (adapter->up)
3203 		return (0);
3204 
3205 	if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) {
3206 		al_eth_function_reset(adapter);
3207 		adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED;
3208 	}
3209 
3210 	ifp->if_hwassist = 0;
3211 	if ((ifp->if_capenable & IFCAP_TSO) != 0)
3212 		ifp->if_hwassist |= CSUM_TSO;
3213 	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
3214 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
3215 	if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) != 0)
3216 		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
3217 
3218 	al_eth_serdes_init(adapter);
3219 
3220 	rc = al_eth_hw_init(adapter);
3221 	if (rc != 0)
3222 		goto err_hw_init_open;
3223 
3224 	rc = al_eth_setup_int_mode(adapter);
3225 	if (rc != 0) {
3226 		device_printf(adapter->dev,
3227 		    "%s failed at setup interrupt mode!\n", __func__);
3228 		goto err_setup_int;
3229 	}
3230 
3231 	/* allocate transmit descriptors */
3232 	rc = al_eth_setup_all_tx_resources(adapter);
3233 	if (rc != 0)
3234 		goto err_setup_tx;
3235 
3236 	/* allocate receive descriptors */
3237 	rc = al_eth_setup_all_rx_resources(adapter);
3238 	if (rc != 0)
3239 		goto err_setup_rx;
3240 
3241 	rc = al_eth_request_irq(adapter);
3242 	if (rc != 0)
3243 		goto err_req_irq;
3244 
3245 	al_eth_up_complete(adapter);
3246 
3247 	adapter->up = true;
3248 
3249 	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial)
3250 		adapter->netdev->if_link_state = LINK_STATE_UP;
3251 
3252 	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
3253 		mii_mediachg(adapter->mii);
3254 
3255 		/* Schedule watchdog timeout */
3256 		mtx_lock(&adapter->wd_mtx);
3257 		callout_reset(&adapter->wd_callout, hz, al_tick, adapter);
3258 		mtx_unlock(&adapter->wd_mtx);
3259 
3260 		mii_pollstat(adapter->mii);
3261 	}
3262 
3263 	return (rc);
3264 
3265 err_req_irq:
3266 	al_eth_free_all_rx_resources(adapter);
3267 err_setup_rx:
3268 	al_eth_free_all_tx_resources(adapter);
3269 err_setup_tx:
3270 	al_eth_free_irq(adapter);
3271 err_setup_int:
3272 	al_eth_hw_stop(adapter);
3273 err_hw_init_open:
3274 	al_eth_function_reset(adapter);
3275 
3276 	return (rc);
3277 }
3278 
3279 static int
3280 al_shutdown(device_t dev)
3281 {
3282 	struct al_eth_adapter *adapter = device_get_softc(dev);
3283 
3284 	al_eth_down(adapter);
3285 
3286 	return (0);
3287 }
3288 
3289 static void
3290 al_eth_down(struct al_eth_adapter *adapter)
3291 {
3292 
3293 	device_printf_dbg(adapter->dev, "al_eth_down: begin\n");
3294 
3295 	adapter->up = false;
3296 
3297 	mtx_lock(&adapter->wd_mtx);
3298 	callout_stop(&adapter->wd_callout);
3299 	mtx_unlock(&adapter->wd_mtx);
3300 
3301 	al_eth_disable_int_sync(adapter);
3302 
3303 	mtx_lock(&adapter->stats_mtx);
3304 	callout_stop(&adapter->stats_callout);
3305 	mtx_unlock(&adapter->stats_mtx);
3306 
3307 	al_eth_free_irq(adapter);
3308 	al_eth_hw_stop(adapter);
3309 
3310 	al_eth_free_all_tx_resources(adapter);
3311 	al_eth_free_all_rx_resources(adapter);
3312 }
3313 
3314 static int
3315 al_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3316 {
3317 	struct al_eth_adapter	*adapter = ifp->if_softc;
3318 	struct ifreq		*ifr = (struct ifreq *)data;
3319 	int			error = 0;
3320 
3321 	switch (command) {
3322 	case SIOCSIFMTU:
3323 	{
3324 		error = al_eth_check_mtu(adapter, ifr->ifr_mtu);
3325 		if (error != 0) {
3326 			device_printf(adapter->dev, "ioctl wrong mtu %d\n",
3327 			    ifr->ifr_mtu);
3328 			break;
3329 		}
3330 
3331 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3332 		adapter->netdev->if_mtu = ifr->ifr_mtu;
3333 		al_init(adapter);
3334 		break;
3335 	}
3336 	case SIOCSIFFLAGS:
3337 		if ((ifp->if_flags & IFF_UP) != 0) {
3338 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3339 				if (((ifp->if_flags ^ adapter->if_flags) &
3340 				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3341 					device_printf_dbg(adapter->dev,
3342 					    "ioctl promisc/allmulti\n");
3343 					al_eth_set_rx_mode(adapter);
3344 				}
3345 			} else {
3346 				error = al_eth_up(adapter);
3347 				if (error == 0)
3348 					ifp->if_drv_flags |= IFF_DRV_RUNNING;
3349 			}
3350 		} else {
3351 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3352 				al_eth_down(adapter);
3353 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3354 			}
3355 		}
3356 
3357 		adapter->if_flags = ifp->if_flags;
3358 		break;
3359 
3360 	case SIOCADDMULTI:
3361 	case SIOCDELMULTI:
3362 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3363 			device_printf_dbg(adapter->dev,
3364 			    "ioctl add/del multi before\n");
3365 			al_eth_set_rx_mode(adapter);
3369 		}
3370 		break;
3371 	case SIOCSIFMEDIA:
3372 	case SIOCGIFMEDIA:
3373 		if (adapter->mii != NULL)
3374 			error = ifmedia_ioctl(ifp, ifr,
3375 			    &adapter->mii->mii_media, command);
3376 		else
3377 			error = ifmedia_ioctl(ifp, ifr,
3378 			    &adapter->media, command);
3379 		break;
3380 	case SIOCSIFCAP:
3381 	    {
3382 		int mask, reinit;
3383 
3384 		reinit = 0;
3385 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3386 #ifdef DEVICE_POLLING
3387 		if ((mask & IFCAP_POLLING) != 0) {
3388 			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
3389 				if (error != 0)
3390 					return (error);
3391 				ifp->if_capenable |= IFCAP_POLLING;
3392 			} else {
3393 				error = ether_poll_deregister(ifp);
3394 				/* Enable interrupt even in error case */
3395 				ifp->if_capenable &= ~IFCAP_POLLING;
3396 			}
3397 		}
3398 #endif
3399 		if ((mask & IFCAP_HWCSUM) != 0) {
3400 			/* apply to both rx and tx */
3401 			ifp->if_capenable ^= IFCAP_HWCSUM;
3402 			reinit = 1;
3403 		}
3404 		if ((mask & IFCAP_HWCSUM_IPV6) != 0) {
3405 			ifp->if_capenable ^= IFCAP_HWCSUM_IPV6;
3406 			reinit = 1;
3407 		}
3408 		if ((mask & IFCAP_TSO) != 0) {
3409 			ifp->if_capenable ^= IFCAP_TSO;
3410 			reinit = 1;
3411 		}
3412 		if ((mask & IFCAP_LRO) != 0) {
3413 			ifp->if_capenable ^= IFCAP_LRO;
3414 		}
3415 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
3416 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3417 			reinit = 1;
3418 		}
3419 		if ((mask & IFCAP_VLAN_HWFILTER) != 0) {
3420 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
3421 			reinit = 1;
3422 		}
3423 		if ((mask & IFCAP_VLAN_HWTSO) != 0) {
3424 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3425 			reinit = 1;
3426 		}
3427 		if ((reinit != 0) &&
3428 		    ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)) {
3429 			al_init(adapter);
3430 		}
3432 		break;
3433 	    }
3434 
3435 	default:
3436 		error = ether_ioctl(ifp, command, data);
3437 		break;
3438 	}
3439 
3440 	return (error);
3441 }
3442 
3443 static int
3444 al_is_device_supported(device_t dev)
3445 {
3446 	uint16_t pci_vendor_id = pci_get_vendor(dev);
3447 	uint16_t pci_device_id = pci_get_device(dev);
3448 
3449 	return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS &&
3450 	    (pci_device_id == PCI_DEVICE_ID_AL_ETH ||
3451 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED ||
3452 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC ||
3453 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC));
3454 }
3455 
3456 /* Time in mSec to keep trying to read / write from MDIO in case of error */
3457 #define	MDIO_TIMEOUT_MSEC	100
3458 #define	MDIO_PAUSE_MSEC		10
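/* i.e. at most MDIO_TIMEOUT_MSEC / MDIO_PAUSE_MSEC = 10 attempts */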
3459 
3460 static int
3461 al_miibus_readreg(device_t dev, int phy, int reg)
3462 {
3463 	struct al_eth_adapter *adapter = device_get_softc(dev);
3464 	uint16_t value = 0;
3465 	int rc;
3466 	int timeout = MDIO_TIMEOUT_MSEC;
3467 
3468 	while (timeout > 0) {
3469 		rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr,
3470 		    -1, reg, &value);
3471 
3472 		if (rc == 0)
3473 			return (value);
3474 
3475 		device_printf_dbg(adapter->dev,
3476 		    "mdio read failed. try again in 10 msec\n");
3477 
3478 		timeout -= MDIO_PAUSE_MSEC;
3479 		pause("readreg pause", MDIO_PAUSE_MSEC);
3480 	}
3481 
3482 	if (rc != 0)
3483 		device_printf(adapter->dev, "MDIO read failed on timeout\n");
3484 
3485 	return (value);
3486 }
3487 
3488 static int
3489 al_miibus_writereg(device_t dev, int phy, int reg, int value)
3490 {
3491 	struct al_eth_adapter *adapter = device_get_softc(dev);
3492 	int rc;
3493 	int timeout = MDIO_TIMEOUT_MSEC;
3494 
3495 	while (timeout > 0) {
3496 		rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr,
3497 		    -1, reg, value);
3498 
3499 		if (rc == 0)
3500 			return (0);
3501 
3502 		device_printf(adapter->dev,
3503 		    "mdio write failed. try again in 10 msec\n");
3504 
3505 		timeout -= MDIO_PAUSE_MSEC;
3506 		pause("miibus writereg", MDIO_PAUSE_MSEC);
3507 	}
3508 
3509 	if (rc != 0)
3510 		device_printf(adapter->dev, "MDIO write failed on timeout\n");
3511 
3512 	return (rc);
3513 }
3514 
3515 static void
3516 al_miibus_statchg(device_t dev)
3517 {
3518 	struct al_eth_adapter *adapter = device_get_softc(dev);
3519 
3520 	device_printf_dbg(adapter->dev,
3521 	    "al_miibus_statchg: state has changed!\n");
3522 	device_printf_dbg(adapter->dev,
3523 	    "al_miibus_statchg: active = 0x%x status = 0x%x\n",
3524 	    adapter->mii->mii_media_active, adapter->mii->mii_media_status);
3525 
3526 	if (adapter->up == 0)
3527 		return;
3528 
3529 	if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) {
3530 		if (adapter->mii->mii_media_status & IFM_ACTIVE) {
3531 			device_printf(adapter->dev, "link is UP\n");
3532 			adapter->netdev->if_link_state = LINK_STATE_UP;
3533 		} else {
3534 			device_printf(adapter->dev, "link is DOWN\n");
3535 			adapter->netdev->if_link_state = LINK_STATE_DOWN;
3536 		}
3537 	}
3538 }
3539 
3540 static void
3541 al_miibus_linkchg(device_t dev)
3542 {
3543 	struct al_eth_adapter *adapter = device_get_softc(dev);
3544 	uint8_t duplex = 0;
3545 	uint8_t speed = 0;
3546 
3547 	if (adapter->mii == NULL)
3548 		return;
3549 
3550 	if ((adapter->netdev->if_flags & IFF_UP) == 0)
3551 		return;
3552 
3553 	/* Ignore link changes when link is not ready */
3554 	if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
3555 	    (IFM_AVALID | IFM_ACTIVE)) {
3556 		return;
3557 	}
3558 
3559 	if ((adapter->mii->mii_media_active & IFM_FDX) != 0)
3560 		duplex = 1;
3561 
3562 	speed = IFM_SUBTYPE(adapter->mii->mii_media_active);
3563 
3564 	if (speed == IFM_10_T) {
3565 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3566 		    AL_10BASE_T_SPEED, duplex);
3567 		return;
3568 	}
3569 
3570 	if (speed == IFM_100_TX) {
3571 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3572 		    AL_100BASE_TX_SPEED, duplex);
3573 		return;
3574 	}
3575 
3576 	if (speed == IFM_1000_T) {
3577 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3578 		    AL_1000BASE_T_SPEED, duplex);
3579 		return;
3580 	}
3581 
3582 	device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n",
3583 	    adapter->mii->mii_media_active);
3584 }
3585