xref: /freebsd/sys/dev/al_eth/al_eth.c (revision 1b9cfd6a625dc82611846cb9a53c1886f7af3758)
1 /*-
2  * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
3  * All rights reserved.
4  *
5  * Developed by Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/bus.h>
32 #include <sys/kernel.h>
33 #include <sys/kthread.h>
34 #include <sys/lock.h>
35 #include <sys/mbuf.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/rman.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <sys/sysctl.h>
42 #include <sys/taskqueue.h>
43 
44 #include <machine/atomic.h>
45 
46 #include "opt_inet.h"
47 #include "opt_inet6.h"
48 
49 #include <net/ethernet.h>
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_arp.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56 #include <netinet/in.h>
57 #include <net/if_vlan_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_lro.h>
60 
61 #ifdef INET
62 #include <netinet/in.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in_var.h>
65 #include <netinet/ip.h>
66 #endif
67 
68 #ifdef INET6
69 #include <netinet/ip6.h>
70 #endif
71 
72 #include <sys/sockio.h>
73 
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76 
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79 
80 #include <al_hal_common.h>
81 #include <al_hal_plat_services.h>
82 #include <al_hal_udma_config.h>
83 #include <al_hal_udma_iofic.h>
84 #include <al_hal_udma_debug.h>
85 #include <al_hal_eth.h>
86 
87 #include "al_eth.h"
88 #include "al_init_eth_lm.h"
89 #include "arm/annapurna/alpine/alpine_serdes.h"
90 
91 #include "miibus_if.h"
92 
93 #define	device_printf_dbg(fmt, ...) do {				\
94 	if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK();		\
95 	    device_printf(fmt, __VA_ARGS__); AL_DBG_UNLOCK();}		\
96 	} while (0)
97 
98 MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");
99 
100 /* move out to some pci header file */
101 #define	PCI_VENDOR_ID_ANNAPURNA_LABS	0x1c36
102 #define	PCI_DEVICE_ID_AL_ETH		0x0001
103 #define	PCI_DEVICE_ID_AL_ETH_ADVANCED	0x0002
104 #define	PCI_DEVICE_ID_AL_ETH_NIC	0x0003
105 #define	PCI_DEVICE_ID_AL_ETH_FPGA_NIC	0x0030
106 #define	PCI_DEVICE_ID_AL_CRYPTO		0x0011
107 #define	PCI_DEVICE_ID_AL_CRYPTO_VF	0x8011
108 #define	PCI_DEVICE_ID_AL_RAID_DMA	0x0021
109 #define	PCI_DEVICE_ID_AL_RAID_DMA_VF	0x8021
110 #define	PCI_DEVICE_ID_AL_USB		0x0041
111 
112 #define	MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
113 #define	MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]
114 
115 #define	AL_ETH_MAC_TABLE_UNICAST_IDX_BASE	0
116 #define	AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT	4
117 #define	AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX	(AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
118 						 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)
119 
120 #define	AL_ETH_MAC_TABLE_DROP_IDX		(AL_ETH_FWD_MAC_NUM - 1)
121 #define	AL_ETH_MAC_TABLE_BROADCAST_IDX		(AL_ETH_MAC_TABLE_DROP_IDX - 1)
122 
123 #define	AL_ETH_THASH_UDMA_SHIFT		0
124 #define	AL_ETH_THASH_UDMA_MASK		(0xF << AL_ETH_THASH_UDMA_SHIFT)
125 
126 #define	AL_ETH_THASH_Q_SHIFT		4
127 #define	AL_ETH_THASH_Q_MASK		(0x3 << AL_ETH_THASH_Q_SHIFT)
128 
129 /* the following defines should be moved to hal */
130 #define	AL_ETH_FSM_ENTRY_IPV4_TCP		0
131 #define	AL_ETH_FSM_ENTRY_IPV4_UDP		1
132 #define	AL_ETH_FSM_ENTRY_IPV6_TCP		2
133 #define	AL_ETH_FSM_ENTRY_IPV6_UDP		3
134 #define	AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP	4
135 #define	AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP	5
136 
137 /* FSM DATA format */
138 #define	AL_ETH_FSM_DATA_OUTER_2_TUPLE	0
139 #define	AL_ETH_FSM_DATA_OUTER_4_TUPLE	1
140 #define	AL_ETH_FSM_DATA_INNER_2_TUPLE	2
141 #define	AL_ETH_FSM_DATA_INNER_4_TUPLE	3
142 
143 #define	AL_ETH_FSM_DATA_HASH_SEL	(1 << 2)
144 
145 #define	AL_ETH_FSM_DATA_DEFAULT_Q	0
146 #define	AL_ETH_FSM_DATA_DEFAULT_UDMA	0
147 
148 #define	AL_BR_SIZE	512
149 #define	AL_TSO_SIZE	65500
150 #define	AL_DEFAULT_MTU	1500
151 
152 #define	CSUM_OFFLOAD		(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
153 
154 #define	AL_IP_ALIGNMENT_OFFSET	2
155 
156 #define	SFP_I2C_ADDR		0x50
157 
158 #define	AL_MASK_GROUP_A_INT	0x7
159 #define	AL_MASK_GROUP_B_INT	0xF
160 #define	AL_MASK_GROUP_C_INT	0xF
161 #define	AL_MASK_GROUP_D_INT	0xFFFFFFFF
162 
163 #define	AL_REG_OFFSET_FORWARD_INTR	(0x1800000 + 0x1210)
164 #define	AL_EN_FORWARD_INTR	0x1FFFF
165 #define	AL_DIS_FORWARD_INTR	0
166 
167 #define	AL_M2S_MASK_INIT	0x480
168 #define	AL_S2M_MASK_INIT	0x1E0
169 #define	AL_M2S_S2M_MASK_NOT_INT	(0x3f << 25)
170 
171 #define	AL_10BASE_T_SPEED	10
172 #define	AL_100BASE_TX_SPEED	100
173 #define	AL_1000BASE_T_SPEED	1000
174 
175 #define	AL_RX_LOCK_INIT(_sc)	mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
176 #define	AL_RX_LOCK(_sc)		mtx_lock(&((_sc)->if_rx_lock))
177 #define	AL_RX_UNLOCK(_sc)	mtx_unlock(&((_sc)->if_rx_lock))
178 
179 /* helper functions */
180 static int al_is_device_supported(device_t);
181 
182 static void al_eth_init_rings(struct al_eth_adapter *);
183 static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
184 int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
185 int al_eth_fpga_write_pci_config(void *, int, uint32_t);
186 int al_eth_read_pci_config(void *, int, uint32_t *);
187 int al_eth_write_pci_config(void *, int, uint32_t);
188 void al_eth_irq_config(uint32_t *, uint32_t);
189 void al_eth_forward_int_config(uint32_t *, uint32_t);
190 static void al_eth_start_xmit(void *, int);
191 static void al_eth_rx_recv_work(void *, int);
192 static int al_eth_up(struct al_eth_adapter *);
193 static void al_eth_down(struct al_eth_adapter *);
194 static void al_eth_interrupts_unmask(struct al_eth_adapter *);
195 static void al_eth_interrupts_mask(struct al_eth_adapter *);
196 static int al_eth_check_mtu(struct al_eth_adapter *, int);
197 static uint64_t al_get_counter(if_t, ift_counter);
198 static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
199 static int al_eth_board_params_init(struct al_eth_adapter *);
200 static int al_media_update(if_t);
201 static void al_media_status(if_t, struct ifmediareq *);
202 static int al_eth_function_reset(struct al_eth_adapter *);
203 static int al_eth_hw_init_adapter(struct al_eth_adapter *);
204 static void al_eth_serdes_init(struct al_eth_adapter *);
205 static void al_eth_lm_config(struct al_eth_adapter *);
206 static int al_eth_hw_init(struct al_eth_adapter *);
207 
208 static void al_tick_stats(void *);
209 
210 /* ifnet entry points */
211 static void al_init(void *);
212 static int al_mq_start(if_t, struct mbuf *);
213 static void al_qflush(if_t);
214 static int al_ioctl(if_t ifp, u_long, caddr_t);
215 
216 /* bus entry points */
217 static int al_probe(device_t);
218 static int al_attach(device_t);
219 static int al_detach(device_t);
220 static int al_shutdown(device_t);
221 
222 /* mii bus support routines */
223 static int al_miibus_readreg(device_t, int, int);
224 static int al_miibus_writereg(device_t, int, int, int);
225 static void al_miibus_statchg(device_t);
226 static void al_miibus_linkchg(device_t);
227 
228 struct al_eth_adapter* g_adapters[16];
229 uint32_t g_adapters_count;
230 
231 /* flag for napi-like mbuf processing, controlled from sysctl */
232 static int napi = 0;
233 
234 static device_method_t al_methods[] = {
235 	/* Device interface */
236 	DEVMETHOD(device_probe,		al_probe),
237 	DEVMETHOD(device_attach,	al_attach),
238 	DEVMETHOD(device_detach,	al_detach),
239 	DEVMETHOD(device_shutdown,	al_shutdown),
240 
241 	DEVMETHOD(miibus_readreg,	al_miibus_readreg),
242 	DEVMETHOD(miibus_writereg,	al_miibus_writereg),
243 	DEVMETHOD(miibus_statchg,	al_miibus_statchg),
244 	DEVMETHOD(miibus_linkchg,	al_miibus_linkchg),
245 	{ 0, 0 }
246 };
247 
248 static driver_t al_driver = {
249 	"al",
250 	al_methods,
251 	sizeof(struct al_eth_adapter),
252 };
253 
254 DRIVER_MODULE(al, pci, al_driver, 0, 0);
255 DRIVER_MODULE(miibus, al, miibus_driver, 0, 0);
256 
257 static int
258 al_probe(device_t dev)
259 {
260 	if ((al_is_device_supported(dev)) != 0) {
261 		device_set_desc(dev, "al");
262 		return (BUS_PROBE_DEFAULT);
263 	}
264 	return (ENXIO);
265 }
266 
/*
 * Device attach method: map the UDMA/MAC/EC BARs, create and configure the
 * ifnet, read the board parameters, reset and initialize the hardware, and
 * finally attach the Ethernet layer (and PHYs for RGMII boards).
 *
 * Returns 0 on success or an errno; on failure before ether_ifattach() the
 * already-mapped BARs are released via the err* labels.
 */
static int
al_attach(device_t dev)
{
	struct al_eth_adapter *adapter;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	if_t ifp;
	uint32_t dev_id;
	uint32_t rev_id;
	int bar_udma;
	int bar_mac;
	int bar_ec;
	int err;

	err = 0;
	ifp = NULL;
	dev_id = rev_id = 0;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
	child = SYSCTL_CHILDREN(tree);

	/* The "napi" sysctl is shared by all instances; publish it once. */
	if (g_adapters_count == 0) {
		SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi",
		    CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism");
	}
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	adapter->board_type = ALPINE_INTEGRATED;
	snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
	    device_get_nameunit(dev));
	AL_RX_LOCK_INIT(adapter);

	/*
	 * NOTE(review): g_adapters has 16 slots but g_adapters_count is not
	 * bounds-checked here -- confirm the instance count is limited
	 * elsewhere.
	 */
	g_adapters[g_adapters_count] = adapter;

	/* Map the three PCI BARs: UDMA, MAC and Ethernet controller (EC). */
	bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
	adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar_udma, RF_ACTIVE);
	if (adapter->udma_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for DMA.\n");
		err = ENOMEM;
		goto err_res_dma;
	}
	adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
	    rman_get_bushandle(adapter->udma_res));
	bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
	adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar_mac, RF_ACTIVE);
	if (adapter->mac_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for MAC.\n");
		err = ENOMEM;
		goto err_res_mac;
	}
	adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
	    rman_get_bushandle(adapter->mac_res));

	bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
	adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
	    RF_ACTIVE);
	if (adapter->ec_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for EC.\n");
		err = ENOMEM;
		goto err_res_ec;
	}
	adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
	    rman_get_bushandle(adapter->ec_res));

	/* Create the network interface and wire up its method table. */
	adapter->netdev = ifp = if_alloc(IFT_ETHER);

	if_setsoftc(ifp, adapter);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	if_setflags(ifp, if_getdrvflags(ifp));
	if_setflagbits(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI, 0);
	if_settransmitfn(ifp, al_mq_start);
	if_setqflushfn(ifp, al_qflush);
	if_setioctlfn(ifp, al_ioctl);
	if_setinitfn(ifp, al_init);
	if_setgetcounterfn(ifp, al_get_counter);
	if_setmtu(ifp, AL_DEFAULT_MTU);

	adapter->if_flags = if_getflags(ifp);

	if_setcapabilities(ifp, if_getcapenable(ifp) );

	/* Advertise checksum offload, TSO, LRO and jumbo frames. */
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
	    IFCAP_LRO | IFCAP_JUMBO_MTU, 0);

	if_setcapenable(ifp, if_getcapabilities(ifp));

	adapter->id_number = g_adapters_count;

	/* Device/revision IDs come from PCI config space (or FPGA regs). */
	if (adapter->board_type == ALPINE_INTEGRATED) {
		dev_id = pci_get_device(adapter->dev);
		rev_id = pci_get_revid(adapter->dev);
	} else {
		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
		    PCIR_DEVICE, &dev_id);
		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
		    PCIR_REVID, &rev_id);
	}

	adapter->dev_id = dev_id;
	adapter->rev_id = rev_id;

	/* set default ring sizes */
	adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
	adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
	adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
	adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;

	adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
	adapter->num_rx_queues = AL_ETH_NUM_QUEUES;

	adapter->small_copy_len	= AL_ETH_DEFAULT_SMALL_PACKET_LEN;
	adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
	adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;

	al_eth_req_rx_buff_size(adapter, if_getmtu(adapter->netdev));

	adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;

	err = al_eth_board_params_init(adapter);
	if (err != 0)
		goto err;

	/* Serial 10G boards use ifmedia directly (no MII PHY). */
	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
		ifmedia_init(&adapter->media, IFM_IMASK,
		    al_media_update, al_media_status);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	}

	al_eth_function_reset(adapter);

	err = al_eth_hw_init_adapter(adapter);
	if (err != 0)
		goto err;

	al_eth_init_rings(adapter);
	g_adapters_count++;

	al_eth_lm_config(adapter);
	mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
	mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
	callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
	callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);

	ether_ifattach(ifp, adapter->mac_addr);
	if_setmtu(ifp, AL_DEFAULT_MTU);

	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
		al_eth_hw_init(adapter);

		/* Attach PHY(s) */
		err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
		    al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
		    MII_OFFSET_ANY, 0);
		if (err != 0) {
			device_printf(adapter->dev, "attaching PHYs failed\n");
			/*
			 * NOTE(review): returning here leaves the BARs mapped
			 * and the ifnet attached -- confirm whether this path
			 * should unwind through the err* labels instead.
			 */
			return (err);
		}

		adapter->mii = device_get_softc(adapter->miibus);
	}

	return (err);

err:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
err_res_ec:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
err_res_mac:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
err_res_dma:
	return (err);
}
450 
451 static int
452 al_detach(device_t dev)
453 {
454 	struct al_eth_adapter *adapter;
455 
456 	adapter = device_get_softc(dev);
457 	ether_ifdetach(adapter->netdev);
458 
459 	mtx_destroy(&adapter->stats_mtx);
460 	mtx_destroy(&adapter->wd_mtx);
461 
462 	al_eth_down(adapter);
463 
464 	bus_release_resource(dev, SYS_RES_IRQ,    0, adapter->irq_res);
465 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
466 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res);
467 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);
468 
469 	return (0);
470 }
471 
472 int
473 al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
474 {
475 
476 	/* handle is the base address of the adapter */
477 	*val = al_reg_read32((void*)((u_long)handle + where));
478 
479 	return (0);
480 }
481 
482 int
483 al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
484 {
485 
486 	/* handle is the base address of the adapter */
487 	al_reg_write32((void*)((u_long)handle + where), val);
488 	return (0);
489 }
490 
491 int
492 al_eth_read_pci_config(void *handle, int where, uint32_t *val)
493 {
494 
495 	/* handle is a pci_dev */
496 	*val = pci_read_config((device_t)handle, where, sizeof(*val));
497 	return (0);
498 }
499 
500 int
501 al_eth_write_pci_config(void *handle, int where, uint32_t val)
502 {
503 
504 	/* handle is a pci_dev */
505 	pci_write_config((device_t)handle, where, val, sizeof(val));
506 	return (0);
507 }
508 
/*
 * HAL callback: program an interrupt configuration register.  Uses the
 * relaxed (unordered) register write variant.
 */
void
al_eth_irq_config(uint32_t *offset, uint32_t value)
{

	al_reg_write32_relaxed(offset, value);
}
515 
/*
 * HAL callback: program an interrupt-forwarding register.  Unlike
 * al_eth_irq_config(), this uses the ordered register write.
 */
void
al_eth_forward_int_config(uint32_t *offset, uint32_t value)
{

	al_reg_write32(offset, value);
}
522 
/*
 * Look up this adapter's serdes group registers via the Alpine platform
 * glue and initialize a HAL serdes handle for it.  On success
 * adapter->serdes_init is set to true; on failure it stays false and the
 * adapter runs without serdes control.
 */
static void
al_eth_serdes_init(struct al_eth_adapter *adapter)
{
	void __iomem	*serdes_base;

	adapter->serdes_init = false;

	serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
	if (serdes_base == NULL) {
		device_printf(adapter->dev, "serdes_base get failed!\n");
		return;
	}

	/*
	 * NOTE(review): serdes_tag is defined outside this view --
	 * presumably the platform bus tag for the serdes window; confirm.
	 */
	serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);

	al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
	    &adapter->serdes_obj);

	adapter->serdes_init = true;
}
543 
544 static void
545 al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
546 {
547 	bus_addr_t *paddr;
548 
549 	paddr = arg;
550 	*paddr = segs->ds_addr;
551 }
552 
553 static int
554 al_dma_alloc_coherent(device_t dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
555     bus_addr_t *baddr, void **vaddr, uint32_t size)
556 {
557 	int ret;
558 	uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE;
559 
560 	ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
561 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
562 	    maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag);
563 	if (ret != 0) {
564 		device_printf(dev,
565 		    "failed to create bus tag, ret = %d\n", ret);
566 		return (ret);
567 	}
568 
569 	ret = bus_dmamem_alloc(*tag, vaddr,
570 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
571 	if (ret != 0) {
572 		device_printf(dev,
573 		    "failed to allocate dmamem, ret = %d\n", ret);
574 		return (ret);
575 	}
576 
577 	ret = bus_dmamap_load(*tag, *map, *vaddr,
578 	    size, al_dma_map_addr, baddr, 0);
579 	if (ret != 0) {
580 		device_printf(dev,
581 		    "failed to allocate bus_dmamap_load, ret = %d\n", ret);
582 		return (ret);
583 	}
584 
585 	return (0);
586 }
587 
/*
 * Undo al_dma_alloc_coherent(): unload the map, free the DMA memory and
 * destroy the tag -- in that order, as required by busdma.
 */
static void
al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
{

	bus_dmamap_unload(tag, map);
	bus_dmamem_free(tag, vaddr, map);
	bus_dma_tag_destroy(tag);
}
596 
597 static void
598 al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter,
599     uint8_t idx, uint8_t udma_mask)
600 {
601 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
602 
603 	memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr));
604 
605 	memset(entry.mask, 0xff, sizeof(entry.mask));
606 	entry.rx_valid = true;
607 	entry.tx_valid = false;
608 	entry.udma_mask = udma_mask;
609 	entry.filter = false;
610 
611 	device_printf_dbg(adapter->dev,
612 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
613 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
614 
615 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
616 }
617 
618 static void
619 al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx,
620     uint8_t udma_mask)
621 {
622 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
623 
624 	memset(entry.addr, 0x00, sizeof(entry.addr));
625 	memset(entry.mask, 0x00, sizeof(entry.mask));
626 	entry.mask[0] |= 1;
627 	entry.addr[0] |= 1;
628 
629 	entry.rx_valid = true;
630 	entry.tx_valid = false;
631 	entry.udma_mask = udma_mask;
632 	entry.filter = false;
633 
634 	device_printf_dbg(adapter->dev,
635 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
636 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
637 
638 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
639 }
640 
641 static void
642 al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter,
643     uint8_t idx, uint8_t udma_mask)
644 {
645 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
646 
647 	memset(entry.addr, 0xff, sizeof(entry.addr));
648 	memset(entry.mask, 0xff, sizeof(entry.mask));
649 
650 	entry.rx_valid = true;
651 	entry.tx_valid = false;
652 	entry.udma_mask = udma_mask;
653 	entry.filter = false;
654 
655 	device_printf_dbg(adapter->dev,
656 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
657 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
658 
659 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
660 }
661 
662 static void
663 al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter,
664     bool promiscuous)
665 {
666 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
667 
668 	memset(entry.addr, 0x00, sizeof(entry.addr));
669 	memset(entry.mask, 0x00, sizeof(entry.mask));
670 
671 	entry.rx_valid = true;
672 	entry.tx_valid = false;
673 	entry.udma_mask = (promiscuous) ? 1 : 0;
674 	entry.filter = (promiscuous) ? false : true;
675 
676 	device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n",
677 	    __func__, (promiscuous) ? "enter" : "exit");
678 
679 	al_eth_fwd_mac_table_set(&adapter->hal_adapter,
680 	    AL_ETH_MAC_TABLE_DROP_IDX, &entry);
681 }
682 
683 static void
684 al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx,
685     uint8_t udma, uint32_t queue)
686 {
687 
688 	if (udma != 0)
689 		panic("only UDMA0 is supporter");
690 
691 	if (queue >= AL_ETH_NUM_QUEUES)
692 		panic("invalid queue number");
693 
694 	al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue);
695 }
696 
697 /* init FSM, no tunneling supported yet, if packet is tcp/udp over ipv4/ipv6, use 4 tuple hash */
698 static void
699 al_eth_fsm_table_init(struct al_eth_adapter *adapter)
700 {
701 	uint32_t val;
702 	int i;
703 
704 	for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) {
705 		uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i);
706 		switch (outer_type) {
707 		case AL_ETH_FSM_ENTRY_IPV4_TCP:
708 		case AL_ETH_FSM_ENTRY_IPV4_UDP:
709 		case AL_ETH_FSM_ENTRY_IPV6_TCP:
710 		case AL_ETH_FSM_ENTRY_IPV6_UDP:
711 			val = AL_ETH_FSM_DATA_OUTER_4_TUPLE |
712 			    AL_ETH_FSM_DATA_HASH_SEL;
713 			break;
714 		case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP:
715 		case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP:
716 			val = AL_ETH_FSM_DATA_OUTER_2_TUPLE |
717 			    AL_ETH_FSM_DATA_HASH_SEL;
718 			break;
719 		default:
720 			val = AL_ETH_FSM_DATA_DEFAULT_Q |
721 			    AL_ETH_FSM_DATA_DEFAULT_UDMA;
722 		}
723 		al_eth_fsm_table_set(&adapter->hal_adapter, i, val);
724 	}
725 }
726 
727 static void
728 al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter,
729     uint8_t idx)
730 {
731 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
732 
733 	device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx);
734 
735 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
736 }
737 
/*
 * Fill in the HAL adapter parameters from the softc and initialize the HAL
 * adapter object.  For PCIe NIC boards, additionally force all UDMA queues
 * to access PCIE0 via target-id 0x100.  Returns the HAL init status.
 */
static int
al_eth_hw_init_adapter(struct al_eth_adapter *adapter)
{
	struct al_eth_adapter_params *params = &adapter->eth_hal_params;
	int rc;

	/* params->dev_id = adapter->dev_id; */
	params->rev_id = adapter->rev_id;
	params->udma_id = 0;
	params->enable_rx_parser = 1; /* enable rx epe parser*/
	params->udma_regs_base = adapter->udma_base; /* UDMA register base address */
	params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */
	params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */
	params->name = adapter->name;
	params->serdes_lane = adapter->serdes_lane;

	rc = al_eth_adapter_init(&adapter->hal_adapter, params);
	if (rc != 0)
		device_printf(adapter->dev, "%s failed at hal init!\n",
		    __func__);

	/*
	 * NOTE(review): execution continues into the tgtid configuration
	 * below even when the HAL init failed -- confirm this is intended.
	 */
	if ((adapter->board_type == ALPINE_NIC) ||
	    (adapter->board_type == ALPINE_FPGA_NIC)) {
		/* in pcie NIC mode, force eth UDMA to access PCIE0 using the vmid */
		struct al_udma_gen_tgtid_conf conf;
		int i;
		for (i = 0; i < DMA_MAX_Q; i++) {
			conf.tx_q_conf[i].queue_en = AL_TRUE;
			conf.tx_q_conf[i].desc_en = AL_FALSE;
			conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
			conf.rx_q_conf[i].queue_en = AL_TRUE;
			conf.rx_q_conf[i].desc_en = AL_FALSE;
			conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
		}
		al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf);
	}

	return (rc);
}
777 
/*
 * Build the link-management (LM) init parameters from the adapter state
 * and initialize the LM context.  When SFP detection is enabled the LM
 * layer probes the module over I2C; otherwise a default link mode is
 * chosen from the MAC mode (10G DA when link training + autoneg are on).
 */
static void
al_eth_lm_config(struct al_eth_adapter *adapter)
{
	struct al_eth_lm_init_params params = {0};

	params.adapter = &adapter->hal_adapter;
	params.serdes_obj = &adapter->serdes_obj;
	params.lane = adapter->serdes_lane;
	params.sfp_detection = adapter->sfp_detection_needed;
	if (adapter->sfp_detection_needed == true) {
		params.sfp_bus_id = adapter->i2c_adapter_id;
		params.sfp_i2c_addr = SFP_I2C_ADDR;
	}

	if (adapter->sfp_detection_needed == false) {
		switch (adapter->mac_mode) {
		case AL_ETH_MAC_MODE_10GbE_Serial:
			/* Direct-attach copper when KR link training + autoneg. */
			if ((adapter->lt_en != 0) && (adapter->an_en != 0))
				params.default_mode = AL_ETH_LM_MODE_10G_DA;
			else
				params.default_mode = AL_ETH_LM_MODE_10G_OPTIC;
			break;
		case AL_ETH_MAC_MODE_SGMII:
			params.default_mode = AL_ETH_LM_MODE_1G;
			break;
		default:
			params.default_mode = AL_ETH_LM_MODE_10G_DA;
		}
	} else
		params.default_mode = AL_ETH_LM_MODE_10G_DA;

	params.link_training = adapter->lt_en;
	params.rx_equal = true;
	/* Use static serdes values unless the board says not to override. */
	params.static_values = !adapter->dont_override_serdes;
	params.i2c_context = adapter;
	params.kr_fec_enable = false;

	params.retimer_exist = adapter->retimer.exist;
	params.retimer_bus_id = adapter->retimer.bus_id;
	params.retimer_i2c_addr = adapter->retimer.i2c_addr;
	params.retimer_channel = adapter->retimer.channel;

	al_eth_lm_init(&adapter->lm_context, &params);
}
822 
/*
 * Populate the adapter's link/board configuration.  NIC and FPGA-NIC
 * boards use hard-coded defaults; integrated boards read the parameters
 * the bootloader stored in the MAC scratch registers.  Finally the MAC
 * address is read from the EC.  Returns 0 on success, -1 on failure.
 */
static int
al_eth_board_params_init(struct al_eth_adapter *adapter)
{

	if (adapter->board_type == ALPINE_NIC) {
		adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
		adapter->sfp_detection_needed = false;
		adapter->phy_exist = false;
		adapter->an_en = false;
		adapter->lt_en = false;
		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
	} else if (adapter->board_type == ALPINE_FPGA_NIC) {
		adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
		adapter->sfp_detection_needed = false;
		adapter->phy_exist = false;
		adapter->an_en = false;
		adapter->lt_en = false;
		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
	} else {
		struct al_eth_board_params params;
		int rc;

		adapter->auto_speed = false;

		rc = al_eth_board_params_get(adapter->mac_base, &params);
		if (rc != 0) {
			device_printf(adapter->dev,
			    "board info not available\n");
			return (-1);
		}

		adapter->phy_exist = params.phy_exist == true;
		adapter->phy_addr = params.phy_mdio_addr;
		adapter->an_en = params.autoneg_enable;
		adapter->lt_en = params.kr_lt_enable;
		adapter->serdes_grp = params.serdes_grp;
		adapter->serdes_lane = params.serdes_lane;
		adapter->sfp_detection_needed = params.sfp_plus_module_exist;
		adapter->i2c_adapter_id = params.i2c_adapter_id;
		adapter->ref_clk_freq = params.ref_clk_freq;
		adapter->dont_override_serdes = params.dont_override_serdes;
		adapter->link_config.active_duplex = !params.half_duplex;
		adapter->link_config.autoneg = !params.an_disable;
		adapter->link_config.force_1000_base_x = params.force_1000_base_x;
		adapter->retimer.exist = params.retimer_exist;
		adapter->retimer.bus_id = params.retimer_bus_id;
		adapter->retimer.i2c_addr = params.retimer_i2c_addr;
		adapter->retimer.channel = params.retimer_channel;

		switch (params.speed) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid speed (%d)\n", __func__, params.speed);
			/* FALLTHROUGH: unknown speed defaults to 1000M */
		case AL_ETH_BOARD_1G_SPEED_1000M:
			adapter->link_config.active_speed = 1000;
			break;
		case AL_ETH_BOARD_1G_SPEED_100M:
			adapter->link_config.active_speed = 100;
			break;
		case AL_ETH_BOARD_1G_SPEED_10M:
			adapter->link_config.active_speed = 10;
			break;
		}

		switch (params.mdio_freq) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid mdio freq (%d)\n", __func__,
			    params.mdio_freq);
			/* FALLTHROUGH: unknown MDIO freq defaults to 2.5 MHz */
		case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ:
			adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
			break;
		case AL_ETH_BOARD_MDIO_FREQ_1_MHZ:
			adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ;
			break;
		}

		/* Map media type to MAC mode and link-management usage. */
		switch (params.media_type) {
		case AL_ETH_BOARD_MEDIA_TYPE_RGMII:
			if (params.sfp_plus_module_exist == true)
				/* Backward compatibility */
				adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
			else
				adapter->mac_mode = AL_ETH_MAC_MODE_RGMII;

			adapter->use_lm = false;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_SGMII:
			adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR:
			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT:
			adapter->sfp_detection_needed = true;
			adapter->auto_speed = false;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED:
			adapter->sfp_detection_needed = true;
			adapter->auto_speed = true;
			adapter->mac_mode_set = false;
			adapter->use_lm = true;

			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
			break;
		default:
			device_printf(adapter->dev,
			    "%s: unsupported media type %d\n",
			    __func__, params.media_type);
			return (-1);
		}

		device_printf(adapter->dev,
		    "Board info: phy exist %s. phy addr %d. mdio freq %u Khz. "
		    "SFP connected %s. media %d\n",
		    params.phy_exist ? "Yes" : "No",
		    params.phy_mdio_addr, adapter->mdio_freq,
		    params.sfp_plus_module_exist ? "Yes" : "No",
		    params.media_type);
	}

	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);

	return (0);
}
953 
/*
 * Perform a function-level reset of the Ethernet unit.  The board
 * parameters and the primary MAC address live in registers clobbered by
 * the reset, so both are saved first and restored afterwards.  Returns
 * the al_eth_flr_rmn() status.
 */
static int
al_eth_function_reset(struct al_eth_adapter *adapter)
{
	struct al_eth_board_params params;
	int rc;

	/* save board params so we restore it after reset */
	al_eth_board_params_get(adapter->mac_base, &params);
	/*
	 * NOTE(review): the return value of al_eth_board_params_get() is
	 * ignored here; a failed read would restore garbage below --
	 * confirm it cannot fail at this point.
	 */
	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
	if (adapter->board_type == ALPINE_INTEGRATED)
		rc = al_eth_flr_rmn(&al_eth_read_pci_config,
		    &al_eth_write_pci_config,
		    adapter->dev, adapter->mac_base);
	else
		rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config,
		    &al_eth_fpga_write_pci_config,
		    adapter->internal_pcie_base, adapter->mac_base);

	/* restore params */
	al_eth_board_params_set(adapter->mac_base, &params);
	al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr);

	return (rc);
}
978 
979 static void
980 al_eth_init_rings(struct al_eth_adapter *adapter)
981 {
982 	int i;
983 
984 	for (i = 0; i < adapter->num_tx_queues; i++) {
985 		struct al_eth_ring *ring = &adapter->tx_ring[i];
986 
987 		ring->ring_id = i;
988 		ring->dev = adapter->dev;
989 		ring->adapter = adapter;
990 		ring->netdev = adapter->netdev;
991 		al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
992 		    &ring->dma_q);
993 		ring->sw_count = adapter->tx_ring_count;
994 		ring->hw_count = adapter->tx_descs_count;
995 		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get((struct unit_regs *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
996 		ring->unmask_val = ~(1 << i);
997 	}
998 
999 	for (i = 0; i < adapter->num_rx_queues; i++) {
1000 		struct al_eth_ring *ring = &adapter->rx_ring[i];
1001 
1002 		ring->ring_id = i;
1003 		ring->dev = adapter->dev;
1004 		ring->adapter = adapter;
1005 		ring->netdev = adapter->netdev;
1006 		al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q);
1007 		ring->sw_count = adapter->rx_ring_count;
1008 		ring->hw_count = adapter->rx_descs_count;
1009 		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
1010 		    (struct unit_regs *)adapter->udma_base,
1011 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1012 		ring->unmask_val = ~(1 << i);
1013 	}
1014 }
1015 
1016 static void
1017 al_init_locked(void *arg)
1018 {
1019 	struct al_eth_adapter *adapter = arg;
1020 	if_t ifp = adapter->netdev;
1021 	int rc = 0;
1022 
1023 	al_eth_down(adapter);
1024 	rc = al_eth_up(adapter);
1025 
1026 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1027 	if (rc == 0)
1028 		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1029 }
1030 
static void
al_init(void *arg)
{

	/* if_init entry point; simply forwards to the locked variant. */
	al_init_locked(arg);
}
1038 
1039 static inline int
1040 al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
1041     struct al_eth_ring *rx_ring,
1042     struct al_eth_rx_buffer *rx_info)
1043 {
1044 	struct al_buf *al_buf;
1045 	bus_dma_segment_t segs[2];
1046 	int error;
1047 	int nsegs;
1048 
1049 	if (rx_info->m != NULL)
1050 		return (0);
1051 
1052 	rx_info->data_size = adapter->rx_mbuf_sz;
1053 
1054 	AL_RX_LOCK(adapter);
1055 
1056 	/* Get mbuf using UMA allocator */
1057 	rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1058 	    rx_info->data_size);
1059 	AL_RX_UNLOCK(adapter);
1060 
1061 	if (rx_info->m == NULL)
1062 		return (ENOMEM);
1063 
1064 	rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;
1065 
1066 	/* Map packets for DMA */
1067 	error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
1068 	    rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
1069 	if (__predict_false(error)) {
1070 		device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
1071 		    error);
1072 		m_freem(rx_info->m);
1073 		rx_info->m = NULL;
1074 		return (EFAULT);
1075 	}
1076 
1077 	al_buf = &rx_info->al_buf;
1078 	al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
1079 	al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;
1080 
1081 	return (0);
1082 }
1083 
1084 static int
1085 al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
1086     unsigned int num)
1087 {
1088 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
1089 	uint16_t next_to_use;
1090 	unsigned int i;
1091 
1092 	next_to_use = rx_ring->next_to_use;
1093 
1094 	for (i = 0; i < num; i++) {
1095 		int rc;
1096 		struct al_eth_rx_buffer *rx_info =
1097 		    &rx_ring->rx_buffer_info[next_to_use];
1098 
1099 		if (__predict_false(al_eth_alloc_rx_buf(adapter,
1100 		    rx_ring, rx_info) < 0)) {
1101 			device_printf(adapter->dev,
1102 			    "failed to alloc buffer for rx queue %d\n", qid);
1103 			break;
1104 		}
1105 
1106 		rc = al_eth_rx_buffer_add(rx_ring->dma_q,
1107 		    &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
1108 		if (__predict_false(rc)) {
1109 			device_printf(adapter->dev,
1110 			    "failed to add buffer for rx queue %d\n", qid);
1111 			break;
1112 		}
1113 
1114 		next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
1115 	}
1116 
1117 	if (__predict_false(i < num))
1118 		device_printf(adapter->dev,
1119 		    "refilled rx queue %d with %d pages only - available %d\n",
1120 		    qid, i, al_udma_available_get(rx_ring->dma_q));
1121 
1122 	if (__predict_true(i))
1123 		al_eth_rx_buffer_action(rx_ring->dma_q, i);
1124 
1125 	rx_ring->next_to_use = next_to_use;
1126 
1127 	return (i);
1128 }
1129 
1130 /*
1131  * al_eth_refill_all_rx_bufs - allocate all queues Rx buffers
1132  * @adapter: board private structure
1133  */
1134 static void
1135 al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
1136 {
1137 	int i;
1138 
1139 	for (i = 0; i < adapter->num_rx_queues; i++)
1140 		al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
1141 }
1142 
/*
 * Reclaim completed Tx work on @tx_ring: for every packet whose
 * descriptors the hardware reports done, unload its DMA map, free its
 * mbuf and advance next_to_clean.  Runs from the completion taskqueue.
 */
static void
al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
{
	unsigned int total_done;
	uint16_t next_to_clean;
	int qid = tx_ring->ring_id;

	/* Descriptors the hardware has completed since the last call. */
	total_done = al_eth_comp_tx_get(tx_ring->dma_q);
	device_printf_dbg(tx_ring->dev,
	    "tx_poll: q %d total completed descs %x\n", qid, total_done);
	next_to_clean = tx_ring->next_to_clean;

	while (total_done != 0) {
		struct al_eth_tx_buffer *tx_info;
		struct mbuf *mbuf;

		tx_info = &tx_ring->tx_buffer_info[next_to_clean];
		/* stop if not all descriptors of the packet are completed */
		if (tx_info->tx_descs > total_done)
			break;

		mbuf = tx_info->m;

		/* Clear the slot before freeing so it is never stale. */
		tx_info->m = NULL;

		device_printf_dbg(tx_ring->dev,
		    "tx_poll: q %d mbuf %p completed\n", qid, mbuf);

		/* map is no longer required */
		bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);

		m_freem(mbuf);
		/* Account for all descriptors this packet consumed. */
		total_done -= tx_info->tx_descs;
		next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
	}

	tx_ring->next_to_clean = next_to_clean;

	device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
	    qid, next_to_clean);

	/*
	 * need to make the rings circular update visible to
	 * al_eth_start_xmit() before checking for netif_queue_stopped().
	 */
	al_smp_data_memory_barrier();
}
1190 
/*
 * Fill the HAL packet descriptor's checksum/TSO offload fields for
 * outgoing mbuf @m: select L3/L4 protocol indices, set the csum/TSO
 * flags and populate the metadata block (header offsets, MSS).
 *
 * NOTE(review): header parsing dereferences the Ethernet/IP/TCP
 * headers directly via m->m_data, which assumes they are contiguous
 * in the first mbuf — confirm callers guarantee that.
 */
static void
al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
    struct al_eth_pkt *hal_pkt, struct mbuf *m)
{
	uint32_t mss = m->m_pkthdr.tso_segsz;
	struct ether_vlan_header *eh;
	uint16_t etype;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct tcphdr *th = NULL;
	int	ehdrlen, ip_hlen = 0;
	uint8_t	ipproto = 0;
	uint32_t offload = 0;

	/* Offload is needed if TSO is requested or any csum flag is set. */
	if (mss != 0)
		offload = 1;

	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0)
		offload = 1;

	if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
		offload = 1;

	if (offload != 0) {
		struct al_eth_meta_data *meta = &tx_ring->hal_meta;

		/* TSO implies full L4 csum; otherwise request partial. */
		if (mss != 0)
			hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO |
			    AL_ETH_TX_FLAGS_L4_CSUM);
		else
			hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM |
			    AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM);

		/*
		 * Determine where frame payload starts.
		 * Jump over vlan headers if already present,
		 * helpful for QinQ too.
		 */
		eh = mtod(m, struct ether_vlan_header *);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehdrlen = ETHER_HDR_LEN;
		}

		switch (etype) {
#ifdef INET
		case ETHERTYPE_IP:
			ip = (struct ip *)(m->m_data + ehdrlen);
			ip_hlen = ip->ip_hl << 2;
			ipproto = ip->ip_p;
			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4;
			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
			if (mss != 0)
				hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM;
			/* Anything that is not TCP is treated as UDP here. */
			if (ipproto == IPPROTO_TCP)
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
			else
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
			break;
#endif /* INET */
#ifdef INET6
		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6;
			ip_hlen = sizeof(struct ip6_hdr);
			th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
			/* NOTE(review): ip6_nxt may be an extension header,
			 * not the final L4 protocol — confirm. */
			ipproto = ip6->ip6_nxt;
			if (ipproto == IPPROTO_TCP)
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
			else
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
			break;
#endif /* INET6 */
		default:
			break;
		}

		meta->words_valid = 4;
		meta->l3_header_len = ip_hlen;
		meta->l3_header_offset = ehdrlen;
		/* th_off is the TCP data offset in 32-bit words — presumably
		 * the unit the HAL expects; TODO confirm. */
		if (th != NULL)
			meta->l4_header_len = th->th_off; /* this param needed only for TSO */
		meta->mss_idx_sel = 0;			/* check how to select MSS */
		meta->mss_val = mss;
		hal_pkt->meta = meta;
	} else
		hal_pkt->meta = NULL;
}
1286 
1287 #define	XMIT_QUEUE_TIMEOUT	100
1288 
1289 static void
1290 al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m)
1291 {
1292 	struct al_eth_tx_buffer *tx_info;
1293 	int error;
1294 	int nsegs, a;
1295 	uint16_t next_to_use;
1296 	bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1];
1297 	struct al_eth_pkt *hal_pkt;
1298 	struct al_buf *al_buf;
1299 	bool remap;
1300 
1301 	/* Check if queue is ready */
1302 	if (unlikely(tx_ring->stall) != 0) {
1303 		for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) {
1304 			if (al_udma_available_get(tx_ring->dma_q) >=
1305 			    (AL_ETH_DEFAULT_TX_HW_DESCS -
1306 			    AL_ETH_TX_WAKEUP_THRESH)) {
1307 				tx_ring->stall = 0;
1308 				break;
1309 			}
1310 			pause("stall", 1);
1311 		}
1312 		if (a == XMIT_QUEUE_TIMEOUT) {
1313 			device_printf(tx_ring->dev,
1314 			    "timeout waiting for queue %d ready!\n",
1315 			    tx_ring->ring_id);
1316 			return;
1317 		} else {
1318 			device_printf_dbg(tx_ring->dev,
1319 			    "queue %d is ready!\n", tx_ring->ring_id);
1320 		}
1321 	}
1322 
1323 	next_to_use = tx_ring->next_to_use;
1324 	tx_info = &tx_ring->tx_buffer_info[next_to_use];
1325 	tx_info->m = m;
1326 	hal_pkt = &tx_info->hal_pkt;
1327 
1328 	if (m == NULL) {
1329 		device_printf(tx_ring->dev, "mbuf is NULL\n");
1330 		return;
1331 	}
1332 
1333 	remap = true;
1334 	/* Map packets for DMA */
1335 retry:
1336 	error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map,
1337 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
1338 	if (__predict_false(error)) {
1339 		struct mbuf *m_new;
1340 
1341 		if (error == EFBIG) {
1342 			/* Try it again? - one try */
1343 			if (remap == true) {
1344 				remap = false;
1345 				m_new = m_defrag(m, M_NOWAIT);
1346 				if (m_new == NULL) {
1347 					device_printf(tx_ring->dev,
1348 					    "failed to defrag mbuf\n");
1349 					goto exit;
1350 				}
1351 				m = m_new;
1352 				goto retry;
1353 			} else {
1354 				device_printf(tx_ring->dev,
1355 				    "failed to map mbuf, error %d\n", error);
1356 				goto exit;
1357 			}
1358 		} else {
1359 			device_printf(tx_ring->dev,
1360 			    "failed to map mbuf, error %d\n", error);
1361 			goto exit;
1362 		}
1363 	}
1364 
1365 	/* set flags and meta data */
1366 	hal_pkt->flags = AL_ETH_TX_FLAGS_INT;
1367 	al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m);
1368 
1369 	al_buf = hal_pkt->bufs;
1370 	for (a = 0; a < nsegs; a++) {
1371 		al_buf->addr = segs[a].ds_addr;
1372 		al_buf->len = segs[a].ds_len;
1373 
1374 		al_buf++;
1375 	}
1376 
1377 	hal_pkt->num_of_bufs = nsegs;
1378 
1379 	/* prepare the packet's descriptors to dma engine */
1380 	tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);
1381 
1382 	if (tx_info->tx_descs == 0)
1383 		goto exit;
1384 
1385 	/*
1386 	 * stop the queue when no more space available, the packet can have up
1387 	 * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor
1388 	 */
1389 	if (unlikely(al_udma_available_get(tx_ring->dma_q) <
1390 	    (AL_ETH_PKT_MAX_BUFS + 2))) {
1391 		tx_ring->stall = 1;
1392 		device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n",
1393 		    tx_ring->ring_id);
1394 		al_data_memory_barrier();
1395 	}
1396 
1397 	tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);
1398 
1399 	/* trigger the dma engine */
1400 	al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
1401 	return;
1402 
1403 exit:
1404 	m_freem(m);
1405 }
1406 
1407 static void
1408 al_eth_tx_cmpl_work(void *arg, int pending)
1409 {
1410 	struct al_eth_ring *tx_ring = arg;
1411 
1412 	if (napi != 0) {
1413 		tx_ring->cmpl_is_running = 1;
1414 		al_data_memory_barrier();
1415 	}
1416 
1417 	al_eth_tx_do_cleanup(tx_ring);
1418 
1419 	if (napi != 0) {
1420 		tx_ring->cmpl_is_running = 0;
1421 		al_data_memory_barrier();
1422 	}
1423 	/* all work done, enable IRQs */
1424 	al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
1425 }
1426 
1427 static int
1428 al_eth_tx_cmlp_irq_filter(void *arg)
1429 {
1430 	struct al_eth_ring *tx_ring = arg;
1431 
1432 	/* Interrupt should be auto-masked upon arrival */
1433 
1434 	device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
1435 	    tx_ring->ring_id);
1436 
1437 	/*
1438 	 * For napi, if work is not running, schedule it. Always schedule
1439 	 * for casual (non-napi) packet handling.
1440 	 */
1441 	if ((napi == 0) || (napi && tx_ring->cmpl_is_running == 0))
1442 		taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
1443 
1444 	/* Do not run bottom half */
1445 	return (FILTER_HANDLED);
1446 }
1447 
1448 static int
1449 al_eth_rx_recv_irq_filter(void *arg)
1450 {
1451 	struct al_eth_ring *rx_ring = arg;
1452 
1453 	/* Interrupt should be auto-masked upon arrival */
1454 
1455 	device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__,
1456 	    rx_ring->ring_id);
1457 
1458 	/*
1459 	 * For napi, if work is not running, schedule it. Always schedule
1460 	 * for casual (non-napi) packet handling.
1461 	 */
1462 	if ((napi == 0) || (napi && rx_ring->enqueue_is_running == 0))
1463 		taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
1464 
1465 	/* Do not run bottom half */
1466 	return (FILTER_HANDLED);
1467 }
1468 
1469 /*
1470  * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum
1471  * @adapter: structure containing adapter specific data
1472  * @hal_pkt: HAL structure for the packet
1473  * @mbuf: mbuf currently being received and modified
1474  */
static inline void
al_eth_rx_checksum(struct al_eth_adapter *adapter,
    struct al_eth_pkt *hal_pkt, struct mbuf *mbuf)
{

	/* if IPv4 and error */
	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM) &&
	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) &&
	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
		device_printf(adapter->dev,"rx ipv4 header checksum error\n");
		return;
	}

	/* if IPv6 and error */
	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM_IPV6) &&
	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) &&
	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
		device_printf(adapter->dev,"rx ipv6 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) ||
	   (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) {
		if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) {
			device_printf_dbg(adapter->dev, "rx L4 checksum error\n");

			/* TCP/UDP checksum error */
			mbuf->m_pkthdr.csum_flags = 0;
		} else {
			device_printf_dbg(adapter->dev, "rx checksum correct\n");

			/* IP Checksum Good */
			mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			/*
			 * NOTE(review): only the IP-header flags are set
			 * here even though the branch is reached for
			 * TCP/UDP; CSUM_DATA_VALID/CSUM_PSEUDO_HDR (plus
			 * csum_data = 0xffff) are never reported to the
			 * stack — confirm whether that is intentional.
			 */
		}
	}
}
1513 
/*
 * Turn the Rx descriptor at *next_to_clean into an mbuf ready for the
 * stack.  Small frames (len <= small_copy_len) are copied into a fresh
 * header mbuf so the large cluster stays mapped and is reused in
 * place; larger frames are unmapped and handed over directly.
 * Advances *next_to_clean by one slot; returns NULL if the slot is
 * empty or a copy mbuf cannot be allocated.
 */
static struct mbuf*
al_eth_rx_mbuf(struct al_eth_adapter *adapter,
    struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt,
    unsigned int descs, uint16_t *next_to_clean)
{
	struct mbuf *mbuf;
	struct al_eth_rx_buffer *rx_info =
	    &rx_ring->rx_buffer_info[*next_to_clean];
	unsigned int len;

	/* Received byte count as reported in the first HAL buffer. */
	len = hal_pkt->bufs[0].len;
	device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info,
	   rx_info->m);

	if (rx_info->m == NULL) {
		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
		    *next_to_clean);
		return (NULL);
	}

	mbuf = rx_info->m;
	mbuf->m_pkthdr.len = len;
	mbuf->m_len = len;
	mbuf->m_pkthdr.rcvif = rx_ring->netdev;
	mbuf->m_flags |= M_PKTHDR;

	if (len <= adapter->small_copy_len) {
		struct mbuf *smbuf;
		device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len);

		/* Copy path: allocate a small header mbuf and duplicate
		 * the payload; the original cluster stays in the ring. */
		AL_RX_LOCK(adapter);
		smbuf = m_gethdr(M_NOWAIT, MT_DATA);
		AL_RX_UNLOCK(adapter);
		if (__predict_false(smbuf == NULL)) {
			device_printf(adapter->dev, "smbuf is NULL\n");
			return (NULL);
		}

		/* Skip the alignment headroom in both buffers. */
		smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
		memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len);

		smbuf->m_len = len;
		smbuf->m_pkthdr.rcvif = rx_ring->netdev;

		/* first desc of a non-ps chain */
		smbuf->m_flags |= M_PKTHDR;
		smbuf->m_pkthdr.len = smbuf->m_len;

		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
		    *next_to_clean);

		return (smbuf);
	}
	/* Hand-off path: skip the alignment headroom in the cluster. */
	mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET;

	/* Unmap the buffer */
	bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map);

	/* The slot is now empty; the refill path will repopulate it. */
	rx_info->m = NULL;
	*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean);

	return (mbuf);
}
1577 
/*
 * Rx taskqueue handler: drain completed packets from the queue's UDMA,
 * hand them to LRO or if_input(), refill the ring with fresh buffers,
 * flush pending LRO work, and finally re-enable the queue's interrupt.
 */
static void
al_eth_rx_recv_work(void *arg, int pending)
{
	struct al_eth_ring *rx_ring = arg;
	struct mbuf *mbuf;
	struct lro_entry *queued;
	unsigned int qid = rx_ring->ring_id;
	struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
	uint16_t next_to_clean = rx_ring->next_to_clean;
	uint32_t refill_required;
	uint32_t refill_actual;
	uint32_t do_if_input;

	/* In napi mode, flag this handler as active so the IRQ filter
	 * does not schedule it again while it runs. */
	if (napi != 0) {
		rx_ring->enqueue_is_running = 1;
		al_data_memory_barrier();
	}

	do {
		unsigned int descs;

		/* descs == 0 means no more completed packets. */
		descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt);
		if (unlikely(descs == 0))
			break;

		device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet "
		    "from hal. descs %d\n", qid, descs);
		device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. "
		    "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags,
		    hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx);

		/* ignore if detected dma or eth controller errors */
		if ((hal_pkt->flags & (AL_ETH_RX_ERROR |
		    AL_UDMA_CDESC_ERROR)) != 0) {
			device_printf(rx_ring->dev, "receive packet with error. "
			    "flags = 0x%x\n", hal_pkt->flags);
			/* Skip all descriptors of the bad packet. */
			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
			    next_to_clean, descs);
			continue;
		}

		/* allocate mbuf and fill it */
		mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs,
		    &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(mbuf == NULL)) {
			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
			    next_to_clean, descs);
			break;
		}

		if (__predict_true(if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM ||
		    if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM_IPV6)) {
			al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf);
		}

		/* Tag with the queue id so the stack keeps flow affinity. */
		mbuf->m_pkthdr.flowid = qid;
		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);

		/*
		 * LRO is only for IP/TCP packets and TCP checksum of the packet
		 * should be computed by hardware.
		 */
		do_if_input = 1;
		if ((rx_ring->lro_enabled != 0) &&
		    ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
		    hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) {
			/*
			 * Send to the stack if:
			 *  - LRO not enabled, or
			 *  - no LRO resources, or
			 *  - lro enqueue fails
			 */
			if (rx_ring->lro.lro_cnt != 0) {
				if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
					do_if_input = 0;
			}
		}

		if (do_if_input)
			if_input(rx_ring->netdev, mbuf);

	} while (1);

	rx_ring->next_to_clean = next_to_clean;

	/* Repost as many buffers as the UDMA has free descriptors. */
	refill_required = al_udma_available_get(rx_ring->dma_q);
	refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid,
	    refill_required);

	if (unlikely(refill_actual < refill_required)) {
		device_printf_dbg(rx_ring->dev,
		    "%s: not filling rx queue %d\n", __func__, qid);
	}

	/* Push any aggregated LRO segments up to the stack. */
	while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) {
		LIST_REMOVE(queued, next);
		tcp_lro_flush(&rx_ring->lro, queued);
	}

	if (napi != 0) {
		rx_ring->enqueue_is_running = 0;
		al_data_memory_barrier();
	}
	/* unmask irq */
	al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val);
}
1686 
/*
 * Tx taskqueue handler: drain the ring's buf_ring and push each mbuf
 * into the Tx UDMA via al_eth_xmit_mbuf().
 */
static void
al_eth_start_xmit(void *arg, int pending)
{
	struct al_eth_ring *tx_ring = arg;
	struct mbuf *mbuf;

	/* In napi mode, flag this handler as active so al_mq_start()
	 * can skip scheduling it while it already runs. */
	if (napi != 0) {
		tx_ring->enqueue_is_running = 1;
		al_data_memory_barrier();
	}

	while (1) {
		mtx_lock(&tx_ring->br_mtx);
		mbuf = drbr_dequeue(NULL, tx_ring->br);
		mtx_unlock(&tx_ring->br_mtx);

		if (mbuf == NULL)
			break;

		al_eth_xmit_mbuf(tx_ring, mbuf);
	}

	if (napi != 0) {
		tx_ring->enqueue_is_running = 0;
		al_data_memory_barrier();
		/*
		 * Drain once more after clearing the flag: a concurrent
		 * al_mq_start() may have enqueued a packet while the
		 * flag was still set (and thus skipped scheduling this
		 * task); this second pass picks that packet up instead
		 * of leaving it stranded in the buf_ring.
		 */
		while (1) {
			mtx_lock(&tx_ring->br_mtx);
			mbuf = drbr_dequeue(NULL, tx_ring->br);
			mtx_unlock(&tx_ring->br_mtx);
			if (mbuf == NULL)
				break;
			al_eth_xmit_mbuf(tx_ring, mbuf);
		}
	}
}
1722 
1723 static int
1724 al_mq_start(if_t ifp, struct mbuf *m)
1725 {
1726 	struct al_eth_adapter *adapter = if_getsoftc(ifp);
1727 	struct al_eth_ring *tx_ring;
1728 	int i;
1729 	int ret;
1730 
1731 	/* Which queue to use */
1732 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1733 		i = m->m_pkthdr.flowid % adapter->num_tx_queues;
1734 	else
1735 		i = curcpu % adapter->num_tx_queues;
1736 
1737 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1738 	    IFF_DRV_RUNNING) {
1739 		return (EFAULT);
1740 	}
1741 
1742 	tx_ring = &adapter->tx_ring[i];
1743 
1744 	device_printf_dbg(adapter->dev, "dgb start() - assuming link is active, "
1745 	    "sending packet to queue %d\n", i);
1746 
1747 	ret = drbr_enqueue(ifp, tx_ring->br, m);
1748 
1749 	/*
1750 	 * For napi, if work is not running, schedule it. Always schedule
1751 	 * for casual (non-napi) packet handling.
1752 	 */
1753 	if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0)))
1754 		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
1755 
1756 	return (ret);
1757 }
1758 
1759 static void
1760 al_qflush(if_t ifp)
1761 {
1762 
1763 	/* unused */
1764 }
1765 
1766 static inline void
1767 al_eth_flow_ctrl_init(struct al_eth_adapter *adapter)
1768 {
1769 	uint8_t default_flow_ctrl;
1770 
1771 	default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE;
1772 	default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE;
1773 
1774 	adapter->link_config.flow_ctrl_supported = default_flow_ctrl;
1775 }
1776 
1777 static int
1778 al_eth_flow_ctrl_config(struct al_eth_adapter *adapter)
1779 {
1780 	struct al_eth_flow_control_params *flow_ctrl_params;
1781 	uint8_t active = adapter->link_config.flow_ctrl_active;
1782 	int i;
1783 
1784 	flow_ctrl_params = &adapter->flow_ctrl_params;
1785 
1786 	flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
1787 	flow_ctrl_params->obay_enable =
1788 	    ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
1789 	flow_ctrl_params->gen_enable =
1790 	    ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);
1791 
1792 	flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH;
1793 	flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW;
1794 	flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA;
1795 	flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH;
1796 
1797 	/* map priority to queue index, queue id = priority/2 */
1798 	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
1799 		flow_ctrl_params->prio_q_map[0][i] =  1 << (i >> 1);
1800 
1801 	al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params);
1802 
1803 	return (0);
1804 }
1805 
1806 static void
1807 al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter)
1808 {
1809 
1810 	/*
1811 	 * change the active configuration to the default / force by ethtool
1812 	 * and call to configure
1813 	 */
1814 	adapter->link_config.flow_ctrl_active =
1815 	    adapter->link_config.flow_ctrl_supported;
1816 
1817 	al_eth_flow_ctrl_config(adapter);
1818 }
1819 
1820 static void
1821 al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter)
1822 {
1823 
1824 	adapter->link_config.flow_ctrl_active = 0;
1825 	al_eth_flow_ctrl_config(adapter);
1826 }
1827 
1828 static int
1829 al_eth_hw_init(struct al_eth_adapter *adapter)
1830 {
1831 	int rc;
1832 
1833 	rc = al_eth_hw_init_adapter(adapter);
1834 	if (rc != 0)
1835 		return (rc);
1836 
1837 	rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
1838 	if (rc < 0) {
1839 		device_printf(adapter->dev, "%s failed to configure mac!\n",
1840 		    __func__);
1841 		return (rc);
1842 	}
1843 
1844 	if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
1845 	    (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
1846 	     adapter->phy_exist == false)) {
1847 		rc = al_eth_mac_link_config(&adapter->hal_adapter,
1848 		    adapter->link_config.force_1000_base_x,
1849 		    adapter->link_config.autoneg,
1850 		    adapter->link_config.active_speed,
1851 		    adapter->link_config.active_duplex);
1852 		if (rc != 0) {
1853 			device_printf(adapter->dev,
1854 			    "%s failed to configure link parameters!\n",
1855 			    __func__);
1856 			return (rc);
1857 		}
1858 	}
1859 
1860 	rc = al_eth_mdio_config(&adapter->hal_adapter,
1861 	    AL_ETH_MDIO_TYPE_CLAUSE_22, AL_TRUE /* shared_mdio_if */,
1862 	    adapter->ref_clk_freq, adapter->mdio_freq);
1863 	if (rc != 0) {
1864 		device_printf(adapter->dev, "%s failed at mdio config!\n",
1865 		    __func__);
1866 		return (rc);
1867 	}
1868 
1869 	al_eth_flow_ctrl_init(adapter);
1870 
1871 	return (rc);
1872 }
1873 
1874 static int
1875 al_eth_hw_stop(struct al_eth_adapter *adapter)
1876 {
1877 
1878 	al_eth_mac_stop(&adapter->hal_adapter);
1879 
1880 	/*
1881 	 * wait till pending rx packets written and UDMA becomes idle,
1882 	 * the MAC has ~10KB fifo, 10us should be enough time for the
1883 	 * UDMA to write to the memory
1884 	 */
1885 	DELAY(10);
1886 
1887 	al_eth_adapter_stop(&adapter->hal_adapter);
1888 
1889 	adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED;
1890 
1891 	/* disable flow ctrl to avoid pause packets*/
1892 	al_eth_flow_ctrl_disable(adapter);
1893 
1894 	return (0);
1895 }
1896 
1897 /*
1898  * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts
1899  * @irq: interrupt number
1900  * @data: pointer to a network interface device structure
1901  */
static int
al_eth_intr_intx_all(void *data)
{
	struct al_eth_adapter *adapter = data;

	struct unit_regs __iomem *regs_base =
	    (struct unit_regs __iomem *)adapter->udma_base;
	uint32_t reg;

	/* Group A summarizes which other groups (B/C/D) are pending. */
	reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_A);
	if (likely(reg))
		device_printf_dbg(adapter->dev, "%s group A cause %x\n",
		    __func__, reg);

	/* Group D: management/secondary interrupts — only logged here. */
	if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) {
		struct al_iofic_grp_ctrl __iomem *sec_ints_base;
		uint32_t cause_d =  al_udma_iofic_read_cause(regs_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);

		sec_ints_base =
		    &regs_base->gen.interrupt_regs.secondary_iofic_ctrl[0];
		if (cause_d != 0) {
			device_printf_dbg(adapter->dev,
			    "got interrupt from group D. cause %x\n", cause_d);

			cause_d = al_iofic_read_cause(sec_ints_base,
			    AL_INT_GROUP_A);
			device_printf(adapter->dev,
			    "secondary A cause %x\n", cause_d);

			cause_d = al_iofic_read_cause(sec_ints_base,
			    AL_INT_GROUP_B);

			device_printf_dbg(adapter->dev,
			    "secondary B cause %x\n", cause_d);
		}
	}
	/* Group B: one bit per Rx queue; mask each pending queue's bit. */
	if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0 ) {
		uint32_t cause_b = al_udma_iofic_read_cause(regs_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
		int qid;
		device_printf_dbg(adapter->dev, "secondary B cause %x\n",
		    cause_b);
		for (qid = 0; qid < adapter->num_rx_queues; qid++) {
			if (cause_b & (1 << qid)) {
				/* mask */
				al_udma_iofic_mask(
				    (struct unit_regs __iomem *)adapter->udma_base,
				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
				    AL_INT_GROUP_B, 1 << qid);
			}
		}
	}
	/* Group C: one bit per Tx queue; mask each pending queue's bit. */
	if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) {
		uint32_t cause_c = al_udma_iofic_read_cause(regs_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
		int qid;
		device_printf_dbg(adapter->dev, "secondary C cause %x\n", cause_c);
		for (qid = 0; qid < adapter->num_tx_queues; qid++) {
			if ((cause_c & (1 << qid)) != 0) {
				al_udma_iofic_mask(
				    (struct unit_regs __iomem *)adapter->udma_base,
				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
				    AL_INT_GROUP_C, 1 << qid);
			}
		}
	}

	/*
	 * NOTE(review): only tx_ring[0]'s completion filter is kicked
	 * here, regardless of which queue bits were pending — confirm
	 * this is intended for the legacy-INTx (single vector) case.
	 */
	al_eth_tx_cmlp_irq_filter(adapter->tx_ring);

	return (0);
}
1975 
1976 static int
1977 al_eth_intr_msix_all(void *data)
1978 {
1979 	struct al_eth_adapter *adapter = data;
1980 
1981 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1982 	return (0);
1983 }
1984 
1985 static int
1986 al_eth_intr_msix_mgmt(void *data)
1987 {
1988 	struct al_eth_adapter *adapter = data;
1989 
1990 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1991 	return (0);
1992 }
1993 
1994 static int
1995 al_eth_enable_msix(struct al_eth_adapter *adapter)
1996 {
1997 	int i, msix_vecs, rc, count;
1998 
1999 	device_printf_dbg(adapter->dev, "%s\n", __func__);
2000 	msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues;
2001 
2002 	device_printf_dbg(adapter->dev,
2003 	    "Try to enable MSIX, vector numbers = %d\n", msix_vecs);
2004 
2005 	adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries),
2006 	    M_IFAL, M_ZERO | M_WAITOK);
2007 	/* management vector (GROUP_A) @2*/
2008 	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
2009 	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2010 
2011 	/* rx queues start @3 */
2012 	for (i = 0; i < adapter->num_rx_queues; i++) {
2013 		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2014 
2015 		adapter->msix_entries[irq_idx].entry = 3 + i;
2016 		adapter->msix_entries[irq_idx].vector = 0;
2017 	}
2018 	/* tx queues start @7 */
2019 	for (i = 0; i < adapter->num_tx_queues; i++) {
2020 		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2021 
2022 		adapter->msix_entries[irq_idx].entry = 3 +
2023 		    AL_ETH_MAX_HW_QUEUES + i;
2024 		adapter->msix_entries[irq_idx].vector = 0;
2025 	}
2026 
2027 	count = msix_vecs + 2; /* entries start from 2 */
2028 	rc = pci_alloc_msix(adapter->dev, &count);
2029 
2030 	if (rc != 0) {
2031 		device_printf_dbg(adapter->dev, "failed to allocate MSIX "
2032 		    "vectors %d\n", msix_vecs+2);
2033 		device_printf_dbg(adapter->dev, "ret = %d\n", rc);
2034 		goto msix_entries_exit;
2035 	}
2036 
2037 	if (count != msix_vecs + 2) {
2038 		device_printf_dbg(adapter->dev, "failed to allocate all MSIX "
2039 		    "vectors %d, allocated %d\n", msix_vecs+2, count);
2040 		rc = ENOSPC;
2041 		goto msix_entries_exit;
2042 	}
2043 
2044 	for (i = 0; i < msix_vecs; i++)
2045 	    adapter->msix_entries[i].vector = 2 + 1 + i;
2046 
2047 	device_printf_dbg(adapter->dev, "successfully enabled MSIX,"
2048 	    " vectors %d\n", msix_vecs);
2049 
2050 	adapter->msix_vecs = msix_vecs;
2051 	adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED;
2052 	goto exit;
2053 
2054 msix_entries_exit:
2055 	adapter->msix_vecs = 0;
2056 	free(adapter->msix_entries, M_IFAL);
2057 	adapter->msix_entries = NULL;
2058 
2059 exit:
2060 	return (rc);
2061 }
2062 
/*
 * Choose the interrupt dispatch mode and populate adapter->irq_tbl.
 *
 * First enables MSI-X via al_eth_enable_msix(); based on the number of
 * vectors obtained, each irq_tbl slot is filled with a name, a filter
 * handler, its softc/ring pointer and the bus vector number:
 *  - msix_vecs == 0: single shared INTx handler for everything,
 *  - msix_vecs == 1: single MSI-X handler for everything,
 *  - otherwise:      one management vector plus one vector per Rx queue
 *                    and one per Tx queue.
 *
 * Returns 0 on success or the errno from al_eth_enable_msix().
 */
static int
al_eth_setup_int_mode(struct al_eth_adapter *adapter)
{
	int i, rc;

	rc = al_eth_enable_msix(adapter);
	if (rc != 0) {
		device_printf(adapter->dev, "Failed to enable MSIX mode.\n");
		return (rc);
	}

	/* At least one table entry (the management slot) is always used. */
	adapter->irq_vecs = max(1, adapter->msix_vecs);
	/* single INTX mode */
	if (adapter->msix_vecs == 0) {
		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
		    AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s",
		    device_get_name(adapter->dev));
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
		    al_eth_intr_intx_all;
		/* IRQ vector will be resolved from device resources */
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0;
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;

		device_printf(adapter->dev, "%s and vector %d \n", __func__,
		    adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector);

		return (0);
	}
	/* single MSI-X mode */
	if (adapter->msix_vecs == 1) {
		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
		    AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s",
		    device_get_name(adapter->dev));
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
		    al_eth_intr_msix_all;
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
		    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;

		return (0);
	}
	/* MSI-X per queue */
	snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE,
	    "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev));
	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt;

	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
	    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;

	/* One Rx completion vector per Rx queue, handler gets its ring. */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);

		snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE,
		    "al-eth-rx-comp-%d@pci:%s", i,
		    device_get_name(adapter->dev));
		adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter;
		adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i];
		adapter->irq_tbl[irq_idx].vector =
		    adapter->msix_entries[irq_idx].vector;
	}

	/* One Tx completion vector per Tx queue, handler gets its ring. */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);

		snprintf(adapter->irq_tbl[irq_idx].name,
		    AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i,
		    device_get_name(adapter->dev));
		adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter;
		adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i];
		adapter->irq_tbl[irq_idx].vector =
		    adapter->msix_entries[irq_idx].vector;
	}

	return (0);
}
2139 
2140 static void
2141 __al_eth_free_irq(struct al_eth_adapter *adapter)
2142 {
2143 	struct al_eth_irq *irq;
2144 	int i, rc;
2145 
2146 	for (i = 0; i < adapter->irq_vecs; i++) {
2147 		irq = &adapter->irq_tbl[i];
2148 		if (irq->requested != 0) {
2149 			device_printf_dbg(adapter->dev, "tear down irq: %d\n",
2150 			    irq->vector);
2151 			rc = bus_teardown_intr(adapter->dev, irq->res,
2152 			    irq->cookie);
2153 			if (rc != 0)
2154 				device_printf(adapter->dev, "failed to tear "
2155 				    "down irq: %d\n", irq->vector);
2156 		}
2157 		irq->requested = 0;
2158 	}
2159 }
2160 
/*
 * Release all interrupt state, in dependency order: tear down the
 * installed handlers (__al_eth_free_irq()), release the bus IRQ
 * resources, release the MSI vectors and finally free the MSI-X entry
 * table allocated by al_eth_enable_msix().
 */
static void
al_eth_free_irq(struct al_eth_adapter *adapter)
{
	struct al_eth_irq *irq;
	int i, rc;
#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif

	/* Handlers must be gone before their resources are released. */
	__al_eth_free_irq(adapter);

	for (i = 0; i < adapter->irq_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		/* Entries that were never allocated are skipped. */
		if (irq->res == NULL)
			continue;
		device_printf_dbg(adapter->dev, "release resource irq: %d\n",
		    irq->vector);
		rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector,
		    irq->res);
		irq->res = NULL;
		if (rc != 0)
			device_printf(adapter->dev, "dev has no parent while "
			    "releasing res for irq: %d\n", irq->vector);
	}

	/* Give the MSI-X vectors back to the bus. */
	pci_release_msi(adapter->dev);

	adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;

	adapter->msix_vecs = 0;
	free(adapter->msix_entries, M_IFAL);
	adapter->msix_entries = NULL;
}
2197 
2198 static int
2199 al_eth_request_irq(struct al_eth_adapter *adapter)
2200 {
2201 	unsigned long flags;
2202 	struct al_eth_irq *irq;
2203 	int rc = 0, i, v;
2204 
2205 	if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0)
2206 		flags = RF_ACTIVE;
2207 	else
2208 		flags = RF_ACTIVE | RF_SHAREABLE;
2209 
2210 	for (i = 0; i < adapter->irq_vecs; i++) {
2211 		irq = &adapter->irq_tbl[i];
2212 
2213 		if (irq->requested != 0)
2214 			continue;
2215 
2216 		irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ,
2217 		    &irq->vector, flags);
2218 		if (irq->res == NULL) {
2219 			device_printf(adapter->dev, "could not allocate "
2220 			    "irq vector=%d\n", irq->vector);
2221 			rc = ENXIO;
2222 			goto exit_res;
2223 		}
2224 
2225 		if ((rc = bus_setup_intr(adapter->dev, irq->res,
2226 		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler,
2227 		    NULL, irq->data, &irq->cookie)) != 0) {
2228 			device_printf(adapter->dev, "failed to register "
2229 			    "interrupt handler for irq %ju: %d\n",
2230 			    (uintmax_t)rman_get_start(irq->res), rc);
2231 			goto exit_intr;
2232 		}
2233 		irq->requested = 1;
2234 	}
2235 	goto exit;
2236 
2237 exit_intr:
2238 	v = i - 1; /* -1 because we omit the operation that failed */
2239 	while (v-- >= 0) {
2240 		int bti;
2241 		irq = &adapter->irq_tbl[v];
2242 		bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie);
2243 		if (bti != 0) {
2244 			device_printf(adapter->dev, "failed to tear "
2245 			    "down irq: %d\n", irq->vector);
2246 		}
2247 
2248 		irq->requested = 0;
2249 		device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n",
2250 		    irq->vector);
2251 	}
2252 
2253 exit_res:
2254 	v = i - 1; /* -1 because we omit the operation that failed */
2255 	while (v-- >= 0) {
2256 		int brr;
2257 		irq = &adapter->irq_tbl[v];
2258 		device_printf_dbg(adapter->dev, "exit_res: releasing resource"
2259 		    " for irq %d\n", irq->vector);
2260 		brr = bus_release_resource(adapter->dev, SYS_RES_IRQ,
2261 		    irq->vector, irq->res);
2262 		if (brr != 0)
2263 			device_printf(adapter->dev, "dev has no parent while "
2264 			    "releasing res for irq: %d\n", irq->vector);
2265 		irq->res = NULL;
2266 	}
2267 
2268 exit:
2269 	return (rc);
2270 }
2271 
2272 /**
2273  * al_eth_setup_tx_resources - allocate Tx resources (Descriptors)
2274  * @adapter: network interface device structure
2275  * @qid: queue index
2276  *
2277  * Return 0 on success, negative on failure
2278  **/
static int
al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
{
	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
	device_t dev = tx_ring->dev;
	struct al_udma_q_params *q_params = &tx_ring->q_params;
	int size;
	int ret;

	/* Rings survive across a reconfigure while the interface is up. */
	if (adapter->up)
		return (0);

	size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;

	/* Per-descriptor software state: mbuf pointer plus its DMA map. */
	tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
	tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
	q_params->size = tx_ring->hw_count;

	/* DMA-coherent memory for the hardware descriptor ring. */
	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
	    (bus_dmamap_t *)&q_params->desc_phy_base_map,
	    (bus_addr_t *)&q_params->desc_phy_base,
	    (void**)&q_params->desc_base, tx_ring->descs_size);
	if (ret != 0) {
		device_printf(dev, "failed to al_dma_alloc_coherent,"
		    " ret = %d\n", ret);
		/* NOTE(review): tx_buffer_info leaks on this path. */
		return (ENOMEM);
	}

	/* NOTE(review): tx_buffer_info also leaks on this path. */
	if (q_params->desc_base == NULL)
		return (ENOMEM);

	device_printf_dbg(dev, "Initializing ring queues %d\n", qid);

	/* Allocate Ring Queue */
	mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
	tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
	    &tx_ring->br_mtx);

	/* Allocate taskqueues */
	TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
	tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq",
	    device_get_nameunit(adapter->dev));
	TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring);
	tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->cmpl_tq);
	taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq",
	    device_get_nameunit(adapter->dev));

	/* Setup DMA descriptor areas. */
	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AL_TSO_SIZE,		/* maxsize */
	    AL_ETH_PKT_MAX_BUFS,	/* nsegments */
	    PAGE_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &tx_ring->dma_buf_tag);

	if (ret != 0) {
		device_printf(dev,"Unable to allocate dma_buf_tag, ret = %d\n",
		    ret);
		/*
		 * NOTE(review): resources allocated above are not undone
		 * here; confirm the caller's unwind covers this queue.
		 */
		return (ret);
	}

	/* One DMA map per software Tx slot ('size' is reused as an index). */
	for (size = 0; size < tx_ring->sw_count; size++) {
		ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0,
		    &tx_ring->tx_buffer_info[size].dma_map);
		if (ret != 0) {
			device_printf(dev, "Unable to map DMA TX "
			    "buffer memory [iter=%d]\n", size);
			return (ret);
		}
	}

	/* completion queue not used for tx */
	q_params->cdesc_base = NULL;
	/* size in bytes of the udma completion ring descriptor */
	q_params->cdesc_size = 8;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return (0);
}
2368 
2369 /*
2370  * al_eth_free_tx_resources - Free Tx Resources per Queue
2371  * @adapter: network interface device structure
2372  * @qid: queue index
2373  *
2374  * Free all transmit software resources
2375  */
static void
al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid)
{
	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
	struct al_udma_q_params *q_params = &tx_ring->q_params;
	int size;

	/* At this point interrupts' handlers must be deactivated */
	while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL))
		taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task);

	taskqueue_free(tx_ring->cmpl_tq);
	while (taskqueue_cancel(tx_ring->enqueue_tq,
	    &tx_ring->enqueue_task, NULL)) {
		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
	}

	taskqueue_free(tx_ring->enqueue_tq);

	/* Drop any mbufs still queued in the buf ring before freeing it. */
	if (tx_ring->br != NULL) {
		drbr_flush(adapter->netdev, tx_ring->br);
		buf_ring_free(tx_ring->br, M_DEVBUF);
	}

	/*
	 * Free any in-flight mbufs and their DMA maps ('size' is reused
	 * as a loop index here).
	 */
	for (size = 0; size < tx_ring->sw_count; size++) {
		m_freem(tx_ring->tx_buffer_info[size].m);
		tx_ring->tx_buffer_info[size].m = NULL;

		bus_dmamap_unload(tx_ring->dma_buf_tag,
		    tx_ring->tx_buffer_info[size].dma_map);
		bus_dmamap_destroy(tx_ring->dma_buf_tag,
		    tx_ring->tx_buffer_info[size].dma_map);
	}
	bus_dma_tag_destroy(tx_ring->dma_buf_tag);

	free(tx_ring->tx_buffer_info, M_IFAL);
	tx_ring->tx_buffer_info = NULL;

	mtx_destroy(&tx_ring->br_mtx);

	/* if not set, then don't free */
	if (q_params->desc_base == NULL)
		return;

	/* Release the DMA-coherent descriptor ring. */
	al_dma_free_coherent(q_params->desc_phy_base_tag,
	    q_params->desc_phy_base_map, q_params->desc_base);

	/* Cleared so the all-queues loop and repeated calls skip this ring. */
	q_params->desc_base = NULL;
}
2425 
2426 /*
2427  * al_eth_free_all_tx_resources - Free Tx Resources for All Queues
2428  * @adapter: board private structure
2429  *
2430  * Free all transmit software resources
2431  */
2432 static void
2433 al_eth_free_all_tx_resources(struct al_eth_adapter *adapter)
2434 {
2435 	int i;
2436 
2437 	for (i = 0; i < adapter->num_tx_queues; i++)
2438 		if (adapter->tx_ring[i].q_params.desc_base)
2439 			al_eth_free_tx_resources(adapter, i);
2440 }
2441 
2442 /*
2443  * al_eth_setup_rx_resources - allocate Rx resources (Descriptors)
2444  * @adapter: network interface device structure
2445  * @qid: queue index
2446  *
2447  * Returns 0 on success, negative on failure
2448  */
static int
al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
{
	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
	device_t dev = rx_ring->dev;
	struct al_udma_q_params *q_params = &rx_ring->q_params;
	int size;
	int ret;

	size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count;

	/* alloc extra element so in rx path we can always prefetch rx_info + 1 */
	size += 1;

	/* Per-descriptor software state: mbuf pointer plus its DMA map. */
	rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
	rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
	q_params->size = rx_ring->hw_count;

	/* DMA-coherent memory for the hardware submission descriptor ring. */
	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
	    &q_params->desc_phy_base_map,
	    (bus_addr_t *)&q_params->desc_phy_base,
	    (void**)&q_params->desc_base, rx_ring->descs_size);

	/* NOTE(review): rx_buffer_info leaks on this path. */
	if ((q_params->desc_base == NULL) || (ret != 0))
		return (ENOMEM);

	/* size in bytes of the udma completion ring descriptor */
	q_params->cdesc_size = 16;
	rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size;
	/* DMA-coherent memory for the hardware completion descriptor ring. */
	ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag,
	    &q_params->cdesc_phy_base_map,
	    (bus_addr_t *)&q_params->cdesc_phy_base,
	    (void**)&q_params->cdesc_base, rx_ring->cdescs_size);

	/* NOTE(review): rx_buffer_info and the desc ring leak on this path. */
	if ((q_params->cdesc_base == NULL) || (ret != 0))
		return (ENOMEM);

	/* Allocate taskqueues */
	NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
	rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
	taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
	    device_get_nameunit(adapter->dev));

	/* Setup DMA descriptor areas. */
	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AL_TSO_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    AL_TSO_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &rx_ring->dma_buf_tag);

	if (ret != 0) {
		device_printf(dev,"Unable to allocate RX dma_buf_tag\n");
		return (ret);
	}

	/* One DMA map per software Rx slot ('size' is reused as an index). */
	for (size = 0; size < rx_ring->sw_count; size++) {
		ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0,
		    &rx_ring->rx_buffer_info[size].dma_map);
		if (ret != 0) {
			device_printf(dev,"Unable to map DMA RX buffer memory\n");
			return (ret);
		}
	}

	/* Zero out the descriptor ring */
	memset(q_params->cdesc_base, 0, rx_ring->cdescs_size);

	/* Create LRO for the ring */
	if ((if_getcapenable(adapter->netdev) & IFCAP_LRO) != 0) {
		int err = tcp_lro_init(&rx_ring->lro);
		if (err != 0) {
			/* Non-fatal: the ring simply runs without LRO. */
			device_printf(adapter->dev,
			    "LRO[%d] Initialization failed!\n", qid);
		} else {
			device_printf_dbg(adapter->dev,
			    "RX Soft LRO[%d] Initialized\n", qid);
			rx_ring->lro_enabled = true;
			rx_ring->lro.ifp = adapter->netdev;
		}
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return (0);
}
2543 
2544 /*
2545  * al_eth_free_rx_resources - Free Rx Resources
2546  * @adapter: network interface device structure
2547  * @qid: queue index
2548  *
2549  * Free all receive software resources
2550  */
2551 static void
2552 al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2553 {
2554 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2555 	struct al_udma_q_params *q_params = &rx_ring->q_params;
2556 	int size;
2557 
2558 	/* At this point interrupts' handlers must be deactivated */
2559 	while (taskqueue_cancel(rx_ring->enqueue_tq,
2560 	    &rx_ring->enqueue_task, NULL)) {
2561 		taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
2562 	}
2563 
2564 	taskqueue_free(rx_ring->enqueue_tq);
2565 
2566 	for (size = 0; size < rx_ring->sw_count; size++) {
2567 		m_freem(rx_ring->rx_buffer_info[size].m);
2568 		rx_ring->rx_buffer_info[size].m = NULL;
2569 		bus_dmamap_unload(rx_ring->dma_buf_tag,
2570 		    rx_ring->rx_buffer_info[size].dma_map);
2571 		bus_dmamap_destroy(rx_ring->dma_buf_tag,
2572 		    rx_ring->rx_buffer_info[size].dma_map);
2573 	}
2574 	bus_dma_tag_destroy(rx_ring->dma_buf_tag);
2575 
2576 	free(rx_ring->rx_buffer_info, M_IFAL);
2577 	rx_ring->rx_buffer_info = NULL;
2578 
2579 	/* if not set, then don't free */
2580 	if (q_params->desc_base == NULL)
2581 		return;
2582 
2583 	al_dma_free_coherent(q_params->desc_phy_base_tag,
2584 	    q_params->desc_phy_base_map, q_params->desc_base);
2585 
2586 	q_params->desc_base = NULL;
2587 
2588 	/* if not set, then don't free */
2589 	if (q_params->cdesc_base == NULL)
2590 		return;
2591 
2592 	al_dma_free_coherent(q_params->cdesc_phy_base_tag,
2593 	    q_params->cdesc_phy_base_map, q_params->cdesc_base);
2594 
2595 	q_params->cdesc_phy_base = 0;
2596 
2597 	/* Free LRO resources */
2598 	tcp_lro_free(&rx_ring->lro);
2599 }
2600 
2601 /*
2602  * al_eth_free_all_rx_resources - Free Rx Resources for All Queues
2603  * @adapter: board private structure
2604  *
2605  * Free all receive software resources
2606  */
2607 static void
2608 al_eth_free_all_rx_resources(struct al_eth_adapter *adapter)
2609 {
2610 	int i;
2611 
2612 	for (i = 0; i < adapter->num_rx_queues; i++)
2613 		if (adapter->rx_ring[i].q_params.desc_base != 0)
2614 			al_eth_free_rx_resources(adapter, i);
2615 }
2616 
2617 /*
2618  * al_eth_setup_all_rx_resources - allocate all queues Rx resources
2619  * @adapter: board private structure
2620  *
2621  * Return 0 on success, negative on failure
2622  */
2623 static int
2624 al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter)
2625 {
2626 	int i, rc = 0;
2627 
2628 	for (i = 0; i < adapter->num_rx_queues; i++) {
2629 		rc = al_eth_setup_rx_resources(adapter, i);
2630 		if (rc == 0)
2631 			continue;
2632 
2633 		device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i);
2634 		goto err_setup_rx;
2635 	}
2636 	return (0);
2637 
2638 err_setup_rx:
2639 	/* rewind the index freeing the rings as we go */
2640 	while (i--)
2641 		al_eth_free_rx_resources(adapter, i);
2642 	return (rc);
2643 }
2644 
2645 /*
2646  * al_eth_setup_all_tx_resources - allocate all queues Tx resources
2647  * @adapter: private structure
2648  *
2649  * Return 0 on success, negative on failure
2650  */
2651 static int
2652 al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
2653 {
2654 	int i, rc = 0;
2655 
2656 	for (i = 0; i < adapter->num_tx_queues; i++) {
2657 		rc = al_eth_setup_tx_resources(adapter, i);
2658 		if (rc == 0)
2659 			continue;
2660 
2661 		device_printf(adapter->dev,
2662 		    "Allocation for Tx Queue %u failed\n", i);
2663 		goto err_setup_tx;
2664 	}
2665 
2666 	return (0);
2667 
2668 err_setup_tx:
2669 	/* rewind the index freeing the rings as we go */
2670 	while (i--)
2671 		al_eth_free_tx_resources(adapter, i);
2672 
2673 	return (rc);
2674 }
2675 
2676 static void
2677 al_eth_disable_int_sync(struct al_eth_adapter *adapter)
2678 {
2679 
2680 	/* disable forwarding interrupts from eth through pci end point */
2681 	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
2682 	    (adapter->board_type == ALPINE_NIC)) {
2683 		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
2684 		    AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR);
2685 	}
2686 
2687 	/* mask hw interrupts */
2688 	al_eth_interrupts_mask(adapter);
2689 }
2690 
2691 static void
2692 al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
2693 {
2694 	uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summery */
2695 	uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1;/* bit per Rx q*/
2696 	uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1;/* bit per Tx q*/
2697 	uint32_t group_d_mask = 3 << 8;
2698 	struct unit_regs __iomem *regs_base =
2699 	    (struct unit_regs __iomem *)adapter->udma_base;
2700 
2701 	if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
2702 		group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
2703 		    AL_INT_GROUP_A_GROUP_C_SUM |
2704 		    AL_INT_GROUP_A_GROUP_D_SUM;
2705 
2706 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2707 	    AL_INT_GROUP_A, group_a_mask);
2708 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2709 	    AL_INT_GROUP_B, group_b_mask);
2710 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2711 	    AL_INT_GROUP_C, group_c_mask);
2712 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2713 	    AL_INT_GROUP_D, group_d_mask);
2714 }
2715 
2716 static void
2717 al_eth_interrupts_mask(struct al_eth_adapter *adapter)
2718 {
2719 	struct unit_regs __iomem *regs_base =
2720 	    (struct unit_regs __iomem *)adapter->udma_base;
2721 
2722 	/* mask all interrupts */
2723 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2724 	    AL_INT_GROUP_A, AL_MASK_GROUP_A_INT);
2725 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2726 	    AL_INT_GROUP_B, AL_MASK_GROUP_B_INT);
2727 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2728 	    AL_INT_GROUP_C, AL_MASK_GROUP_C_INT);
2729 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2730 	    AL_INT_GROUP_D, AL_MASK_GROUP_D_INT);
2731 }
2732 
/*
 * Program the UDMA interrupt controller (iofic): pick legacy INTx or
 * MSI-X-per-queue based on how many vectors were allocated, apply the
 * error/abort mask adjustments and set the moderation resolution for
 * the Rx (group B) and Tx (group C) completion interrupts.
 *
 * Returns 0 on success, EIO if the mode is unsupported or the HAL
 * rejects the configuration.
 */
static int
al_eth_configure_int_mode(struct al_eth_adapter *adapter)
{
	enum al_iofic_mode int_mode;
	uint32_t m2s_errors_disable = AL_M2S_MASK_INIT;
	uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT;
	uint32_t s2m_errors_disable = AL_S2M_MASK_INIT;
	uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT;

	/* single INTX mode */
	if (adapter->msix_vecs == 0)
		int_mode = AL_IOFIC_MODE_LEGACY;
	else if (adapter->msix_vecs > 1)
		int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
	else {
		device_printf(adapter->dev,
		    "udma doesn't support single MSI-X mode yet.\n");
		return (EIO);
	}

	if (adapter->board_type != ALPINE_INTEGRATED) {
		/*
		 * NOTE(review): the two pairs below OR the same bit into
		 * the same variable twice -- m2s_aborts_disable and
		 * s2m_errors_disable are never updated.  This looks like
		 * a copy-paste slip (plausibly each of the four masks was
		 * meant to get AL_M2S_S2M_MASK_NOT_INT); confirm against
		 * the HAL docs before changing, as it alters interrupt
		 * masking on NIC-mode boards.
		 */
		m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
		m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
		s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
		s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
	}

	if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
	    int_mode, m2s_errors_disable, m2s_aborts_disable,
	    s2m_errors_disable, s2m_aborts_disable)) {
		device_printf(adapter->dev,
		    "al_udma_unit_int_config failed!.\n");
		return (EIO);
	}
	adapter->int_mode = int_mode;
	device_printf_dbg(adapter->dev, "using %s interrupt mode\n",
	    int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" :
	    int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown");
	/* set interrupt moderation resolution to 15us */
	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15);
	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15);
	/* by default interrupt coalescing is disabled */
	adapter->tx_usecs = 0;
	adapter->rx_usecs = 0;

	return (0);
}
2780 
2781 /*
2782  * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
2783  * @index: Index in RX flow hash indirection table
2784  * @n_rx_rings: Number of RX rings to use
2785  *
2786  * This function provides the default policy for RX flow hash indirection.
2787  */
/*
 * ethtool_rxfh_indir_default - default RX flow-hash indirection policy.
 * @index: index into the RX indirection table
 * @n_rx_rings: number of RX rings in use
 *
 * Spreads indirection-table entries across the rings round-robin.
 */
static inline uint32_t
ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
{
	uint32_t ring;

	ring = index % n_rx_rings;
	return (ring);
}
2794 
2795 static void*
2796 al_eth_update_stats(struct al_eth_adapter *adapter)
2797 {
2798 	struct al_eth_mac_stats *mac_stats = &adapter->mac_stats;
2799 
2800 	if (adapter->up == 0)
2801 		return (NULL);
2802 
2803 	al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats);
2804 
2805 	return (NULL);
2806 }
2807 
2808 static uint64_t
2809 al_get_counter(if_t ifp, ift_counter cnt)
2810 {
2811 	struct al_eth_adapter *adapter;
2812 	struct al_eth_mac_stats *mac_stats;
2813 	uint64_t rv;
2814 
2815 	adapter = if_getsoftc(ifp);
2816 	mac_stats = &adapter->mac_stats;
2817 
2818 	switch (cnt) {
2819 	case IFCOUNTER_IPACKETS:
2820 		return (mac_stats->aFramesReceivedOK); /* including pause frames */
2821 	case IFCOUNTER_OPACKETS:
2822 		return (mac_stats->aFramesTransmittedOK);
2823 	case IFCOUNTER_IBYTES:
2824 		return (mac_stats->aOctetsReceivedOK);
2825 	case IFCOUNTER_OBYTES:
2826 		return (mac_stats->aOctetsTransmittedOK);
2827 	case IFCOUNTER_IMCASTS:
2828 		return (mac_stats->ifInMulticastPkts);
2829 	case IFCOUNTER_OMCASTS:
2830 		return (mac_stats->ifOutMulticastPkts);
2831 	case IFCOUNTER_COLLISIONS:
2832 		return (0);
2833 	case IFCOUNTER_IQDROPS:
2834 		return (mac_stats->etherStatsDropEvents);
2835 	case IFCOUNTER_IERRORS:
2836 		rv = mac_stats->ifInErrors +
2837 		    mac_stats->etherStatsUndersizePkts + /* good but short */
2838 		    mac_stats->etherStatsFragments + /* short and bad*/
2839 		    mac_stats->etherStatsJabbers + /* with crc errors */
2840 		    mac_stats->etherStatsOversizePkts +
2841 		    mac_stats->aFrameCheckSequenceErrors +
2842 		    mac_stats->aAlignmentErrors;
2843 		return (rv);
2844 	case IFCOUNTER_OERRORS:
2845 		return (mac_stats->ifOutErrors);
2846 	default:
2847 		return (if_get_counter_default(ifp, cnt));
2848 	}
2849 }
2850 
2851 static u_int
2852 al_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2853 {
2854 	unsigned char *mac;
2855 
2856 	mac = LLADDR(sdl);
2857 	/* default mc address inside mac address */
2858 	if (mac[3] != 0 && mac[4] != 0 && mac[5] != 1)
2859 		return (1);
2860 	else
2861 		return (0);
2862 }
2863 
2864 static u_int
2865 al_program_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2866 {
2867 	struct al_eth_adapter *adapter = arg;
2868 
2869 	al_eth_mac_table_unicast_add(adapter,
2870 	    AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 + cnt, 1);
2871 
2872 	return (1);
2873 }
2874 
2875 /*
2876  *  Unicast, Multicast and Promiscuous mode set
2877  *
2878  *  The set_rx_mode entry point is called whenever the unicast or multicast
2879  *  address lists or the network interface flags are updated.  This routine is
2880  *  responsible for configuring the hardware for proper unicast, multicast,
2881  *  promiscuous mode, and all-multi behavior.
2882  */
static void
al_eth_set_rx_mode(struct al_eth_adapter *adapter)
{
	if_t ifp = adapter->netdev;
	int mc, uc;
	uint8_t i;

	/* XXXGL: why generic count won't work? */
	mc = if_foreach_llmaddr(ifp, al_count_maddr, NULL);
	uc = if_lladdr_count(ifp);

	if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
		al_eth_mac_table_promiscuous_set(adapter, true);
	} else {
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
			/* This interface is in all-multicasts mode (used by multicast routers). */
			al_eth_mac_table_all_multicast_add(adapter,
			    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
		} else {
			if (mc == 0) {
				/* No multicast memberships: drop multicast. */
				al_eth_mac_table_entry_clear(adapter,
				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
			} else {
				/*
				 * The hardware filter is all-or-nothing for
				 * multicast: accept all if any group joined.
				 */
				al_eth_mac_table_all_multicast_add(adapter,
				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
			}
		}
		if (uc != 0) {
			/* Secondary unicast slots follow the primary entry. */
			i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
			if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) {
				/*
				 * In this case there are more addresses then
				 * entries in the mac table - set promiscuous
				 */
				al_eth_mac_table_promiscuous_set(adapter, true);
				return;
			}

			/* clear the last configuration */
			while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE +
				    AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
				al_eth_mac_table_entry_clear(adapter, i);
				i++;
			}

			/* set new addresses */
			if_foreach_lladdr(ifp, al_program_addr, adapter);
		}
		al_eth_mac_table_promiscuous_set(adapter, false);
	}
}
2934 
/*
 * Program the receive forwarding engine: priority/pbits mapping, the
 * default control-table entry, the default MAC filtering entries, the
 * Toeplitz RSS hash keys and the RSS indirection table, then the flow
 * steering (fsm) table.
 */
static void
al_eth_config_rx_fwd(struct al_eth_adapter *adapter)
{
	struct al_eth_fwd_ctrl_table_entry entry;
	int i;

	/* let priority be equal to pbits */
	for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++)
		al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i);

	/* map priority to queue index, queue id = priority/2 */
	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
		al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1);

	/*
	 * Default control entry: queue from the Toeplitz hash table, no
	 * priority override, UDMA selection from the MAC table, no filter.
	 * NOTE(review): fields of 'entry' other than the five set here are
	 * left uninitialized -- confirm the HAL ignores them.
	 */
	entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0;
	entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE;
	entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO;
	entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE;
	entry.filter = false;

	al_eth_ctrl_table_def_set(&adapter->hal_adapter, AL_FALSE, &entry);

	/*
	 * By default set the mac table to forward all unicast packets to our
	 * MAC address and all broadcast. all the rest will be dropped.
	 */
	al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
	    1);
	al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1);
	al_eth_mac_table_promiscuous_set(adapter, false);

	/* set toeplitz hash keys (random() is fine here: RSS keys need no
	 * cryptographic strength) */
	for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++)
		*((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random();

	for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++)
		al_eth_hash_key_set(&adapter->hal_adapter, i,
		    htonl(adapter->toeplitz_hash_key[i]));

	/* round-robin default indirection: ring = index % num queues */
	for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) {
		adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i,
		    AL_ETH_NUM_QUEUES);
		al_eth_set_thash_table_entry(adapter, i, 0,
		    adapter->rss_ind_tbl[i]);
	}

	al_eth_fsm_table_init(adapter);
}
2983 
2984 static void
2985 al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size)
2986 {
2987 
2988 	/*
2989 	* Determine the correct mbuf pool
2990 	* for doing jumbo frames
2991 	* Try from the smallest up to maximum supported
2992 	*/
2993 	adapter->rx_mbuf_sz = MCLBYTES;
2994 	if (size > 2048) {
2995 		if (adapter->max_rx_buff_alloc_size > 2048)
2996 			adapter->rx_mbuf_sz = MJUMPAGESIZE;
2997 		else
2998 			return;
2999 	}
3000 	if (size > 4096) {
3001 		if (adapter->max_rx_buff_alloc_size > 4096)
3002 			adapter->rx_mbuf_sz = MJUM9BYTES;
3003 		else
3004 			return;
3005 	}
3006 	if (size > 9216) {
3007 		if (adapter->max_rx_buff_alloc_size > 9216)
3008 			adapter->rx_mbuf_sz = MJUM16BYTES;
3009 		else
3010 			return;
3011 	}
3012 }
3013 
3014 static int
3015 al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu)
3016 {
3017 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
3018 	    ETHER_VLAN_ENCAP_LEN;
3019 
3020 	al_eth_req_rx_buff_size(adapter, new_mtu);
3021 
3022 	device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu);
3023 	al_eth_rx_pkt_limit_config(&adapter->hal_adapter,
3024 	    AL_ETH_MIN_FRAME_LEN, max_frame);
3025 
3026 	al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100);
3027 
3028 	return (0);
3029 }
3030 
3031 static int
3032 al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu)
3033 {
3034 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
3035 
3036 	if ((new_mtu < AL_ETH_MIN_FRAME_LEN) ||
3037 	    (max_frame > AL_ETH_MAX_FRAME_LEN)) {
3038 		return (EINVAL);
3039 	}
3040 
3041 	return (0);
3042 }
3043 
3044 static int
3045 al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type,
3046     int qid)
3047 {
3048 	int rc = 0;
3049 	char *name = (type == UDMA_TX) ? "Tx" : "Rx";
3050 	struct al_udma_q_params *q_params;
3051 
3052 	if (type == UDMA_TX)
3053 		q_params = &adapter->tx_ring[qid].q_params;
3054 	else
3055 		q_params = &adapter->rx_ring[qid].q_params;
3056 
3057 	rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params);
3058 	if (rc < 0) {
3059 		device_printf(adapter->dev, "config %s queue %u failed\n", name,
3060 		    qid);
3061 		return (rc);
3062 	}
3063 	return (rc);
3064 }
3065 
3066 static int
3067 al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter)
3068 {
3069 	int i;
3070 
3071 	for (i = 0; i < adapter->num_tx_queues; i++)
3072 		al_eth_udma_queue_enable(adapter, UDMA_TX, i);
3073 
3074 	for (i = 0; i < adapter->num_rx_queues; i++)
3075 		al_eth_udma_queue_enable(adapter, UDMA_RX, i);
3076 
3077 	return (0);
3078 }
3079 
/*
 * Final stage of interface bring-up: program interrupt mode and the RX
 * forwarding/RSS tables, apply MTU-derived limits, enable the UDMA
 * queues, pre-fill the RX rings, unmask interrupts and start the MAC.
 * The call order looks deliberate (RX buffers are posted before
 * interrupts are unmasked, the MAC is started last) -- do not reorder.
 */
static void
al_eth_up_complete(struct al_eth_adapter *adapter)
{

	al_eth_configure_int_mode(adapter);
	al_eth_config_rx_fwd(adapter);
	/* Re-apply frame-length limits for the interface's current MTU. */
	al_eth_change_mtu(adapter, if_getmtu(adapter->netdev));
	al_eth_udma_queues_enable_all(adapter);
	/* Post RX buffers before interrupts can fire. */
	al_eth_refill_all_rx_bufs(adapter);
	al_eth_interrupts_unmask(adapter);

	/* enable forwarding interrupts from eth through pci end point */
	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
	    (adapter->board_type == ALPINE_NIC)) {
		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
		    AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR);
	}

	al_eth_flow_ctrl_enable(adapter);

	/* Arm the one-second statistics callout under its mutex. */
	mtx_lock(&adapter->stats_mtx);
	callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter);
	mtx_unlock(&adapter->stats_mtx);

	/* Finally let the MAC pass traffic. */
	al_eth_mac_start(&adapter->hal_adapter);
}
3106 
3107 static int
3108 al_media_update(if_t ifp)
3109 {
3110 	struct al_eth_adapter *adapter = if_getsoftc(ifp);
3111 
3112 	if ((if_getflags(ifp) & IFF_UP) != 0)
3113 		mii_mediachg(adapter->mii);
3114 
3115 	return (0);
3116 }
3117 
3118 static void
3119 al_media_status(if_t ifp, struct ifmediareq *ifmr)
3120 {
3121 	struct al_eth_adapter *sc = if_getsoftc(ifp);
3122 	struct mii_data *mii;
3123 
3124 	if (sc->mii == NULL) {
3125 		ifmr->ifm_active = IFM_ETHER | IFM_NONE;
3126 		ifmr->ifm_status = 0;
3127 
3128 		return;
3129 	}
3130 
3131 	mii = sc->mii;
3132 	mii_pollstat(mii);
3133 
3134 	ifmr->ifm_active = mii->mii_media_active;
3135 	ifmr->ifm_status = mii->mii_media_status;
3136 }
3137 
3138 static void
3139 al_tick(void *arg)
3140 {
3141 	struct al_eth_adapter *adapter = arg;
3142 
3143 	mii_tick(adapter->mii);
3144 
3145 	/* Schedule another timeout one second from now */
3146 	callout_schedule(&adapter->wd_callout, hz);
3147 }
3148 
3149 static void
3150 al_tick_stats(void *arg)
3151 {
3152 	struct al_eth_adapter *adapter = arg;
3153 
3154 	al_eth_update_stats(adapter);
3155 
3156 	callout_schedule(&adapter->stats_callout, hz);
3157 }
3158 
/*
 * Bring the interface up: perform a function reset if one was
 * requested, derive hardware-assist flags from the enabled
 * capabilities, initialize the SerDes/hardware, set up interrupts and
 * the Tx/Rx descriptor rings, then complete bring-up and start the
 * link machinery (RGMII watchdog or serial link-state notification).
 * Returns 0 on success; on failure, previously acquired resources are
 * released through the goto-cleanup chain at the bottom.
 */
static int
al_eth_up(struct al_eth_adapter *adapter)
{
	if_t ifp = adapter->netdev;
	int rc;

	/* Already up: nothing to do. */
	if (adapter->up)
		return (0);

	if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) {
		al_eth_function_reset(adapter);
		adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED;
	}

	/* Rebuild hwassist flags from the currently enabled capabilities. */
	if_sethwassist(ifp, 0);
	if ((if_getcapenable(ifp) & IFCAP_TSO) != 0)
		if_sethwassistbits(ifp, CSUM_TSO, 0);
	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
		if_sethwassistbits(ifp, (CSUM_TCP | CSUM_UDP), 0);
	if ((if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) != 0)
		if_sethwassistbits(ifp, (CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0);

	al_eth_serdes_init(adapter);

	rc = al_eth_hw_init(adapter);
	if (rc != 0)
		goto err_hw_init_open;

	rc = al_eth_setup_int_mode(adapter);
	if (rc != 0) {
		device_printf(adapter->dev,
		    "%s failed at setup interrupt mode!\n", __func__);
		goto err_setup_int;
	}

	/* allocate transmit descriptors */
	rc = al_eth_setup_all_tx_resources(adapter);
	if (rc != 0)
		goto err_setup_tx;

	/* allocate receive descriptors */
	rc = al_eth_setup_all_rx_resources(adapter);
	if (rc != 0)
		goto err_setup_rx;

	rc = al_eth_request_irq(adapter);
	if (rc != 0)
		goto err_req_irq;

	al_eth_up_complete(adapter);

	adapter->up = true;

	/* Serial 10GbE has no PHY watchdog; report link up directly. */
	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial)
		if_link_state_change(adapter->netdev, LINK_STATE_UP);

	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
		mii_mediachg(adapter->mii);

		/* Schedule watchdog timeout */
		mtx_lock(&adapter->wd_mtx);
		callout_reset(&adapter->wd_callout, hz, al_tick, adapter);
		mtx_unlock(&adapter->wd_mtx);

		mii_pollstat(adapter->mii);
	}

	/* rc is 0 here. */
	return (rc);

	/*
	 * Unwind in reverse order of acquisition.
	 * NOTE(review): err_setup_tx runs al_eth_free_irq() even though
	 * al_eth_request_irq() has not executed on that path -- presumably
	 * it releases state created by al_eth_setup_int_mode(); confirm it
	 * is safe to call before the IRQ was actually requested.
	 */
err_req_irq:
	al_eth_free_all_rx_resources(adapter);
err_setup_rx:
	al_eth_free_all_tx_resources(adapter);
err_setup_tx:
	al_eth_free_irq(adapter);
err_setup_int:
	al_eth_hw_stop(adapter);
err_hw_init_open:
	al_eth_function_reset(adapter);

	return (rc);
}
3241 
3242 static int
3243 al_shutdown(device_t dev)
3244 {
3245 	struct al_eth_adapter *adapter = device_get_softc(dev);
3246 
3247 	al_eth_down(adapter);
3248 
3249 	return (0);
3250 }
3251 
/*
 * Tear the interface down: stop the callouts, quiesce interrupts,
 * stop the hardware and only then release the descriptor rings.
 * The teardown order is the reverse of al_eth_up()'s bring-up and
 * must be preserved.
 */
static void
al_eth_down(struct al_eth_adapter *adapter)
{

	device_printf_dbg(adapter->dev, "al_eth_down: begin\n");

	/* Mark the interface down first so other paths see the new state. */
	adapter->up = false;

	/* Stop the MII watchdog callout under its mutex. */
	mtx_lock(&adapter->wd_mtx);
	callout_stop(&adapter->wd_callout);
	mtx_unlock(&adapter->wd_mtx);

	/* Mask interrupts (name suggests it also drains in-flight
	 * handlers -- NOTE(review): confirm against the implementation). */
	al_eth_disable_int_sync(adapter);

	/* Stop the statistics callout under its mutex. */
	mtx_lock(&adapter->stats_mtx);
	callout_stop(&adapter->stats_callout);
	mtx_unlock(&adapter->stats_mtx);

	al_eth_free_irq(adapter);
	al_eth_hw_stop(adapter);

	/* Free rings only after the hardware can no longer touch them. */
	al_eth_free_all_tx_resources(adapter);
	al_eth_free_all_rx_resources(adapter);
}
3276 
/*
 * Interface ioctl handler.  Handles MTU changes (with re-init), up/down
 * and promiscuous/allmulti transitions, multicast list updates, media
 * ioctls (via MII when a PHY is attached, otherwise the driver's own
 * ifmedia), and capability toggles (re-initializing when required).
 * Everything else is delegated to ether_ioctl().
 */
static int
al_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct al_eth_adapter	*adapter = if_getsoftc(ifp);
	struct ifreq		*ifr = (struct ifreq *)data;
	int			error = 0;

	switch (command) {
	case SIOCSIFMTU:
	{
		error = al_eth_check_mtu(adapter, ifr->ifr_mtu);
		if (error != 0) {
			/*
			 * NOTE(review): this prints the interface's current
			 * MTU, not the rejected ifr->ifr_mtu -- confirm which
			 * was intended.
			 */
			device_printf(adapter->dev, "ioctl wrong mtu %u\n",
			    if_getmtu(adapter->netdev));
			break;
		}

		/* Mark not-running, apply the MTU, then re-initialize. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		if_setmtu(adapter->netdev, ifr->ifr_mtu);
		al_init(adapter);
		break;
	}
	case SIOCSIFFLAGS:
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				/* Only reprogram RX filters when the
				 * PROMISC/ALLMULTI bits actually changed. */
				if (((if_getflags(ifp) ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					device_printf_dbg(adapter->dev,
					    "ioctl promisc/allmulti\n");
					al_eth_set_rx_mode(adapter);
				}
			} else {
				error = al_eth_up(adapter);
				if (error == 0)
					if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				al_eth_down(adapter);
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			}
		}

		/* Remember the flags for the next delta comparison. */
		adapter->if_flags = if_getflags(ifp);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			device_printf_dbg(adapter->dev,
			    "ioctl add/del multi before\n");
			al_eth_set_rx_mode(adapter);
#ifdef DEVICE_POLLING
			/*
			 * NOTE(review): this "if" has no body, so the file
			 * does not build with DEVICE_POLLING defined; it
			 * looks like leftover code -- confirm and either
			 * complete or remove it.
			 */
			if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
#endif
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Prefer the PHY's media handling when one is attached. */
		if (adapter->mii != NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &adapter->mii->mii_media, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		reinit = 0;
		/* mask holds the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				/*
				 * NOTE(review): "error" is always 0 here and
				 * no ether_poll_register() call is made, so
				 * this check can never fire -- verify whether
				 * the registration call was lost.
				 */
				if (error != 0)
					return (error);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
			}
		}
#endif
		if ((mask & IFCAP_HWCSUM) != 0) {
			/* apply to both rx and tx */
			if_togglecapenable(ifp, IFCAP_HWCSUM);
			reinit = 1;
		}
		if ((mask & IFCAP_HWCSUM_IPV6) != 0) {
			if_togglecapenable(ifp, IFCAP_HWCSUM_IPV6);
			reinit = 1;
		}
		if ((mask & IFCAP_TSO) != 0) {
			if_togglecapenable(ifp, IFCAP_TSO);
			reinit = 1;
		}
		/* LRO can be toggled without re-initializing. */
		if ((mask & IFCAP_LRO) != 0) {
			if_togglecapenable(ifp, IFCAP_LRO);
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			reinit = 1;
		}
		if ((mask & IFCAP_VLAN_HWFILTER) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
			reinit = 1;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
			reinit = 1;
		}
		/* Re-init only if a changed capability requires it. */
		if ((reinit != 0) &&
		    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) != 0)
		{
			al_init(adapter);
		}
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
3405 
3406 static int
3407 al_is_device_supported(device_t dev)
3408 {
3409 	uint16_t pci_vendor_id = pci_get_vendor(dev);
3410 	uint16_t pci_device_id = pci_get_device(dev);
3411 
3412 	return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS &&
3413 	    (pci_device_id == PCI_DEVICE_ID_AL_ETH ||
3414 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED ||
3415 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC ||
3416 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC));
3417 }
3418 
3419 /* Time in mSec to keep trying to read / write from MDIO in case of error */
3420 #define	MDIO_TIMEOUT_MSEC	100
3421 #define	MDIO_PAUSE_MSEC		10
3422 
3423 static int
3424 al_miibus_readreg(device_t dev, int phy, int reg)
3425 {
3426 	struct al_eth_adapter *adapter = device_get_softc(dev);
3427 	uint16_t value = 0;
3428 	int rc;
3429 	int timeout = MDIO_TIMEOUT_MSEC;
3430 
3431 	while (timeout > 0) {
3432 		rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr,
3433 		    -1, reg, &value);
3434 
3435 		if (rc == 0)
3436 			return (value);
3437 
3438 		device_printf_dbg(adapter->dev,
3439 		    "mdio read failed. try again in 10 msec\n");
3440 
3441 		timeout -= MDIO_PAUSE_MSEC;
3442 		pause("readred pause", MDIO_PAUSE_MSEC);
3443 	}
3444 
3445 	if (rc != 0)
3446 		device_printf(adapter->dev, "MDIO read failed on timeout\n");
3447 
3448 	return (value);
3449 }
3450 
3451 static int
3452 al_miibus_writereg(device_t dev, int phy, int reg, int value)
3453 {
3454 	struct al_eth_adapter *adapter = device_get_softc(dev);
3455 	int rc;
3456 	int timeout = MDIO_TIMEOUT_MSEC;
3457 
3458 	while (timeout > 0) {
3459 		rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr,
3460 		    -1, reg, value);
3461 
3462 		if (rc == 0)
3463 			return (0);
3464 
3465 		device_printf(adapter->dev,
3466 		    "mdio write failed. try again in 10 msec\n");
3467 
3468 		timeout -= MDIO_PAUSE_MSEC;
3469 		pause("miibus writereg", MDIO_PAUSE_MSEC);
3470 	}
3471 
3472 	if (rc != 0)
3473 		device_printf(adapter->dev, "MDIO write failed on timeout\n");
3474 
3475 	return (rc);
3476 }
3477 
3478 static void
3479 al_miibus_statchg(device_t dev)
3480 {
3481 	struct al_eth_adapter *adapter = device_get_softc(dev);
3482 
3483 	device_printf_dbg(adapter->dev,
3484 	    "al_miibus_statchg: state has changed!\n");
3485 	device_printf_dbg(adapter->dev,
3486 	    "al_miibus_statchg: active = 0x%x status = 0x%x\n",
3487 	    adapter->mii->mii_media_active, adapter->mii->mii_media_status);
3488 
3489 	if (adapter->up == 0)
3490 		return;
3491 
3492 	if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) {
3493 		if (adapter->mii->mii_media_status & IFM_ACTIVE) {
3494 			device_printf(adapter->dev, "link is UP\n");
3495 			if_link_state_change(adapter->netdev, LINK_STATE_UP);
3496 		} else {
3497 			device_printf(adapter->dev, "link is DOWN\n");
3498 			if_link_state_change(adapter->netdev, LINK_STATE_DOWN);
3499 		}
3500 	}
3501 }
3502 
3503 static void
3504 al_miibus_linkchg(device_t dev)
3505 {
3506 	struct al_eth_adapter *adapter = device_get_softc(dev);
3507 	uint8_t duplex = 0;
3508 	uint8_t speed = 0;
3509 
3510 	if (adapter->mii == NULL)
3511 		return;
3512 
3513 	if ((if_getflags(adapter->netdev) & IFF_UP) == 0)
3514 		return;
3515 
3516 	/* Ignore link changes when link is not ready */
3517 	if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
3518 	    (IFM_AVALID | IFM_ACTIVE)) {
3519 		return;
3520 	}
3521 
3522 	if ((adapter->mii->mii_media_active & IFM_FDX) != 0)
3523 		duplex = 1;
3524 
3525 	speed = IFM_SUBTYPE(adapter->mii->mii_media_active);
3526 
3527 	if (speed == IFM_10_T) {
3528 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3529 		    AL_10BASE_T_SPEED, duplex);
3530 		return;
3531 	}
3532 
3533 	if (speed == IFM_100_TX) {
3534 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3535 		    AL_100BASE_TX_SPEED, duplex);
3536 		return;
3537 	}
3538 
3539 	if (speed == IFM_1000_T) {
3540 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3541 		    AL_1000BASE_T_SPEED, duplex);
3542 		return;
3543 	}
3544 
3545 	device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n",
3546 	    adapter->mii->mii_media_active);
3547 }
3548