xref: /freebsd/sys/dev/al_eth/al_eth.c (revision e6bfd18d21b225af6a0ed67ceeaf1293b7b9eba5)
1 /*-
2  * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
3  * All rights reserved.
4  *
5  * Developed by Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/bus.h>
35 #include <sys/kernel.h>
36 #include <sys/kthread.h>
37 #include <sys/lock.h>
38 #include <sys/mbuf.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/rman.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 #include <sys/taskqueue.h>
46 
47 #include <machine/atomic.h>
48 
49 #include "opt_inet.h"
50 #include "opt_inet6.h"
51 
52 #include <net/ethernet.h>
53 #include <net/if.h>
54 #include <net/if_var.h>
55 #include <net/if_arp.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58 #include <net/if_types.h>
59 #include <netinet/in.h>
60 #include <net/if_vlan_var.h>
61 #include <netinet/tcp.h>
62 #include <netinet/tcp_lro.h>
63 
64 #ifdef INET
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/ip.h>
69 #endif
70 
71 #ifdef INET6
72 #include <netinet/ip6.h>
73 #endif
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcivar.h>
79 
80 #include <dev/mii/mii.h>
81 #include <dev/mii/miivar.h>
82 
83 #include <al_hal_common.h>
84 #include <al_hal_plat_services.h>
85 #include <al_hal_udma_config.h>
86 #include <al_hal_udma_iofic.h>
87 #include <al_hal_udma_debug.h>
88 #include <al_hal_eth.h>
89 
90 #include "al_eth.h"
91 #include "al_init_eth_lm.h"
92 #include "arm/annapurna/alpine/alpine_serdes.h"
93 
94 #include "miibus_if.h"
95 
#define	device_printf_dbg(dev, ...) do {				\
	if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK();		\
	    device_printf(dev, __VA_ARGS__); AL_DBG_UNLOCK();}		\
	} while (0)
100 
101 MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");
102 
/* TODO: move out to a shared PCI header file */
104 #define	PCI_VENDOR_ID_ANNAPURNA_LABS	0x1c36
105 #define	PCI_DEVICE_ID_AL_ETH		0x0001
106 #define	PCI_DEVICE_ID_AL_ETH_ADVANCED	0x0002
107 #define	PCI_DEVICE_ID_AL_ETH_NIC	0x0003
108 #define	PCI_DEVICE_ID_AL_ETH_FPGA_NIC	0x0030
109 #define	PCI_DEVICE_ID_AL_CRYPTO		0x0011
110 #define	PCI_DEVICE_ID_AL_CRYPTO_VF	0x8011
111 #define	PCI_DEVICE_ID_AL_RAID_DMA	0x0021
112 #define	PCI_DEVICE_ID_AL_RAID_DMA_VF	0x8021
113 #define	PCI_DEVICE_ID_AL_USB		0x0041
114 
115 #define	MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
116 #define	MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]
117 
118 #define	AL_ETH_MAC_TABLE_UNICAST_IDX_BASE	0
119 #define	AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT	4
120 #define	AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX	(AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
121 						 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)
122 
123 #define	AL_ETH_MAC_TABLE_DROP_IDX		(AL_ETH_FWD_MAC_NUM - 1)
124 #define	AL_ETH_MAC_TABLE_BROADCAST_IDX		(AL_ETH_MAC_TABLE_DROP_IDX - 1)
125 
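/*
 * RX thash (RSS) table entry layout, as implied by the masks below:
 * bits 0-3 select the target UDMA, bits 4-5 select the queue.
 */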
126 #define	AL_ETH_THASH_UDMA_SHIFT		0
127 #define	AL_ETH_THASH_UDMA_MASK		(0xF << AL_ETH_THASH_UDMA_SHIFT)
128 
129 #define	AL_ETH_THASH_Q_SHIFT		4
130 #define	AL_ETH_THASH_Q_MASK		(0x3 << AL_ETH_THASH_Q_SHIFT)
131 
/* The following defines should be moved to the HAL. */
133 #define	AL_ETH_FSM_ENTRY_IPV4_TCP		0
134 #define	AL_ETH_FSM_ENTRY_IPV4_UDP		1
135 #define	AL_ETH_FSM_ENTRY_IPV6_TCP		2
136 #define	AL_ETH_FSM_ENTRY_IPV6_UDP		3
137 #define	AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP	4
138 #define	AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP	5
139 
140 /* FSM DATA format */
141 #define	AL_ETH_FSM_DATA_OUTER_2_TUPLE	0
142 #define	AL_ETH_FSM_DATA_OUTER_4_TUPLE	1
143 #define	AL_ETH_FSM_DATA_INNER_2_TUPLE	2
144 #define	AL_ETH_FSM_DATA_INNER_4_TUPLE	3
145 
146 #define	AL_ETH_FSM_DATA_HASH_SEL	(1 << 2)
147 
148 #define	AL_ETH_FSM_DATA_DEFAULT_Q	0
149 #define	AL_ETH_FSM_DATA_DEFAULT_UDMA	0
150 
151 #define	AL_BR_SIZE	512
152 #define	AL_TSO_SIZE	65500
153 #define	AL_DEFAULT_MTU	1500
154 
155 #define	CSUM_OFFLOAD		(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
156 
157 #define	AL_IP_ALIGNMENT_OFFSET	2
158 
159 #define	SFP_I2C_ADDR		0x50
160 
161 #define	AL_MASK_GROUP_A_INT	0x7
162 #define	AL_MASK_GROUP_B_INT	0xF
163 #define	AL_MASK_GROUP_C_INT	0xF
164 #define	AL_MASK_GROUP_D_INT	0xFFFFFFFF
165 
166 #define	AL_REG_OFFSET_FORWARD_INTR	(0x1800000 + 0x1210)
167 #define	AL_EN_FORWARD_INTR	0x1FFFF
168 #define	AL_DIS_FORWARD_INTR	0
169 
170 #define	AL_M2S_MASK_INIT	0x480
171 #define	AL_S2M_MASK_INIT	0x1E0
172 #define	AL_M2S_S2M_MASK_NOT_INT	(0x3f << 25)
173 
174 #define	AL_10BASE_T_SPEED	10
175 #define	AL_100BASE_TX_SPEED	100
176 #define	AL_1000BASE_T_SPEED	1000
177 
178 #define	AL_RX_LOCK_INIT(_sc)	mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
179 #define	AL_RX_LOCK(_sc)		mtx_lock(&((_sc)->if_rx_lock))
180 #define	AL_RX_UNLOCK(_sc)	mtx_unlock(&((_sc)->if_rx_lock))
181 
182 /* helper functions */
183 static int al_is_device_supported(device_t);
184 
185 static void al_eth_init_rings(struct al_eth_adapter *);
186 static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
187 int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
188 int al_eth_fpga_write_pci_config(void *, int, uint32_t);
189 int al_eth_read_pci_config(void *, int, uint32_t *);
190 int al_eth_write_pci_config(void *, int, uint32_t);
191 void al_eth_irq_config(uint32_t *, uint32_t);
192 void al_eth_forward_int_config(uint32_t *, uint32_t);
193 static void al_eth_start_xmit(void *, int);
194 static void al_eth_rx_recv_work(void *, int);
195 static int al_eth_up(struct al_eth_adapter *);
196 static void al_eth_down(struct al_eth_adapter *);
197 static void al_eth_interrupts_unmask(struct al_eth_adapter *);
198 static void al_eth_interrupts_mask(struct al_eth_adapter *);
199 static int al_eth_check_mtu(struct al_eth_adapter *, int);
200 static uint64_t al_get_counter(if_t, ift_counter);
201 static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
202 static int al_eth_board_params_init(struct al_eth_adapter *);
203 static int al_media_update(if_t);
204 static void al_media_status(if_t, struct ifmediareq *);
205 static int al_eth_function_reset(struct al_eth_adapter *);
206 static int al_eth_hw_init_adapter(struct al_eth_adapter *);
207 static void al_eth_serdes_init(struct al_eth_adapter *);
208 static void al_eth_lm_config(struct al_eth_adapter *);
209 static int al_eth_hw_init(struct al_eth_adapter *);
210 
211 static void al_tick_stats(void *);
212 
213 /* ifnet entry points */
214 static void al_init(void *);
215 static int al_mq_start(if_t, struct mbuf *);
216 static void al_qflush(if_t);
217 static int al_ioctl(if_t ifp, u_long, caddr_t);
218 
219 /* bus entry points */
220 static int al_probe(device_t);
221 static int al_attach(device_t);
222 static int al_detach(device_t);
223 static int al_shutdown(device_t);
224 
225 /* mii bus support routines */
226 static int al_miibus_readreg(device_t, int, int);
227 static int al_miibus_writereg(device_t, int, int, int);
228 static void al_miibus_statchg(device_t);
229 static void al_miibus_linkchg(device_t);
230 
struct al_eth_adapter *g_adapters[16];
232 uint32_t g_adapters_count;
233 
234 /* flag for napi-like mbuf processing, controlled from sysctl */
235 static int napi = 0;
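
/*
 * This knob is attached under the parent of the device sysctl tree in
 * al_attach(), so it can be toggled at runtime, presumably as
 * "sysctl dev.al.napi=1" (the exact OID path is an assumption here).
 */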
236 
237 static device_method_t al_methods[] = {
238 	/* Device interface */
239 	DEVMETHOD(device_probe,		al_probe),
240 	DEVMETHOD(device_attach,	al_attach),
241 	DEVMETHOD(device_detach,	al_detach),
242 	DEVMETHOD(device_shutdown,	al_shutdown),
243 
244 	DEVMETHOD(miibus_readreg,	al_miibus_readreg),
245 	DEVMETHOD(miibus_writereg,	al_miibus_writereg),
246 	DEVMETHOD(miibus_statchg,	al_miibus_statchg),
247 	DEVMETHOD(miibus_linkchg,	al_miibus_linkchg),
248 	{ 0, 0 }
249 };
250 
251 static driver_t al_driver = {
252 	"al",
253 	al_methods,
254 	sizeof(struct al_eth_adapter),
255 };
256 
257 DRIVER_MODULE(al, pci, al_driver, 0, 0);
258 DRIVER_MODULE(miibus, al, miibus_driver, 0, 0);
259 
260 static int
261 al_probe(device_t dev)
262 {
	if (al_is_device_supported(dev) != 0) {
264 		device_set_desc(dev, "al");
265 		return (BUS_PROBE_DEFAULT);
266 	}
267 	return (ENXIO);
268 }
269 
270 static int
271 al_attach(device_t dev)
272 {
273 	struct al_eth_adapter *adapter;
274 	struct sysctl_oid_list *child;
275 	struct sysctl_ctx_list *ctx;
276 	struct sysctl_oid *tree;
277 	if_t ifp;
278 	uint32_t dev_id;
279 	uint32_t rev_id;
280 	int bar_udma;
281 	int bar_mac;
282 	int bar_ec;
283 	int err;
284 
285 	err = 0;
286 	ifp = NULL;
287 	dev_id = rev_id = 0;
288 	ctx = device_get_sysctl_ctx(dev);
289 	tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
290 	child = SYSCTL_CHILDREN(tree);
291 
292 	if (g_adapters_count == 0) {
293 		SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi",
294 		    CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism");
295 	}
296 	adapter = device_get_softc(dev);
297 	adapter->dev = dev;
298 	adapter->board_type = ALPINE_INTEGRATED;
299 	snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
300 	    device_get_nameunit(dev));
301 	AL_RX_LOCK_INIT(adapter);
302 
303 	g_adapters[g_adapters_count] = adapter;
304 
305 	bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
306 	adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
307 	    &bar_udma, RF_ACTIVE);
308 	if (adapter->udma_res == NULL) {
309 		device_printf(adapter->dev,
310 		    "could not allocate memory resources for DMA.\n");
311 		err = ENOMEM;
312 		goto err_res_dma;
313 	}
314 	adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
315 	    rman_get_bushandle(adapter->udma_res));
316 	bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
317 	adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
318 	    &bar_mac, RF_ACTIVE);
319 	if (adapter->mac_res == NULL) {
320 		device_printf(adapter->dev,
321 		    "could not allocate memory resources for MAC.\n");
322 		err = ENOMEM;
323 		goto err_res_mac;
324 	}
325 	adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
326 	    rman_get_bushandle(adapter->mac_res));
327 
328 	bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
329 	adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
330 	    RF_ACTIVE);
331 	if (adapter->ec_res == NULL) {
332 		device_printf(adapter->dev,
333 		    "could not allocate memory resources for EC.\n");
334 		err = ENOMEM;
335 		goto err_res_ec;
336 	}
337 	adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
338 	    rman_get_bushandle(adapter->ec_res));
339 
340 	adapter->netdev = ifp = if_alloc(IFT_ETHER);
341 
342 	if_setsoftc(ifp, adapter);
343 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
344 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
345 	if_setflags(ifp, if_getdrvflags(ifp));
346 	if_setflagbits(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI, 0);
347 	if_settransmitfn(ifp, al_mq_start);
348 	if_setqflushfn(ifp, al_qflush);
349 	if_setioctlfn(ifp, al_ioctl);
350 	if_setinitfn(ifp, al_init);
351 	if_setgetcounterfn(ifp, al_get_counter);
352 	if_setmtu(ifp, AL_DEFAULT_MTU);
353 
354 	adapter->if_flags = if_getflags(ifp);
355 
	if_setcapabilities(ifp, if_getcapenable(ifp));
357 
358 	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM |
359 	    IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
360 	    IFCAP_LRO | IFCAP_JUMBO_MTU, 0);
361 
362 	if_setcapenable(ifp, if_getcapabilities(ifp));
363 
364 	adapter->id_number = g_adapters_count;
365 
366 	if (adapter->board_type == ALPINE_INTEGRATED) {
367 		dev_id = pci_get_device(adapter->dev);
368 		rev_id = pci_get_revid(adapter->dev);
369 	} else {
370 		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
371 		    PCIR_DEVICE, &dev_id);
372 		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
373 		    PCIR_REVID, &rev_id);
374 	}
375 
376 	adapter->dev_id = dev_id;
377 	adapter->rev_id = rev_id;
378 
379 	/* set default ring sizes */
380 	adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
381 	adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
382 	adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
383 	adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;
384 
385 	adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
386 	adapter->num_rx_queues = AL_ETH_NUM_QUEUES;
387 
388 	adapter->small_copy_len	= AL_ETH_DEFAULT_SMALL_PACKET_LEN;
389 	adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
390 	adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;
391 
392 	al_eth_req_rx_buff_size(adapter, if_getmtu(adapter->netdev));
393 
394 	adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;
395 
396 	err = al_eth_board_params_init(adapter);
397 	if (err != 0)
398 		goto err;
399 
400 	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
401 		ifmedia_init(&adapter->media, IFM_IMASK,
402 		    al_media_update, al_media_status);
403 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
404 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
405 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
406 		ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
407 	}
408 
409 	al_eth_function_reset(adapter);
410 
411 	err = al_eth_hw_init_adapter(adapter);
412 	if (err != 0)
413 		goto err;
414 
415 	al_eth_init_rings(adapter);
416 	g_adapters_count++;
417 
418 	al_eth_lm_config(adapter);
419 	mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
420 	mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
421 	callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
422 	callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);
423 
424 	ether_ifattach(ifp, adapter->mac_addr);
425 	if_setmtu(ifp, AL_DEFAULT_MTU);
426 
427 	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
428 		al_eth_hw_init(adapter);
429 
430 		/* Attach PHY(s) */
431 		err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
432 		    al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
433 		    MII_OFFSET_ANY, 0);
434 		if (err != 0) {
435 			device_printf(adapter->dev, "attaching PHYs failed\n");
436 			return (err);
437 		}
438 
439 		adapter->mii = device_get_softc(adapter->miibus);
440 	}
441 
442 	return (err);
443 
444 err:
445 	bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
446 err_res_ec:
447 	bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
448 err_res_mac:
449 	bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
450 err_res_dma:
451 	return (err);
452 }
453 
454 static int
455 al_detach(device_t dev)
456 {
457 	struct al_eth_adapter *adapter;
458 
459 	adapter = device_get_softc(dev);
460 	ether_ifdetach(adapter->netdev);
461 
462 	mtx_destroy(&adapter->stats_mtx);
463 	mtx_destroy(&adapter->wd_mtx);
464 
465 	al_eth_down(adapter);
466 
467 	bus_release_resource(dev, SYS_RES_IRQ,    0, adapter->irq_res);
468 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
469 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res);
470 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);
471 
472 	return (0);
473 }
474 
475 int
476 al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
477 {
478 
479 	/* handle is the base address of the adapter */
480 	*val = al_reg_read32((void*)((u_long)handle + where));
481 
482 	return (0);
483 }
484 
485 int
486 al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
487 {
488 
489 	/* handle is the base address of the adapter */
490 	al_reg_write32((void*)((u_long)handle + where), val);
491 	return (0);
492 }
493 
494 int
495 al_eth_read_pci_config(void *handle, int where, uint32_t *val)
496 {
497 
498 	/* handle is a pci_dev */
499 	*val = pci_read_config((device_t)handle, where, sizeof(*val));
500 	return (0);
501 }
502 
503 int
504 al_eth_write_pci_config(void *handle, int where, uint32_t val)
505 {
506 
507 	/* handle is a pci_dev */
508 	pci_write_config((device_t)handle, where, val, sizeof(val));
509 	return (0);
510 }
511 
512 void
513 al_eth_irq_config(uint32_t *offset, uint32_t value)
514 {
515 
516 	al_reg_write32_relaxed(offset, value);
517 }
518 
519 void
520 al_eth_forward_int_config(uint32_t *offset, uint32_t value)
521 {
522 
523 	al_reg_write32(offset, value);
524 }
525 
526 static void
527 al_eth_serdes_init(struct al_eth_adapter *adapter)
528 {
529 	void __iomem	*serdes_base;
530 
531 	adapter->serdes_init = false;
532 
533 	serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
534 	if (serdes_base == NULL) {
535 		device_printf(adapter->dev, "serdes_base get failed!\n");
536 		return;
537 	}
538 
539 	serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);
540 
541 	al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
542 	    &adapter->serdes_obj);
543 
544 	adapter->serdes_init = true;
545 }
546 
547 static void
548 al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
549 {
550 	bus_addr_t *paddr;
551 
552 	paddr = arg;
553 	*paddr = segs->ds_addr;
554 }
555 
556 static int
557 al_dma_alloc_coherent(device_t dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
558     bus_addr_t *baddr, void **vaddr, uint32_t size)
559 {
560 	int ret;
561 	uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE;
562 
563 	ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
564 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
565 	    maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag);
566 	if (ret != 0) {
567 		device_printf(dev,
568 		    "failed to create bus tag, ret = %d\n", ret);
569 		return (ret);
570 	}
571 
	ret = bus_dmamem_alloc(*tag, vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (ret != 0) {
		device_printf(dev,
		    "failed to allocate dmamem, ret = %d\n", ret);
		bus_dma_tag_destroy(*tag);
		return (ret);
	}

	ret = bus_dmamap_load(*tag, *map, *vaddr,
	    size, al_dma_map_addr, baddr, 0);
	if (ret != 0) {
		device_printf(dev,
		    "failed to load bus_dmamap, ret = %d\n", ret);
		bus_dmamem_free(*tag, *vaddr, *map);
		bus_dma_tag_destroy(*tag);
		return (ret);
	}
587 
588 	return (0);
589 }
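
/*
 * Usage sketch (illustrative only, not a call site in this driver):
 * descriptor rings obtain coherent DMA memory through this helper and
 * release it with al_dma_free_coherent() using the same tag/map/vaddr:
 *
 *	bus_dma_tag_t tag;
 *	bus_dmamap_t map;
 *	bus_addr_t baddr;
 *	void *vaddr;
 *
 *	if (al_dma_alloc_coherent(dev, &tag, &map, &baddr, &vaddr,
 *	    size) != 0)
 *		return (ENOMEM);
 *	...
 *	al_dma_free_coherent(tag, map, vaddr);
 */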
590 
591 static void
592 al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
593 {
594 
595 	bus_dmamap_unload(tag, map);
596 	bus_dmamem_free(tag, vaddr, map);
597 	bus_dma_tag_destroy(tag);
598 }
599 
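/*
 * Forwarding MAC table layout (see the AL_ETH_MAC_TABLE_* defines above):
 * unicast entries occupy indices 0..3, the all-multicast entry follows
 * them, the broadcast entry is second to last, and the last entry is the
 * drop/promiscuous catch-all programmed by al_eth_mac_table_promiscuous_set().
 */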
600 static void
601 al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter,
602     uint8_t idx, uint8_t udma_mask)
603 {
604 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
605 
606 	memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr));
607 
608 	memset(entry.mask, 0xff, sizeof(entry.mask));
609 	entry.rx_valid = true;
610 	entry.tx_valid = false;
611 	entry.udma_mask = udma_mask;
612 	entry.filter = false;
613 
614 	device_printf_dbg(adapter->dev,
615 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
616 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
617 
618 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
619 }
620 
621 static void
622 al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx,
623     uint8_t udma_mask)
624 {
625 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
626 
627 	memset(entry.addr, 0x00, sizeof(entry.addr));
628 	memset(entry.mask, 0x00, sizeof(entry.mask));
629 	entry.mask[0] |= 1;
630 	entry.addr[0] |= 1;
631 
632 	entry.rx_valid = true;
633 	entry.tx_valid = false;
634 	entry.udma_mask = udma_mask;
635 	entry.filter = false;
636 
637 	device_printf_dbg(adapter->dev,
638 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
639 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
640 
641 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
642 }
643 
644 static void
645 al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter,
646     uint8_t idx, uint8_t udma_mask)
647 {
648 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
649 
650 	memset(entry.addr, 0xff, sizeof(entry.addr));
651 	memset(entry.mask, 0xff, sizeof(entry.mask));
652 
653 	entry.rx_valid = true;
654 	entry.tx_valid = false;
655 	entry.udma_mask = udma_mask;
656 	entry.filter = false;
657 
658 	device_printf_dbg(adapter->dev,
659 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
660 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
661 
662 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
663 }
664 
665 static void
666 al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter,
667     bool promiscuous)
668 {
669 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
670 
671 	memset(entry.addr, 0x00, sizeof(entry.addr));
672 	memset(entry.mask, 0x00, sizeof(entry.mask));
673 
674 	entry.rx_valid = true;
675 	entry.tx_valid = false;
676 	entry.udma_mask = (promiscuous) ? 1 : 0;
677 	entry.filter = (promiscuous) ? false : true;
678 
679 	device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n",
680 	    __func__, (promiscuous) ? "enter" : "exit");
681 
682 	al_eth_fwd_mac_table_set(&adapter->hal_adapter,
683 	    AL_ETH_MAC_TABLE_DROP_IDX, &entry);
684 }
685 
686 static void
687 al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx,
688     uint8_t udma, uint32_t queue)
689 {
690 
691 	if (udma != 0)
692 		panic("only UDMA0 is supporter");
693 
694 	if (queue >= AL_ETH_NUM_QUEUES)
695 		panic("invalid queue number");
696 
697 	al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue);
698 }
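
/*
 * A hedged note: callers (outside this section) are expected to program
 * every RX thash entry, typically spreading entries across queues, e.g.
 * al_eth_set_thash_table_entry(adapter, i, 0, i % AL_ETH_NUM_QUEUES).
 */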
699 
/*
 * Initialize the RX FSM table. No tunneling is supported yet; if the packet
 * is TCP/UDP over IPv4/IPv6, use a 4-tuple hash (e.g. an IPv4/TCP entry is
 * set to AL_ETH_FSM_DATA_OUTER_4_TUPLE | AL_ETH_FSM_DATA_HASH_SEL),
 * otherwise fall back to a 2-tuple hash or the default queue/UDMA.
 */
701 static void
702 al_eth_fsm_table_init(struct al_eth_adapter *adapter)
703 {
704 	uint32_t val;
705 	int i;
706 
707 	for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) {
708 		uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i);
709 		switch (outer_type) {
710 		case AL_ETH_FSM_ENTRY_IPV4_TCP:
711 		case AL_ETH_FSM_ENTRY_IPV4_UDP:
712 		case AL_ETH_FSM_ENTRY_IPV6_TCP:
713 		case AL_ETH_FSM_ENTRY_IPV6_UDP:
714 			val = AL_ETH_FSM_DATA_OUTER_4_TUPLE |
715 			    AL_ETH_FSM_DATA_HASH_SEL;
716 			break;
717 		case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP:
718 		case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP:
719 			val = AL_ETH_FSM_DATA_OUTER_2_TUPLE |
720 			    AL_ETH_FSM_DATA_HASH_SEL;
721 			break;
722 		default:
723 			val = AL_ETH_FSM_DATA_DEFAULT_Q |
724 			    AL_ETH_FSM_DATA_DEFAULT_UDMA;
725 		}
726 		al_eth_fsm_table_set(&adapter->hal_adapter, i, val);
727 	}
728 }
729 
730 static void
731 al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter,
732     uint8_t idx)
733 {
734 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
735 
736 	device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx);
737 
738 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
739 }
740 
741 static int
742 al_eth_hw_init_adapter(struct al_eth_adapter *adapter)
743 {
744 	struct al_eth_adapter_params *params = &adapter->eth_hal_params;
745 	int rc;
746 
747 	/* params->dev_id = adapter->dev_id; */
748 	params->rev_id = adapter->rev_id;
749 	params->udma_id = 0;
	params->enable_rx_parser = 1; /* enable RX EPE parser */
751 	params->udma_regs_base = adapter->udma_base; /* UDMA register base address */
752 	params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */
753 	params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */
754 	params->name = adapter->name;
755 	params->serdes_lane = adapter->serdes_lane;
756 
757 	rc = al_eth_adapter_init(&adapter->hal_adapter, params);
758 	if (rc != 0)
759 		device_printf(adapter->dev, "%s failed at hal init!\n",
760 		    __func__);
761 
762 	if ((adapter->board_type == ALPINE_NIC) ||
763 	    (adapter->board_type == ALPINE_FPGA_NIC)) {
		/* In PCIe NIC mode, force the eth UDMA to access PCIE0 using the tgtid. */
765 		struct al_udma_gen_tgtid_conf conf;
766 		int i;
767 		for (i = 0; i < DMA_MAX_Q; i++) {
768 			conf.tx_q_conf[i].queue_en = AL_TRUE;
769 			conf.tx_q_conf[i].desc_en = AL_FALSE;
770 			conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
771 			conf.rx_q_conf[i].queue_en = AL_TRUE;
772 			conf.rx_q_conf[i].desc_en = AL_FALSE;
773 			conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
774 		}
775 		al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf);
776 	}
777 
778 	return (rc);
779 }
780 
781 static void
782 al_eth_lm_config(struct al_eth_adapter *adapter)
783 {
784 	struct al_eth_lm_init_params params = {0};
785 
786 	params.adapter = &adapter->hal_adapter;
787 	params.serdes_obj = &adapter->serdes_obj;
788 	params.lane = adapter->serdes_lane;
789 	params.sfp_detection = adapter->sfp_detection_needed;
790 	if (adapter->sfp_detection_needed == true) {
791 		params.sfp_bus_id = adapter->i2c_adapter_id;
792 		params.sfp_i2c_addr = SFP_I2C_ADDR;
793 	}
794 
795 	if (adapter->sfp_detection_needed == false) {
796 		switch (adapter->mac_mode) {
797 		case AL_ETH_MAC_MODE_10GbE_Serial:
798 			if ((adapter->lt_en != 0) && (adapter->an_en != 0))
799 				params.default_mode = AL_ETH_LM_MODE_10G_DA;
800 			else
801 				params.default_mode = AL_ETH_LM_MODE_10G_OPTIC;
802 			break;
803 		case AL_ETH_MAC_MODE_SGMII:
804 			params.default_mode = AL_ETH_LM_MODE_1G;
805 			break;
806 		default:
807 			params.default_mode = AL_ETH_LM_MODE_10G_DA;
808 		}
809 	} else
810 		params.default_mode = AL_ETH_LM_MODE_10G_DA;
811 
812 	params.link_training = adapter->lt_en;
813 	params.rx_equal = true;
814 	params.static_values = !adapter->dont_override_serdes;
815 	params.i2c_context = adapter;
816 	params.kr_fec_enable = false;
817 
818 	params.retimer_exist = adapter->retimer.exist;
819 	params.retimer_bus_id = adapter->retimer.bus_id;
820 	params.retimer_i2c_addr = adapter->retimer.i2c_addr;
821 	params.retimer_channel = adapter->retimer.channel;
822 
823 	al_eth_lm_init(&adapter->lm_context, &params);
824 }
825 
826 static int
827 al_eth_board_params_init(struct al_eth_adapter *adapter)
828 {
829 
830 	if (adapter->board_type == ALPINE_NIC) {
831 		adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
832 		adapter->sfp_detection_needed = false;
833 		adapter->phy_exist = false;
834 		adapter->an_en = false;
835 		adapter->lt_en = false;
836 		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
837 		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
838 	} else if (adapter->board_type == ALPINE_FPGA_NIC) {
839 		adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
840 		adapter->sfp_detection_needed = false;
841 		adapter->phy_exist = false;
842 		adapter->an_en = false;
843 		adapter->lt_en = false;
844 		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
845 		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
846 	} else {
847 		struct al_eth_board_params params;
848 		int rc;
849 
850 		adapter->auto_speed = false;
851 
852 		rc = al_eth_board_params_get(adapter->mac_base, &params);
853 		if (rc != 0) {
854 			device_printf(adapter->dev,
855 			    "board info not available\n");
856 			return (-1);
857 		}
858 
859 		adapter->phy_exist = params.phy_exist == true;
860 		adapter->phy_addr = params.phy_mdio_addr;
861 		adapter->an_en = params.autoneg_enable;
862 		adapter->lt_en = params.kr_lt_enable;
863 		adapter->serdes_grp = params.serdes_grp;
864 		adapter->serdes_lane = params.serdes_lane;
865 		adapter->sfp_detection_needed = params.sfp_plus_module_exist;
866 		adapter->i2c_adapter_id = params.i2c_adapter_id;
867 		adapter->ref_clk_freq = params.ref_clk_freq;
868 		adapter->dont_override_serdes = params.dont_override_serdes;
869 		adapter->link_config.active_duplex = !params.half_duplex;
870 		adapter->link_config.autoneg = !params.an_disable;
871 		adapter->link_config.force_1000_base_x = params.force_1000_base_x;
872 		adapter->retimer.exist = params.retimer_exist;
873 		adapter->retimer.bus_id = params.retimer_bus_id;
874 		adapter->retimer.i2c_addr = params.retimer_i2c_addr;
875 		adapter->retimer.channel = params.retimer_channel;
876 
877 		switch (params.speed) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid speed (%d)\n", __func__, params.speed);
			/* FALLTHROUGH */
881 		case AL_ETH_BOARD_1G_SPEED_1000M:
882 			adapter->link_config.active_speed = 1000;
883 			break;
884 		case AL_ETH_BOARD_1G_SPEED_100M:
885 			adapter->link_config.active_speed = 100;
886 			break;
887 		case AL_ETH_BOARD_1G_SPEED_10M:
888 			adapter->link_config.active_speed = 10;
889 			break;
890 		}
891 
892 		switch (params.mdio_freq) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid mdio freq (%d)\n", __func__,
			    params.mdio_freq);
			/* FALLTHROUGH */
897 		case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ:
898 			adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
899 			break;
900 		case AL_ETH_BOARD_MDIO_FREQ_1_MHZ:
901 			adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ;
902 			break;
903 		}
904 
905 		switch (params.media_type) {
906 		case AL_ETH_BOARD_MEDIA_TYPE_RGMII:
907 			if (params.sfp_plus_module_exist == true)
908 				/* Backward compatibility */
909 				adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
910 			else
911 				adapter->mac_mode = AL_ETH_MAC_MODE_RGMII;
912 
913 			adapter->use_lm = false;
914 			break;
915 		case AL_ETH_BOARD_MEDIA_TYPE_SGMII:
916 			adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
917 			adapter->use_lm = true;
918 			break;
919 		case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR:
920 			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
921 			adapter->use_lm = true;
922 			break;
923 		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT:
924 			adapter->sfp_detection_needed = true;
925 			adapter->auto_speed = false;
926 			adapter->use_lm = true;
927 			break;
928 		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED:
929 			adapter->sfp_detection_needed = true;
930 			adapter->auto_speed = true;
931 			adapter->mac_mode_set = false;
932 			adapter->use_lm = true;
933 
934 			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
935 			break;
936 		default:
937 			device_printf(adapter->dev,
938 			    "%s: unsupported media type %d\n",
939 			    __func__, params.media_type);
940 			return (-1);
941 		}
942 
943 		device_printf(adapter->dev,
944 		    "Board info: phy exist %s. phy addr %d. mdio freq %u Khz. "
945 		    "SFP connected %s. media %d\n",
946 		    params.phy_exist ? "Yes" : "No",
947 		    params.phy_mdio_addr, adapter->mdio_freq,
948 		    params.sfp_plus_module_exist ? "Yes" : "No",
949 		    params.media_type);
950 	}
951 
952 	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
953 
954 	return (0);
955 }
956 
957 static int
958 al_eth_function_reset(struct al_eth_adapter *adapter)
959 {
960 	struct al_eth_board_params params;
961 	int rc;
962 
963 	/* save board params so we restore it after reset */
964 	al_eth_board_params_get(adapter->mac_base, &params);
965 	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
966 	if (adapter->board_type == ALPINE_INTEGRATED)
967 		rc = al_eth_flr_rmn(&al_eth_read_pci_config,
968 		    &al_eth_write_pci_config,
969 		    adapter->dev, adapter->mac_base);
970 	else
971 		rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config,
972 		    &al_eth_fpga_write_pci_config,
973 		    adapter->internal_pcie_base, adapter->mac_base);
974 
975 	/* restore params */
976 	al_eth_board_params_set(adapter->mac_base, &params);
977 	al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr);
978 
979 	return (rc);
980 }
981 
982 static void
983 al_eth_init_rings(struct al_eth_adapter *adapter)
984 {
985 	int i;
986 
987 	for (i = 0; i < adapter->num_tx_queues; i++) {
988 		struct al_eth_ring *ring = &adapter->tx_ring[i];
989 
990 		ring->ring_id = i;
991 		ring->dev = adapter->dev;
992 		ring->adapter = adapter;
993 		ring->netdev = adapter->netdev;
994 		al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
995 		    &ring->dma_q);
996 		ring->sw_count = adapter->tx_ring_count;
997 		ring->hw_count = adapter->tx_descs_count;
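		/*
		 * Per-ring interrupt unmasking: the unmask value has all
		 * bits set except this ring's own IOFIC bit.
		 */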
		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
		    (struct unit_regs *)adapter->udma_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
999 		ring->unmask_val = ~(1 << i);
1000 	}
1001 
1002 	for (i = 0; i < adapter->num_rx_queues; i++) {
1003 		struct al_eth_ring *ring = &adapter->rx_ring[i];
1004 
1005 		ring->ring_id = i;
1006 		ring->dev = adapter->dev;
1007 		ring->adapter = adapter;
1008 		ring->netdev = adapter->netdev;
1009 		al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q);
1010 		ring->sw_count = adapter->rx_ring_count;
1011 		ring->hw_count = adapter->rx_descs_count;
1012 		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
1013 		    (struct unit_regs *)adapter->udma_base,
1014 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1015 		ring->unmask_val = ~(1 << i);
1016 	}
1017 }
1018 
1019 static void
1020 al_init_locked(void *arg)
1021 {
1022 	struct al_eth_adapter *adapter = arg;
1023 	if_t ifp = adapter->netdev;
1024 	int rc = 0;
1025 
1026 	al_eth_down(adapter);
1027 	rc = al_eth_up(adapter);
1028 
1029 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1030 	if (rc == 0)
1031 		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1032 }
1033 
1034 static void
1035 al_init(void *arg)
1036 {
1037 	struct al_eth_adapter *adapter = arg;
1038 
1039 	al_init_locked(adapter);
1040 }
1041 
1042 static inline int
1043 al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
1044     struct al_eth_ring *rx_ring,
1045     struct al_eth_rx_buffer *rx_info)
1046 {
1047 	struct al_buf *al_buf;
1048 	bus_dma_segment_t segs[2];
1049 	int error;
1050 	int nsegs;
1051 
1052 	if (rx_info->m != NULL)
1053 		return (0);
1054 
1055 	rx_info->data_size = adapter->rx_mbuf_sz;
1056 
1057 	AL_RX_LOCK(adapter);
1058 
1059 	/* Get mbuf using UMA allocator */
1060 	rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1061 	    rx_info->data_size);
1062 	AL_RX_UNLOCK(adapter);
1063 
1064 	if (rx_info->m == NULL)
1065 		return (ENOMEM);
1066 
1067 	rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;
1068 
1069 	/* Map packets for DMA */
1070 	error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
1071 	    rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
1072 	if (__predict_false(error)) {
1073 		device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
1074 		    error);
1075 		m_freem(rx_info->m);
1076 		rx_info->m = NULL;
1077 		return (EFAULT);
1078 	}
1079 
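	/*
	 * Offset the buffer by AL_IP_ALIGNMENT_OFFSET (2 bytes) so that the
	 * IP header ends up 4-byte aligned after the 14-byte Ethernet header.
	 */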
1080 	al_buf = &rx_info->al_buf;
1081 	al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
1082 	al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;
1083 
1084 	return (0);
1085 }
1086 
1087 static int
1088 al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
1089     unsigned int num)
1090 {
1091 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
1092 	uint16_t next_to_use;
1093 	unsigned int i;
1094 
1095 	next_to_use = rx_ring->next_to_use;
1096 
1097 	for (i = 0; i < num; i++) {
1098 		int rc;
1099 		struct al_eth_rx_buffer *rx_info =
1100 		    &rx_ring->rx_buffer_info[next_to_use];
1101 
1102 		if (__predict_false(al_eth_alloc_rx_buf(adapter,
1103 		    rx_ring, rx_info) < 0)) {
1104 			device_printf(adapter->dev,
1105 			    "failed to alloc buffer for rx queue %d\n", qid);
1106 			break;
1107 		}
1108 
1109 		rc = al_eth_rx_buffer_add(rx_ring->dma_q,
1110 		    &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
1111 		if (__predict_false(rc)) {
1112 			device_printf(adapter->dev,
1113 			    "failed to add buffer for rx queue %d\n", qid);
1114 			break;
1115 		}
1116 
1117 		next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
1118 	}
1119 
1120 	if (__predict_false(i < num))
1121 		device_printf(adapter->dev,
1122 		    "refilled rx queue %d with %d pages only - available %d\n",
1123 		    qid, i, al_udma_available_get(rx_ring->dma_q));
1124 
1125 	if (__predict_true(i))
1126 		al_eth_rx_buffer_action(rx_ring->dma_q, i);
1127 
1128 	rx_ring->next_to_use = next_to_use;
1129 
1130 	return (i);
1131 }
1132 
1133 /*
1134  * al_eth_refill_all_rx_bufs - allocate all queues Rx buffers
1135  * @adapter: board private structure
1136  */
1137 static void
1138 al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
1139 {
1140 	int i;
1141 
1142 	for (i = 0; i < adapter->num_rx_queues; i++)
1143 		al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
1144 }
1145 
1146 static void
1147 al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
1148 {
1149 	unsigned int total_done;
1150 	uint16_t next_to_clean;
1151 	int qid = tx_ring->ring_id;
1152 
1153 	total_done = al_eth_comp_tx_get(tx_ring->dma_q);
1154 	device_printf_dbg(tx_ring->dev,
1155 	    "tx_poll: q %d total completed descs %x\n", qid, total_done);
1156 	next_to_clean = tx_ring->next_to_clean;
1157 
1158 	while (total_done != 0) {
1159 		struct al_eth_tx_buffer *tx_info;
1160 		struct mbuf *mbuf;
1161 
1162 		tx_info = &tx_ring->tx_buffer_info[next_to_clean];
1163 		/* stop if not all descriptors of the packet are completed */
1164 		if (tx_info->tx_descs > total_done)
1165 			break;
1166 
1167 		mbuf = tx_info->m;
1168 
1169 		tx_info->m = NULL;
1170 
1171 		device_printf_dbg(tx_ring->dev,
1172 		    "tx_poll: q %d mbuf %p completed\n", qid, mbuf);
1173 
1174 		/* map is no longer required */
1175 		bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);
1176 
1177 		m_freem(mbuf);
1178 		total_done -= tx_info->tx_descs;
1179 		next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
1180 	}
1181 
1182 	tx_ring->next_to_clean = next_to_clean;
1183 
1184 	device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
1185 	    qid, next_to_clean);
1186 
1187 	/*
1188 	 * need to make the rings circular update visible to
1189 	 * al_eth_start_xmit() before checking for netif_queue_stopped().
1190 	 */
1191 	al_smp_data_memory_barrier();
1192 }
1193 
1194 static void
1195 al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
1196     struct al_eth_pkt *hal_pkt, struct mbuf *m)
1197 {
1198 	uint32_t mss = m->m_pkthdr.tso_segsz;
1199 	struct ether_vlan_header *eh;
1200 	uint16_t etype;
1201 #ifdef INET
1202 	struct ip *ip;
1203 #endif
1204 #ifdef INET6
1205 	struct ip6_hdr *ip6;
1206 #endif
1207 	struct tcphdr *th = NULL;
1208 	int	ehdrlen, ip_hlen = 0;
1209 	uint8_t	ipproto = 0;
1210 	uint32_t offload = 0;
1211 
1212 	if (mss != 0)
1213 		offload = 1;
1214 
1215 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0)
1216 		offload = 1;
1217 
1218 	if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
1219 		offload = 1;
1220 
1221 	if (offload != 0) {
1222 		struct al_eth_meta_data *meta = &tx_ring->hal_meta;
1223 
1224 		if (mss != 0)
1225 			hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO |
1226 			    AL_ETH_TX_FLAGS_L4_CSUM);
1227 		else
1228 			hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM |
1229 			    AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM);
1230 
1231 		/*
1232 		 * Determine where frame payload starts.
1233 		 * Jump over vlan headers if already present,
1234 		 * helpful for QinQ too.
1235 		 */
1236 		eh = mtod(m, struct ether_vlan_header *);
1237 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1238 			etype = ntohs(eh->evl_proto);
1239 			ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1240 		} else {
1241 			etype = ntohs(eh->evl_encap_proto);
1242 			ehdrlen = ETHER_HDR_LEN;
1243 		}
1244 
1245 		switch (etype) {
1246 #ifdef INET
1247 		case ETHERTYPE_IP:
1248 			ip = (struct ip *)(m->m_data + ehdrlen);
1249 			ip_hlen = ip->ip_hl << 2;
1250 			ipproto = ip->ip_p;
1251 			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4;
1252 			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1253 			if (mss != 0)
1254 				hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM;
1255 			if (ipproto == IPPROTO_TCP)
1256 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
1257 			else
1258 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
1259 			break;
1260 #endif /* INET */
1261 #ifdef INET6
1262 		case ETHERTYPE_IPV6:
1263 			ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1264 			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6;
1265 			ip_hlen = sizeof(struct ip6_hdr);
1266 			th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1267 			ipproto = ip6->ip6_nxt;
1268 			if (ipproto == IPPROTO_TCP)
1269 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
1270 			else
1271 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
1272 			break;
1273 #endif /* INET6 */
1274 		default:
1275 			break;
1276 		}
1277 
1278 		meta->words_valid = 4;
1279 		meta->l3_header_len = ip_hlen;
1280 		meta->l3_header_offset = ehdrlen;
1281 		if (th != NULL)
1282 			meta->l4_header_len = th->th_off; /* this param needed only for TSO */
1283 		meta->mss_idx_sel = 0;			/* check how to select MSS */
1284 		meta->mss_val = mss;
1285 		hal_pkt->meta = meta;
1286 	} else
1287 		hal_pkt->meta = NULL;
1288 }
1289 
1290 #define	XMIT_QUEUE_TIMEOUT	100
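
/*
 * Each iteration of the wait loop below sleeps one tick via pause(9), so
 * the stall timeout is roughly XMIT_QUEUE_TIMEOUT ticks, i.e. about
 * 100 ms at the default hz=1000.
 */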
1291 
1292 static void
1293 al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m)
1294 {
1295 	struct al_eth_tx_buffer *tx_info;
1296 	int error;
1297 	int nsegs, a;
1298 	uint16_t next_to_use;
1299 	bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1];
1300 	struct al_eth_pkt *hal_pkt;
1301 	struct al_buf *al_buf;
1302 	bool remap;
1303 
1304 	/* Check if queue is ready */
	if (unlikely(tx_ring->stall != 0)) {
1306 		for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) {
1307 			if (al_udma_available_get(tx_ring->dma_q) >=
1308 			    (AL_ETH_DEFAULT_TX_HW_DESCS -
1309 			    AL_ETH_TX_WAKEUP_THRESH)) {
1310 				tx_ring->stall = 0;
1311 				break;
1312 			}
1313 			pause("stall", 1);
1314 		}
1315 		if (a == XMIT_QUEUE_TIMEOUT) {
1316 			device_printf(tx_ring->dev,
1317 			    "timeout waiting for queue %d ready!\n",
1318 			    tx_ring->ring_id);
1319 			return;
1320 		} else {
1321 			device_printf_dbg(tx_ring->dev,
1322 			    "queue %d is ready!\n", tx_ring->ring_id);
1323 		}
1324 	}
1325 
	if (m == NULL) {
		device_printf(tx_ring->dev, "mbuf is NULL\n");
		return;
	}

	next_to_use = tx_ring->next_to_use;
	tx_info = &tx_ring->tx_buffer_info[next_to_use];
	tx_info->m = m;
	hal_pkt = &tx_info->hal_pkt;
1335 
1336 	remap = true;
1337 	/* Map packets for DMA */
1338 retry:
1339 	error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map,
1340 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
1341 	if (__predict_false(error)) {
1342 		struct mbuf *m_new;
1343 
1344 		if (error == EFBIG) {
1345 			/* Try it again? - one try */
1346 			if (remap == true) {
1347 				remap = false;
1348 				m_new = m_defrag(m, M_NOWAIT);
1349 				if (m_new == NULL) {
1350 					device_printf(tx_ring->dev,
1351 					    "failed to defrag mbuf\n");
1352 					goto exit;
1353 				}
1354 				m = m_new;
1355 				goto retry;
1356 			} else {
1357 				device_printf(tx_ring->dev,
1358 				    "failed to map mbuf, error %d\n", error);
1359 				goto exit;
1360 			}
1361 		} else {
1362 			device_printf(tx_ring->dev,
1363 			    "failed to map mbuf, error %d\n", error);
1364 			goto exit;
1365 		}
1366 	}
1367 
1368 	/* set flags and meta data */
1369 	hal_pkt->flags = AL_ETH_TX_FLAGS_INT;
1370 	al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m);
1371 
1372 	al_buf = hal_pkt->bufs;
1373 	for (a = 0; a < nsegs; a++) {
1374 		al_buf->addr = segs[a].ds_addr;
1375 		al_buf->len = segs[a].ds_len;
1376 
1377 		al_buf++;
1378 	}
1379 
1380 	hal_pkt->num_of_bufs = nsegs;
1381 
1382 	/* prepare the packet's descriptors to dma engine */
1383 	tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);
1384 
1385 	if (tx_info->tx_descs == 0)
1386 		goto exit;
1387 
1388 	/*
1389 	 * stop the queue when no more space available, the packet can have up
1390 	 * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor
1391 	 */
1392 	if (unlikely(al_udma_available_get(tx_ring->dma_q) <
1393 	    (AL_ETH_PKT_MAX_BUFS + 2))) {
1394 		tx_ring->stall = 1;
1395 		device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n",
1396 		    tx_ring->ring_id);
1397 		al_data_memory_barrier();
1398 	}
1399 
1400 	tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);
1401 
1402 	/* trigger the dma engine */
1403 	al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
1404 	return;
1405 
1406 exit:
1407 	m_freem(m);
1408 }
1409 
1410 static void
1411 al_eth_tx_cmpl_work(void *arg, int pending)
1412 {
1413 	struct al_eth_ring *tx_ring = arg;
1414 
1415 	if (napi != 0) {
1416 		tx_ring->cmpl_is_running = 1;
1417 		al_data_memory_barrier();
1418 	}
1419 
1420 	al_eth_tx_do_cleanup(tx_ring);
1421 
1422 	if (napi != 0) {
1423 		tx_ring->cmpl_is_running = 0;
1424 		al_data_memory_barrier();
1425 	}
1426 	/* all work done, enable IRQs */
1427 	al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
1428 }
1429 
1430 static int
1431 al_eth_tx_cmlp_irq_filter(void *arg)
1432 {
1433 	struct al_eth_ring *tx_ring = arg;
1434 
1435 	/* Interrupt should be auto-masked upon arrival */
1436 
1437 	device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
1438 	    tx_ring->ring_id);
1439 
1440 	/*
1441 	 * For napi, if work is not running, schedule it. Always schedule
	 * for regular (non-napi) packet handling.
1443 	 */
	if ((napi == 0) || ((napi != 0) && (tx_ring->cmpl_is_running == 0)))
1445 		taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
1446 
1447 	/* Do not run bottom half */
1448 	return (FILTER_HANDLED);
1449 }
1450 
1451 static int
1452 al_eth_rx_recv_irq_filter(void *arg)
1453 {
1454 	struct al_eth_ring *rx_ring = arg;
1455 
1456 	/* Interrupt should be auto-masked upon arrival */
1457 
1458 	device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__,
1459 	    rx_ring->ring_id);
1460 
1461 	/*
1462 	 * For napi, if work is not running, schedule it. Always schedule
	 * for regular (non-napi) packet handling.
1464 	 */
	if ((napi == 0) || ((napi != 0) && (rx_ring->enqueue_is_running == 0)))
1466 		taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
1467 
1468 	/* Do not run bottom half */
1469 	return (FILTER_HANDLED);
1470 }
1471 
1472 /*
1473  * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum
1474  * @adapter: structure containing adapter specific data
1475  * @hal_pkt: HAL structure for the packet
1476  * @mbuf: mbuf currently being received and modified
1477  */
1478 static inline void
1479 al_eth_rx_checksum(struct al_eth_adapter *adapter,
1480     struct al_eth_pkt *hal_pkt, struct mbuf *mbuf)
1481 {
1482 
1483 	/* if IPv4 and error */
1484 	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM) &&
1485 	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) &&
1486 	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
1487 		device_printf(adapter->dev,"rx ipv4 header checksum error\n");
1488 		return;
1489 	}
1490 
1491 	/* if IPv6 and error */
1492 	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM_IPV6) &&
1493 	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) &&
1494 	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
1495 		device_printf(adapter->dev,"rx ipv6 header checksum error\n");
1496 		return;
1497 	}
1498 
1499 	/* if TCP/UDP */
1500 	if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) ||
1501 	   (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) {
1502 		if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) {
1503 			device_printf_dbg(adapter->dev, "rx L4 checksum error\n");
1504 
1505 			/* TCP/UDP checksum error */
1506 			mbuf->m_pkthdr.csum_flags = 0;
1507 		} else {
1508 			device_printf_dbg(adapter->dev, "rx checksum correct\n");
1509 
1510 			/* IP Checksum Good */
1511 			mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1512 			mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
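			/*
			 * Only the IP header checksum is marked verified;
			 * CSUM_DATA_VALID is not set, so the stack still
			 * verifies the L4 checksum in software.
			 */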
1513 		}
1514 	}
1515 }
1516 
1517 static struct mbuf*
1518 al_eth_rx_mbuf(struct al_eth_adapter *adapter,
1519     struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt,
1520     unsigned int descs, uint16_t *next_to_clean)
1521 {
1522 	struct mbuf *mbuf;
1523 	struct al_eth_rx_buffer *rx_info =
1524 	    &rx_ring->rx_buffer_info[*next_to_clean];
1525 	unsigned int len;
1526 
1527 	len = hal_pkt->bufs[0].len;
1528 	device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info,
1529 	   rx_info->m);
1530 
1531 	if (rx_info->m == NULL) {
1532 		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1533 		    *next_to_clean);
1534 		return (NULL);
1535 	}
1536 
1537 	mbuf = rx_info->m;
1538 	mbuf->m_pkthdr.len = len;
1539 	mbuf->m_len = len;
1540 	mbuf->m_pkthdr.rcvif = rx_ring->netdev;
1541 	mbuf->m_flags |= M_PKTHDR;
1542 
1543 	if (len <= adapter->small_copy_len) {
1544 		struct mbuf *smbuf;
1545 		device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len);
1546 
1547 		AL_RX_LOCK(adapter);
1548 		smbuf = m_gethdr(M_NOWAIT, MT_DATA);
1549 		AL_RX_UNLOCK(adapter);
1550 		if (__predict_false(smbuf == NULL)) {
1551 			device_printf(adapter->dev, "smbuf is NULL\n");
1552 			return (NULL);
1553 		}
1554 
1555 		smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1556 		memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len);
1557 
1558 		smbuf->m_len = len;
1559 		smbuf->m_pkthdr.rcvif = rx_ring->netdev;
1560 
1561 		/* first desc of a non-ps chain */
1562 		smbuf->m_flags |= M_PKTHDR;
1563 		smbuf->m_pkthdr.len = smbuf->m_len;
1564 
1565 		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1566 		    *next_to_clean);
1567 
1568 		return (smbuf);
1569 	}
1570 	mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1571 
1572 	/* Unmap the buffer */
1573 	bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map);
1574 
1575 	rx_info->m = NULL;
1576 	*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean);
1577 
1578 	return (mbuf);
1579 }
1580 
1581 static void
1582 al_eth_rx_recv_work(void *arg, int pending)
1583 {
1584 	struct al_eth_ring *rx_ring = arg;
1585 	struct mbuf *mbuf;
1586 	struct lro_entry *queued;
1587 	unsigned int qid = rx_ring->ring_id;
1588 	struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
1589 	uint16_t next_to_clean = rx_ring->next_to_clean;
1590 	uint32_t refill_required;
1591 	uint32_t refill_actual;
1592 	uint32_t do_if_input;
1593 
1594 	if (napi != 0) {
1595 		rx_ring->enqueue_is_running = 1;
1596 		al_data_memory_barrier();
1597 	}
1598 
1599 	do {
1600 		unsigned int descs;
1601 
1602 		descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt);
1603 		if (unlikely(descs == 0))
1604 			break;
1605 
1606 		device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet "
1607 		    "from hal. descs %d\n", qid, descs);
1608 		device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. "
1609 		    "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags,
1610 		    hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx);
1611 
1612 		/* ignore if detected dma or eth controller errors */
1613 		if ((hal_pkt->flags & (AL_ETH_RX_ERROR |
1614 		    AL_UDMA_CDESC_ERROR)) != 0) {
1615 			device_printf(rx_ring->dev, "receive packet with error. "
1616 			    "flags = 0x%x\n", hal_pkt->flags);
1617 			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1618 			    next_to_clean, descs);
1619 			continue;
1620 		}
1621 
1622 		/* allocate mbuf and fill it */
1623 		mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs,
1624 		    &next_to_clean);
1625 
1626 		/* exit if we failed to retrieve a buffer */
1627 		if (unlikely(mbuf == NULL)) {
1628 			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1629 			    next_to_clean, descs);
1630 			break;
1631 		}
1632 
1633 		if (__predict_true(if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM ||
1634 		    if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM_IPV6)) {
1635 			al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf);
1636 		}
1637 
1638 		mbuf->m_pkthdr.flowid = qid;
1639 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
1640 
1641 		/*
1642 		 * LRO is only for IP/TCP packets and TCP checksum of the packet
1643 		 * should be computed by hardware.
1644 		 */
1645 		do_if_input = 1;
1646 		if ((rx_ring->lro_enabled != 0) &&
1647 		    ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
1648 		    hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) {
1649 			/*
1650 			 * Send to the stack if:
1651 			 *  - LRO not enabled, or
1652 			 *  - no LRO resources, or
1653 			 *  - lro enqueue fails
1654 			 */
1655 			if (rx_ring->lro.lro_cnt != 0) {
1656 				if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
1657 					do_if_input = 0;
1658 			}
1659 		}
1660 
1661 		if (do_if_input)
1662 			if_input(rx_ring->netdev, mbuf);
1663 
1664 	} while (1);
1665 
1666 	rx_ring->next_to_clean = next_to_clean;
1667 
1668 	refill_required = al_udma_available_get(rx_ring->dma_q);
1669 	refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid,
1670 	    refill_required);
1671 
1672 	if (unlikely(refill_actual < refill_required)) {
1673 		device_printf_dbg(rx_ring->dev,
1674 		    "%s: not filling rx queue %d\n", __func__, qid);
1675 	}
1676 
1677 	while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) {
1678 		LIST_REMOVE(queued, next);
1679 		tcp_lro_flush(&rx_ring->lro, queued);
1680 	}
1681 
1682 	if (napi != 0) {
1683 		rx_ring->enqueue_is_running = 0;
1684 		al_data_memory_barrier();
1685 	}
1686 	/* unmask irq */
1687 	al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val);
1688 }
1689 
1690 static void
1691 al_eth_start_xmit(void *arg, int pending)
1692 {
1693 	struct al_eth_ring *tx_ring = arg;
1694 	struct mbuf *mbuf;
1695 
1696 	if (napi != 0) {
1697 		tx_ring->enqueue_is_running = 1;
1698 		al_data_memory_barrier();
1699 	}
1700 
1701 	while (1) {
1702 		mtx_lock(&tx_ring->br_mtx);
1703 		mbuf = drbr_dequeue(NULL, tx_ring->br);
1704 		mtx_unlock(&tx_ring->br_mtx);
1705 
1706 		if (mbuf == NULL)
1707 			break;
1708 
1709 		al_eth_xmit_mbuf(tx_ring, mbuf);
1710 	}
1711 
1712 	if (napi != 0) {
1713 		tx_ring->enqueue_is_running = 0;
1714 		al_data_memory_barrier();
1715 		while (1) {
1716 			mtx_lock(&tx_ring->br_mtx);
1717 			mbuf = drbr_dequeue(NULL, tx_ring->br);
1718 			mtx_unlock(&tx_ring->br_mtx);
1719 			if (mbuf == NULL)
1720 				break;
1721 			al_eth_xmit_mbuf(tx_ring, mbuf);
1722 		}
1723 	}
1724 }
1725 
1726 static int
1727 al_mq_start(if_t ifp, struct mbuf *m)
1728 {
1729 	struct al_eth_adapter *adapter = if_getsoftc(ifp);
1730 	struct al_eth_ring *tx_ring;
1731 	int i;
1732 	int ret;
1733 
1734 	/* Which queue to use */
1735 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1736 		i = m->m_pkthdr.flowid % adapter->num_tx_queues;
1737 	else
1738 		i = curcpu % adapter->num_tx_queues;
1739 
1740 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1741 	    IFF_DRV_RUNNING) {
1742 		return (EFAULT);
1743 	}
1744 
1745 	tx_ring = &adapter->tx_ring[i];
1746 
1747 	device_printf_dbg(adapter->dev, "dgb start() - assuming link is active, "
1748 	    "sending packet to queue %d\n", i);
1749 
1750 	ret = drbr_enqueue(ifp, tx_ring->br, m);
1751 
1752 	/*
1753 	 * For napi, if work is not running, schedule it. Always schedule
	 * for regular (non-napi) packet handling.
1755 	 */
1756 	if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0)))
1757 		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
1758 
1759 	return (ret);
1760 }
1761 
1762 static void
1763 al_qflush(if_t ifp)
1764 {
1765 
1766 	/* unused */
1767 }
1768 
1769 static inline void
1770 al_eth_flow_ctrl_init(struct al_eth_adapter *adapter)
1771 {
1772 	uint8_t default_flow_ctrl;
1773 
1774 	default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE;
1775 	default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE;
1776 
1777 	adapter->link_config.flow_ctrl_supported = default_flow_ctrl;
1778 }
1779 
1780 static int
1781 al_eth_flow_ctrl_config(struct al_eth_adapter *adapter)
1782 {
1783 	struct al_eth_flow_control_params *flow_ctrl_params;
1784 	uint8_t active = adapter->link_config.flow_ctrl_active;
1785 	int i;
1786 
1787 	flow_ctrl_params = &adapter->flow_ctrl_params;
1788 
1789 	flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
1790 	flow_ctrl_params->obay_enable =
1791 	    ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
1792 	flow_ctrl_params->gen_enable =
1793 	    ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);
1794 
1795 	flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH;
1796 	flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW;
1797 	flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA;
1798 	flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH;
1799 
1800 	/* map priority to queue index, queue id = priority/2 */
1801 	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
1802 		flow_ctrl_params->prio_q_map[0][i] =  1 << (i >> 1);
1803 
1804 	al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params);
1805 
1806 	return (0);
1807 }
1808 
1809 static void
1810 al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter)
1811 {
1812 
1813 	/*
1814 	 * change the active configuration to the default / force by ethtool
1815 	 * and call to configure
1816 	 */
1817 	adapter->link_config.flow_ctrl_active =
1818 	    adapter->link_config.flow_ctrl_supported;
1819 
1820 	al_eth_flow_ctrl_config(adapter);
1821 }
1822 
1823 static void
1824 al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter)
1825 {
1826 
1827 	adapter->link_config.flow_ctrl_active = 0;
1828 	al_eth_flow_ctrl_config(adapter);
1829 }
1830 
1831 static int
1832 al_eth_hw_init(struct al_eth_adapter *adapter)
1833 {
1834 	int rc;
1835 
1836 	rc = al_eth_hw_init_adapter(adapter);
1837 	if (rc != 0)
1838 		return (rc);
1839 
1840 	rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
1841 	if (rc < 0) {
1842 		device_printf(adapter->dev, "%s failed to configure mac!\n",
1843 		    __func__);
1844 		return (rc);
1845 	}
1846 
1847 	if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
1848 	    (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
1849 	     adapter->phy_exist == false)) {
1850 		rc = al_eth_mac_link_config(&adapter->hal_adapter,
1851 		    adapter->link_config.force_1000_base_x,
1852 		    adapter->link_config.autoneg,
1853 		    adapter->link_config.active_speed,
1854 		    adapter->link_config.active_duplex);
1855 		if (rc != 0) {
1856 			device_printf(adapter->dev,
1857 			    "%s failed to configure link parameters!\n",
1858 			    __func__);
1859 			return (rc);
1860 		}
1861 	}
1862 
1863 	rc = al_eth_mdio_config(&adapter->hal_adapter,
1864 	    AL_ETH_MDIO_TYPE_CLAUSE_22, AL_TRUE /* shared_mdio_if */,
1865 	    adapter->ref_clk_freq, adapter->mdio_freq);
1866 	if (rc != 0) {
1867 		device_printf(adapter->dev, "%s failed at mdio config!\n",
1868 		    __func__);
1869 		return (rc);
1870 	}
1871 
1872 	al_eth_flow_ctrl_init(adapter);
1873 
1874 	return (rc);
1875 }
1876 
1877 static int
1878 al_eth_hw_stop(struct al_eth_adapter *adapter)
1879 {
1880 
1881 	al_eth_mac_stop(&adapter->hal_adapter);
1882 
1883 	/*
	 * Wait until pending RX packets are written out and the UDMA becomes
	 * idle. The MAC has a ~10KB FIFO; 10us should be enough time for the
	 * UDMA to flush to memory.
1887 	 */
1888 	DELAY(10);
1889 
1890 	al_eth_adapter_stop(&adapter->hal_adapter);
1891 
1892 	adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED;
1893 
	/* disable flow ctrl to avoid pause packets */
1895 	al_eth_flow_ctrl_disable(adapter);
1896 
1897 	return (0);
1898 }
1899 
1900 /*
 * al_eth_intr_intx_all - legacy INTx interrupt handler for all interrupts
 * @data: pointer to the adapter private structure
1904  */
1905 static int
1906 al_eth_intr_intx_all(void *data)
1907 {
1908 	struct al_eth_adapter *adapter = data;
1909 
1910 	struct unit_regs __iomem *regs_base =
1911 	    (struct unit_regs __iomem *)adapter->udma_base;
1912 	uint32_t reg;
1913 
1914 	reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
1915 	    AL_INT_GROUP_A);
1916 	if (likely(reg))
1917 		device_printf_dbg(adapter->dev, "%s group A cause %x\n",
1918 		    __func__, reg);
1919 
1920 	if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) {
1921 		struct al_iofic_grp_ctrl __iomem *sec_ints_base;
1922 		uint32_t cause_d =  al_udma_iofic_read_cause(regs_base,
1923 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);
1924 
1925 		sec_ints_base =
1926 		    &regs_base->gen.interrupt_regs.secondary_iofic_ctrl[0];
1927 		if (cause_d != 0) {
1928 			device_printf_dbg(adapter->dev,
1929 			    "got interrupt from group D. cause %x\n", cause_d);
1930 
1931 			cause_d = al_iofic_read_cause(sec_ints_base,
1932 			    AL_INT_GROUP_A);
1933 			device_printf(adapter->dev,
1934 			    "secondary A cause %x\n", cause_d);
1935 
1936 			cause_d = al_iofic_read_cause(sec_ints_base,
1937 			    AL_INT_GROUP_B);
1938 
1939 			device_printf_dbg(adapter->dev,
1940 			    "secondary B cause %x\n", cause_d);
1941 		}
1942 	}
	if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0) {
1944 		uint32_t cause_b = al_udma_iofic_read_cause(regs_base,
1945 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1946 		int qid;
		device_printf_dbg(adapter->dev, "group B cause %x\n",
		    cause_b);
1949 		for (qid = 0; qid < adapter->num_rx_queues; qid++) {
1950 			if (cause_b & (1 << qid)) {
1951 				/* mask */
1952 				al_udma_iofic_mask(
1953 				    (struct unit_regs __iomem *)adapter->udma_base,
1954 				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
1955 				    AL_INT_GROUP_B, 1 << qid);
1956 			}
1957 		}
1958 	}
1959 	if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) {
1960 		uint32_t cause_c = al_udma_iofic_read_cause(regs_base,
1961 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
1962 		int qid;
		device_printf_dbg(adapter->dev, "group C cause %x\n", cause_c);
1964 		for (qid = 0; qid < adapter->num_tx_queues; qid++) {
1965 			if ((cause_c & (1 << qid)) != 0) {
1966 				al_udma_iofic_mask(
1967 				    (struct unit_regs __iomem *)adapter->udma_base,
1968 				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
1969 				    AL_INT_GROUP_C, 1 << qid);
1970 			}
1971 		}
1972 	}
1973 
1974 	al_eth_tx_cmlp_irq_filter(adapter->tx_ring);
1975 
1976 	return (0);
1977 }
1978 
1979 static int
1980 al_eth_intr_msix_all(void *data)
1981 {
1982 	struct al_eth_adapter *adapter = data;
1983 
1984 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1985 	return (0);
1986 }
1987 
1988 static int
1989 al_eth_intr_msix_mgmt(void *data)
1990 {
1991 	struct al_eth_adapter *adapter = data;
1992 
1993 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1994 	return (0);
1995 }
1996 
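/*
 * al_eth_enable_msix - allocate MSI-X vectors
 *
 * Build the MSI-X entry table: entry 2 carries the management (group A)
 * interrupt, entries starting at 3 the Rx queue completions and entries
 * starting at 3 + AL_ETH_MAX_HW_QUEUES the Tx queue completions. The
 * first two table entries are not used by the driver, hence the "+ 2"
 * when the vectors are allocated from the PCI bus.
 */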
1997 static int
1998 al_eth_enable_msix(struct al_eth_adapter *adapter)
1999 {
2000 	int i, msix_vecs, rc, count;
2001 
2002 	device_printf_dbg(adapter->dev, "%s\n", __func__);
2003 	msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues;
2004 
2005 	device_printf_dbg(adapter->dev,
2006 	    "Try to enable MSIX, vector numbers = %d\n", msix_vecs);
2007 
2008 	adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries),
2009 	    M_IFAL, M_ZERO | M_WAITOK);
2010 
2011 	if (adapter->msix_entries == NULL) {
2012 		device_printf_dbg(adapter->dev, "failed to allocate"
2013 		    " msix_entries %d\n", msix_vecs);
2014 		rc = ENOMEM;
2015 		goto exit;
2016 	}
2017 
	/* management vector (GROUP_A) @2 */
2019 	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
2020 	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2021 
2022 	/* rx queues start @3 */
2023 	for (i = 0; i < adapter->num_rx_queues; i++) {
2024 		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2025 
2026 		adapter->msix_entries[irq_idx].entry = 3 + i;
2027 		adapter->msix_entries[irq_idx].vector = 0;
2028 	}
2029 	/* tx queues start @7 */
2030 	for (i = 0; i < adapter->num_tx_queues; i++) {
2031 		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2032 
2033 		adapter->msix_entries[irq_idx].entry = 3 +
2034 		    AL_ETH_MAX_HW_QUEUES + i;
2035 		adapter->msix_entries[irq_idx].vector = 0;
2036 	}
2037 
2038 	count = msix_vecs + 2; /* entries start from 2 */
2039 	rc = pci_alloc_msix(adapter->dev, &count);
2040 
2041 	if (rc != 0) {
2042 		device_printf_dbg(adapter->dev, "failed to allocate MSIX "
2043 		    "vectors %d\n", msix_vecs+2);
2044 		device_printf_dbg(adapter->dev, "ret = %d\n", rc);
2045 		goto msix_entries_exit;
2046 	}
2047 
2048 	if (count != msix_vecs + 2) {
2049 		device_printf_dbg(adapter->dev, "failed to allocate all MSIX "
2050 		    "vectors %d, allocated %d\n", msix_vecs+2, count);
2051 		rc = ENOSPC;
2052 		goto msix_entries_exit;
2053 	}
2054 
2055 	for (i = 0; i < msix_vecs; i++)
		adapter->msix_entries[i].vector = 2 + 1 + i;
2057 
2058 	device_printf_dbg(adapter->dev, "successfully enabled MSIX,"
2059 	    " vectors %d\n", msix_vecs);
2060 
2061 	adapter->msix_vecs = msix_vecs;
2062 	adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED;
2063 	goto exit;
2064 
2065 msix_entries_exit:
2066 	adapter->msix_vecs = 0;
2067 	free(adapter->msix_entries, M_IFAL);
2068 	adapter->msix_entries = NULL;
2069 
2070 exit:
2071 	return (rc);
2072 }
2073 
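/*
 * al_eth_setup_int_mode - choose and prepare the interrupt mode
 *
 * Fill the irq table for one of three modes: a single INTx handler when
 * no MSI-X vectors were granted, a single shared MSI-X vector, or one
 * management vector plus a dedicated vector per Rx and per Tx queue.
 */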
2074 static int
2075 al_eth_setup_int_mode(struct al_eth_adapter *adapter)
2076 {
2077 	int i, rc;
2078 
2079 	rc = al_eth_enable_msix(adapter);
2080 	if (rc != 0) {
2081 		device_printf(adapter->dev, "Failed to enable MSIX mode.\n");
2082 		return (rc);
2083 	}
2084 
2085 	adapter->irq_vecs = max(1, adapter->msix_vecs);
2086 	/* single INTX mode */
2087 	if (adapter->msix_vecs == 0) {
2088 		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2089 		    AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s",
2090 		    device_get_name(adapter->dev));
2091 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2092 		    al_eth_intr_intx_all;
2093 		/* IRQ vector will be resolved from device resources */
2094 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2095 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2096 
		device_printf(adapter->dev, "%s: INTx mode, vector %d\n", __func__,
2098 		    adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector);
2099 
2100 		return (0);
2101 	}
2102 	/* single MSI-X mode */
2103 	if (adapter->msix_vecs == 1) {
2104 		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2105 		    AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s",
2106 		    device_get_name(adapter->dev));
2107 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2108 		    al_eth_intr_msix_all;
2109 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2110 		    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2111 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2112 
2113 		return (0);
2114 	}
2115 	/* MSI-X per queue */
2116 	snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE,
2117 	    "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev));
2118 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt;
2119 
2120 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2121 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2122 	    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2123 
2124 	for (i = 0; i < adapter->num_rx_queues; i++) {
2125 		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2126 
2127 		snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE,
2128 		    "al-eth-rx-comp-%d@pci:%s", i,
2129 		    device_get_name(adapter->dev));
2130 		adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter;
2131 		adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i];
2132 		adapter->irq_tbl[irq_idx].vector =
2133 		    adapter->msix_entries[irq_idx].vector;
2134 	}
2135 
2136 	for (i = 0; i < adapter->num_tx_queues; i++) {
2137 		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2138 
2139 		snprintf(adapter->irq_tbl[irq_idx].name,
2140 		    AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i,
2141 		    device_get_name(adapter->dev));
2142 		adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter;
2143 		adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i];
2144 		adapter->irq_tbl[irq_idx].vector =
2145 		    adapter->msix_entries[irq_idx].vector;
2146 	}
2147 
2148 	return (0);
2149 }
2150 
2151 static void
2152 __al_eth_free_irq(struct al_eth_adapter *adapter)
2153 {
2154 	struct al_eth_irq *irq;
2155 	int i, rc;
2156 
2157 	for (i = 0; i < adapter->irq_vecs; i++) {
2158 		irq = &adapter->irq_tbl[i];
2159 		if (irq->requested != 0) {
2160 			device_printf_dbg(adapter->dev, "tear down irq: %d\n",
2161 			    irq->vector);
2162 			rc = bus_teardown_intr(adapter->dev, irq->res,
2163 			    irq->cookie);
2164 			if (rc != 0)
2165 				device_printf(adapter->dev, "failed to tear "
2166 				    "down irq: %d\n", irq->vector);
2167 		}
2168 		irq->requested = 0;
2169 	}
2170 }
2171 
2172 static void
2173 al_eth_free_irq(struct al_eth_adapter *adapter)
2174 {
2175 	struct al_eth_irq *irq;
2176 	int i, rc;
2177 #ifdef CONFIG_RFS_ACCEL
2178 	if (adapter->msix_vecs >= 1) {
2179 		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2180 		adapter->netdev->rx_cpu_rmap = NULL;
2181 	}
2182 #endif
2183 
2184 	__al_eth_free_irq(adapter);
2185 
2186 	for (i = 0; i < adapter->irq_vecs; i++) {
2187 		irq = &adapter->irq_tbl[i];
2188 		if (irq->res == NULL)
2189 			continue;
2190 		device_printf_dbg(adapter->dev, "release resource irq: %d\n",
2191 		    irq->vector);
2192 		rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector,
2193 		    irq->res);
2194 		irq->res = NULL;
2195 		if (rc != 0)
2196 			device_printf(adapter->dev, "dev has no parent while "
2197 			    "releasing res for irq: %d\n", irq->vector);
2198 	}
2199 
2200 	pci_release_msi(adapter->dev);
2201 
2202 	adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;
2203 
2204 	adapter->msix_vecs = 0;
2205 	free(adapter->msix_entries, M_IFAL);
2206 	adapter->msix_entries = NULL;
2207 }
2208 
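/*
 * al_eth_request_irq - allocate and set up all interrupt resources
 *
 * Walk the irq table, allocate a bus resource for every vector (shareable
 * when running in INTx mode) and register its filter handler. On failure,
 * the entries set up so far are unwound through the exit_intr and
 * exit_res paths below.
 */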
2209 static int
2210 al_eth_request_irq(struct al_eth_adapter *adapter)
2211 {
2212 	unsigned long flags;
2213 	struct al_eth_irq *irq;
2214 	int rc = 0, i, v;
2215 
2216 	if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0)
2217 		flags = RF_ACTIVE;
2218 	else
2219 		flags = RF_ACTIVE | RF_SHAREABLE;
2220 
2221 	for (i = 0; i < adapter->irq_vecs; i++) {
2222 		irq = &adapter->irq_tbl[i];
2223 
2224 		if (irq->requested != 0)
2225 			continue;
2226 
2227 		irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ,
2228 		    &irq->vector, flags);
2229 		if (irq->res == NULL) {
2230 			device_printf(adapter->dev, "could not allocate "
2231 			    "irq vector=%d\n", irq->vector);
2232 			rc = ENXIO;
2233 			goto exit_res;
2234 		}
2235 
2236 		if ((rc = bus_setup_intr(adapter->dev, irq->res,
2237 		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler,
2238 		    NULL, irq->data, &irq->cookie)) != 0) {
2239 			device_printf(adapter->dev, "failed to register "
2240 			    "interrupt handler for irq %ju: %d\n",
2241 			    (uintmax_t)rman_get_start(irq->res), rc);
2242 			goto exit_intr;
2243 		}
2244 		irq->requested = 1;
2245 	}
2246 	goto exit;
2247 
2248 exit_intr:
	/*
	 * Tear down handlers 0 .. i - 1; entry i is the one whose setup
	 * failed and has no handler registered.
	 */
	for (v = i - 1; v >= 0; v--) {
2251 		int bti;
2252 		irq = &adapter->irq_tbl[v];
2253 		bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie);
2254 		if (bti != 0) {
2255 			device_printf(adapter->dev, "failed to tear "
2256 			    "down irq: %d\n", irq->vector);
2257 		}
2258 
2259 		irq->requested = 0;
2260 		device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n",
2261 		    irq->vector);
2262 	}
2263 
2264 exit_res:
	/*
	 * Release the resource of every entry allocated so far; entry i may
	 * or may not hold one, depending on which call failed.
	 */
	for (v = i; v >= 0; v--) {
		int brr;

		irq = &adapter->irq_tbl[v];
		if (irq->res == NULL)
			continue;
2269 		device_printf_dbg(adapter->dev, "exit_res: releasing resource"
2270 		    " for irq %d\n", irq->vector);
2271 		brr = bus_release_resource(adapter->dev, SYS_RES_IRQ,
2272 		    irq->vector, irq->res);
2273 		if (brr != 0)
2274 			device_printf(adapter->dev, "dev has no parent while "
2275 			    "releasing res for irq: %d\n", irq->vector);
2276 		irq->res = NULL;
2277 	}
2278 
2279 exit:
2280 	return (rc);
2281 }
2282 
2283 /**
2284  * al_eth_setup_tx_resources - allocate Tx resources (Descriptors)
2285  * @adapter: network interface device structure
2286  * @qid: queue index
2287  *
2288  * Return 0 on success, negative on failure
2289  **/
2290 static int
2291 al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
2292 {
2293 	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2294 	device_t dev = tx_ring->dev;
2295 	struct al_udma_q_params *q_params = &tx_ring->q_params;
2296 	int size;
2297 	int ret;
2298 
2299 	if (adapter->up)
2300 		return (0);
2301 
2302 	size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;
2303 
2304 	tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2305 	if (tx_ring->tx_buffer_info == NULL)
2306 		return (ENOMEM);
2307 
2308 	tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
2309 	q_params->size = tx_ring->hw_count;
2310 
2311 	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2312 	    (bus_dmamap_t *)&q_params->desc_phy_base_map,
2313 	    (bus_addr_t *)&q_params->desc_phy_base,
2314 	    (void**)&q_params->desc_base, tx_ring->descs_size);
2315 	if (ret != 0) {
2316 		device_printf(dev, "failed to al_dma_alloc_coherent,"
2317 		    " ret = %d\n", ret);
2318 		return (ENOMEM);
2319 	}
2320 
2321 	if (q_params->desc_base == NULL)
2322 		return (ENOMEM);
2323 
2324 	device_printf_dbg(dev, "Initializing ring queues %d\n", qid);
2325 
2326 	/* Allocate Ring Queue */
2327 	mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
2328 	tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
2329 	    &tx_ring->br_mtx);
2330 	if (tx_ring->br == NULL) {
2331 		device_printf(dev, "Critical Failure setting up buf ring\n");
2332 		return (ENOMEM);
2333 	}
2334 
2335 	/* Allocate taskqueues */
2336 	TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
2337 	tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT,
2338 	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
2339 	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq",
2340 	    device_get_nameunit(adapter->dev));
2341 	TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring);
2342 	tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT,
2343 	    taskqueue_thread_enqueue, &tx_ring->cmpl_tq);
2344 	taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq",
2345 	    device_get_nameunit(adapter->dev));
2346 
2347 	/* Setup DMA descriptor areas. */
2348 	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2349 	    1, 0,			/* alignment, bounds */
2350 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2351 	    BUS_SPACE_MAXADDR,		/* highaddr */
2352 	    NULL, NULL,			/* filter, filterarg */
2353 	    AL_TSO_SIZE,		/* maxsize */
2354 	    AL_ETH_PKT_MAX_BUFS,	/* nsegments */
2355 	    PAGE_SIZE,			/* maxsegsize */
2356 	    0,				/* flags */
2357 	    NULL,			/* lockfunc */
2358 	    NULL,			/* lockfuncarg */
2359 	    &tx_ring->dma_buf_tag);
2360 
2361 	if (ret != 0) {
		device_printf(dev, "Unable to allocate dma_buf_tag, ret = %d\n",
2363 		    ret);
2364 		return (ret);
2365 	}
2366 
2367 	for (size = 0; size < tx_ring->sw_count; size++) {
2368 		ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0,
2369 		    &tx_ring->tx_buffer_info[size].dma_map);
2370 		if (ret != 0) {
2371 			device_printf(dev, "Unable to map DMA TX "
2372 			    "buffer memory [iter=%d]\n", size);
2373 			return (ret);
2374 		}
2375 	}
2376 
2377 	/* completion queue not used for tx */
2378 	q_params->cdesc_base = NULL;
2379 	/* size in bytes of the udma completion ring descriptor */
2380 	q_params->cdesc_size = 8;
2381 	tx_ring->next_to_use = 0;
2382 	tx_ring->next_to_clean = 0;
2383 
2384 	return (0);
2385 }
2386 
2387 /*
2388  * al_eth_free_tx_resources - Free Tx Resources per Queue
2389  * @adapter: network interface device structure
2390  * @qid: queue index
2391  *
2392  * Free all transmit software resources
2393  */
2394 static void
2395 al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid)
2396 {
2397 	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2398 	struct al_udma_q_params *q_params = &tx_ring->q_params;
2399 	int size;
2400 
2401 	/* At this point interrupts' handlers must be deactivated */
2402 	while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL))
2403 		taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
2404 
2405 	taskqueue_free(tx_ring->cmpl_tq);
2406 	while (taskqueue_cancel(tx_ring->enqueue_tq,
2407 	    &tx_ring->enqueue_task, NULL)) {
2408 		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2409 	}
2410 
2411 	taskqueue_free(tx_ring->enqueue_tq);
2412 
2413 	if (tx_ring->br != NULL) {
2414 		drbr_flush(adapter->netdev, tx_ring->br);
2415 		buf_ring_free(tx_ring->br, M_DEVBUF);
2416 	}
2417 
2418 	for (size = 0; size < tx_ring->sw_count; size++) {
2419 		m_freem(tx_ring->tx_buffer_info[size].m);
2420 		tx_ring->tx_buffer_info[size].m = NULL;
2421 
2422 		bus_dmamap_unload(tx_ring->dma_buf_tag,
2423 		    tx_ring->tx_buffer_info[size].dma_map);
2424 		bus_dmamap_destroy(tx_ring->dma_buf_tag,
2425 		    tx_ring->tx_buffer_info[size].dma_map);
2426 	}
2427 	bus_dma_tag_destroy(tx_ring->dma_buf_tag);
2428 
2429 	free(tx_ring->tx_buffer_info, M_IFAL);
2430 	tx_ring->tx_buffer_info = NULL;
2431 
2432 	mtx_destroy(&tx_ring->br_mtx);
2433 
2434 	/* if not set, then don't free */
2435 	if (q_params->desc_base == NULL)
2436 		return;
2437 
2438 	al_dma_free_coherent(q_params->desc_phy_base_tag,
2439 	    q_params->desc_phy_base_map, q_params->desc_base);
2440 
2441 	q_params->desc_base = NULL;
2442 }
2443 
2444 /*
2445  * al_eth_free_all_tx_resources - Free Tx Resources for All Queues
2446  * @adapter: board private structure
2447  *
2448  * Free all transmit software resources
2449  */
2450 static void
2451 al_eth_free_all_tx_resources(struct al_eth_adapter *adapter)
2452 {
2453 	int i;
2454 
2455 	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].q_params.desc_base != NULL)
2457 			al_eth_free_tx_resources(adapter, i);
2458 }
2459 
2460 /*
2461  * al_eth_setup_rx_resources - allocate Rx resources (Descriptors)
2462  * @adapter: network interface device structure
2463  * @qid: queue index
2464  *
2465  * Returns 0 on success, negative on failure
2466  */
2467 static int
2468 al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2469 {
2470 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2471 	device_t dev = rx_ring->dev;
2472 	struct al_udma_q_params *q_params = &rx_ring->q_params;
2473 	int size;
2474 	int ret;
2475 
2476 	size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count;
2477 
	/*
	 * Allocate one extra element so that the RX path can always safely
	 * prefetch rx_info + 1.
	 */
	size += sizeof(struct al_eth_rx_buffer);
2480 
2481 	rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2482 	if (rx_ring->rx_buffer_info == NULL)
2483 		return (ENOMEM);
2484 
2485 	rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
2486 	q_params->size = rx_ring->hw_count;
2487 
2488 	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2489 	    &q_params->desc_phy_base_map,
2490 	    (bus_addr_t *)&q_params->desc_phy_base,
2491 	    (void**)&q_params->desc_base, rx_ring->descs_size);
2492 
2493 	if ((q_params->desc_base == NULL) || (ret != 0))
2494 		return (ENOMEM);
2495 
2496 	/* size in bytes of the udma completion ring descriptor */
2497 	q_params->cdesc_size = 16;
2498 	rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size;
2499 	ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag,
2500 	    &q_params->cdesc_phy_base_map,
2501 	    (bus_addr_t *)&q_params->cdesc_phy_base,
2502 	    (void**)&q_params->cdesc_base, rx_ring->cdescs_size);
2503 
2504 	if ((q_params->cdesc_base == NULL) || (ret != 0))
2505 		return (ENOMEM);
2506 
2507 	/* Allocate taskqueues */
2508 	NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
2509 	rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
2510 	    taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
2511 	taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
2512 	    device_get_nameunit(adapter->dev));
2513 
2514 	/* Setup DMA descriptor areas. */
2515 	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2516 	    1, 0,			/* alignment, bounds */
2517 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2518 	    BUS_SPACE_MAXADDR,		/* highaddr */
2519 	    NULL, NULL,			/* filter, filterarg */
2520 	    AL_TSO_SIZE,		/* maxsize */
2521 	    1,				/* nsegments */
2522 	    AL_TSO_SIZE,		/* maxsegsize */
2523 	    0,				/* flags */
2524 	    NULL,			/* lockfunc */
2525 	    NULL,			/* lockfuncarg */
2526 	    &rx_ring->dma_buf_tag);
2527 
2528 	if (ret != 0) {
		device_printf(dev, "Unable to allocate RX dma_buf_tag\n");
2530 		return (ret);
2531 	}
2532 
2533 	for (size = 0; size < rx_ring->sw_count; size++) {
2534 		ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0,
2535 		    &rx_ring->rx_buffer_info[size].dma_map);
2536 		if (ret != 0) {
			device_printf(dev, "Unable to map DMA RX buffer memory\n");
2538 			return (ret);
2539 		}
2540 	}
2541 
2542 	/* Zero out the descriptor ring */
2543 	memset(q_params->cdesc_base, 0, rx_ring->cdescs_size);
2544 
2545 	/* Create LRO for the ring */
2546 	if ((if_getcapenable(adapter->netdev) & IFCAP_LRO) != 0) {
2547 		int err = tcp_lro_init(&rx_ring->lro);
2548 		if (err != 0) {
2549 			device_printf(adapter->dev,
2550 			    "LRO[%d] Initialization failed!\n", qid);
2551 		} else {
2552 			device_printf_dbg(adapter->dev,
2553 			    "RX Soft LRO[%d] Initialized\n", qid);
2554 			rx_ring->lro_enabled = true;
2555 			rx_ring->lro.ifp = adapter->netdev;
2556 		}
2557 	}
2558 
2559 	rx_ring->next_to_clean = 0;
2560 	rx_ring->next_to_use = 0;
2561 
2562 	return (0);
2563 }
2564 
2565 /*
2566  * al_eth_free_rx_resources - Free Rx Resources
2567  * @adapter: network interface device structure
2568  * @qid: queue index
2569  *
2570  * Free all receive software resources
2571  */
2572 static void
2573 al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2574 {
2575 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2576 	struct al_udma_q_params *q_params = &rx_ring->q_params;
2577 	int size;
2578 
2579 	/* At this point interrupts' handlers must be deactivated */
2580 	while (taskqueue_cancel(rx_ring->enqueue_tq,
2581 	    &rx_ring->enqueue_task, NULL)) {
2582 		taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
2583 	}
2584 
2585 	taskqueue_free(rx_ring->enqueue_tq);
2586 
2587 	for (size = 0; size < rx_ring->sw_count; size++) {
2588 		m_freem(rx_ring->rx_buffer_info[size].m);
2589 		rx_ring->rx_buffer_info[size].m = NULL;
2590 		bus_dmamap_unload(rx_ring->dma_buf_tag,
2591 		    rx_ring->rx_buffer_info[size].dma_map);
2592 		bus_dmamap_destroy(rx_ring->dma_buf_tag,
2593 		    rx_ring->rx_buffer_info[size].dma_map);
2594 	}
2595 	bus_dma_tag_destroy(rx_ring->dma_buf_tag);
2596 
2597 	free(rx_ring->rx_buffer_info, M_IFAL);
2598 	rx_ring->rx_buffer_info = NULL;
2599 
2600 	/* if not set, then don't free */
2601 	if (q_params->desc_base == NULL)
2602 		return;
2603 
2604 	al_dma_free_coherent(q_params->desc_phy_base_tag,
2605 	    q_params->desc_phy_base_map, q_params->desc_base);
2606 
2607 	q_params->desc_base = NULL;
2608 
2609 	/* if not set, then don't free */
2610 	if (q_params->cdesc_base == NULL)
2611 		return;
2612 
2613 	al_dma_free_coherent(q_params->cdesc_phy_base_tag,
2614 	    q_params->cdesc_phy_base_map, q_params->cdesc_base);
2615 
2616 	q_params->cdesc_phy_base = 0;
2617 
2618 	/* Free LRO resources */
2619 	tcp_lro_free(&rx_ring->lro);
2620 }
2621 
2622 /*
2623  * al_eth_free_all_rx_resources - Free Rx Resources for All Queues
2624  * @adapter: board private structure
2625  *
2626  * Free all receive software resources
2627  */
2628 static void
2629 al_eth_free_all_rx_resources(struct al_eth_adapter *adapter)
2630 {
2631 	int i;
2632 
2633 	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].q_params.desc_base != NULL)
2635 			al_eth_free_rx_resources(adapter, i);
2636 }
2637 
2638 /*
2639  * al_eth_setup_all_rx_resources - allocate all queues Rx resources
2640  * @adapter: board private structure
2641  *
2642  * Return 0 on success, negative on failure
2643  */
2644 static int
2645 al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter)
2646 {
2647 	int i, rc = 0;
2648 
2649 	for (i = 0; i < adapter->num_rx_queues; i++) {
2650 		rc = al_eth_setup_rx_resources(adapter, i);
2651 		if (rc == 0)
2652 			continue;
2653 
2654 		device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i);
2655 		goto err_setup_rx;
2656 	}
2657 	return (0);
2658 
2659 err_setup_rx:
2660 	/* rewind the index freeing the rings as we go */
2661 	while (i--)
2662 		al_eth_free_rx_resources(adapter, i);
2663 	return (rc);
2664 }
2665 
2666 /*
2667  * al_eth_setup_all_tx_resources - allocate all queues Tx resources
2668  * @adapter: private structure
2669  *
2670  * Return 0 on success, negative on failure
2671  */
2672 static int
2673 al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
2674 {
2675 	int i, rc = 0;
2676 
2677 	for (i = 0; i < adapter->num_tx_queues; i++) {
2678 		rc = al_eth_setup_tx_resources(adapter, i);
2679 		if (rc == 0)
2680 			continue;
2681 
2682 		device_printf(adapter->dev,
2683 		    "Allocation for Tx Queue %u failed\n", i);
2684 		goto err_setup_tx;
2685 	}
2686 
2687 	return (0);
2688 
2689 err_setup_tx:
2690 	/* rewind the index freeing the rings as we go */
2691 	while (i--)
2692 		al_eth_free_tx_resources(adapter, i);
2693 
2694 	return (rc);
2695 }
2696 
2697 static void
2698 al_eth_disable_int_sync(struct al_eth_adapter *adapter)
2699 {
2700 
	/* disable forwarding interrupts from eth through the PCI endpoint */
2702 	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
2703 	    (adapter->board_type == ALPINE_NIC)) {
2704 		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
2705 		    AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR);
2706 	}
2707 
2708 	/* mask hw interrupts */
2709 	al_eth_interrupts_mask(adapter);
2710 }
2711 
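/*
 * al_eth_interrupts_unmask - unmask the interrupts the driver services
 *
 * Group B carries one bit per Rx queue and group C one bit per Tx queue.
 * The group D summary bit of group A is always unmasked; in legacy (INTx)
 * mode the group B and C summary bits are unmasked as well, since all
 * events are funneled through a single line.
 */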
2712 static void
2713 al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
2714 {
	uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */
	uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1; /* bit per Rx q */
	uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1; /* bit per Tx q */
2718 	uint32_t group_d_mask = 3 << 8;
2719 	struct unit_regs __iomem *regs_base =
2720 	    (struct unit_regs __iomem *)adapter->udma_base;
2721 
2722 	if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
2723 		group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
2724 		    AL_INT_GROUP_A_GROUP_C_SUM |
2725 		    AL_INT_GROUP_A_GROUP_D_SUM;
2726 
2727 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2728 	    AL_INT_GROUP_A, group_a_mask);
2729 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2730 	    AL_INT_GROUP_B, group_b_mask);
2731 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2732 	    AL_INT_GROUP_C, group_c_mask);
2733 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2734 	    AL_INT_GROUP_D, group_d_mask);
2735 }
2736 
2737 static void
2738 al_eth_interrupts_mask(struct al_eth_adapter *adapter)
2739 {
2740 	struct unit_regs __iomem *regs_base =
2741 	    (struct unit_regs __iomem *)adapter->udma_base;
2742 
2743 	/* mask all interrupts */
2744 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2745 	    AL_INT_GROUP_A, AL_MASK_GROUP_A_INT);
2746 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2747 	    AL_INT_GROUP_B, AL_MASK_GROUP_B_INT);
2748 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2749 	    AL_INT_GROUP_C, AL_MASK_GROUP_C_INT);
2750 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2751 	    AL_INT_GROUP_D, AL_MASK_GROUP_D_INT);
2752 }
2753 
2754 static int
2755 al_eth_configure_int_mode(struct al_eth_adapter *adapter)
2756 {
2757 	enum al_iofic_mode int_mode;
2758 	uint32_t m2s_errors_disable = AL_M2S_MASK_INIT;
2759 	uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT;
2760 	uint32_t s2m_errors_disable = AL_S2M_MASK_INIT;
2761 	uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT;
2762 
2763 	/* single INTX mode */
2764 	if (adapter->msix_vecs == 0)
2765 		int_mode = AL_IOFIC_MODE_LEGACY;
2766 	else if (adapter->msix_vecs > 1)
2767 		int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
2768 	else {
2769 		device_printf(adapter->dev,
2770 		    "udma doesn't support single MSI-X mode yet.\n");
2771 		return (EIO);
2772 	}
2773 
2774 	if (adapter->board_type != ALPINE_INTEGRATED) {
		m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
		m2s_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
		s2m_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
		s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2779 	}
2780 
2781 	if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
2782 	    int_mode, m2s_errors_disable, m2s_aborts_disable,
2783 	    s2m_errors_disable, s2m_aborts_disable)) {
		device_printf(adapter->dev,
		    "al_udma_iofic_config failed!\n");
2786 		return (EIO);
2787 	}
2788 	adapter->int_mode = int_mode;
2789 	device_printf_dbg(adapter->dev, "using %s interrupt mode\n",
2790 	    int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" :
2791 	    int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown");
2792 	/* set interrupt moderation resolution to 15us */
	al_iofic_moder_res_config(&((struct unit_regs *)
	    (adapter->udma_base))->gen.interrupt_regs.main_iofic,
	    AL_INT_GROUP_B, 15);
	al_iofic_moder_res_config(&((struct unit_regs *)
	    (adapter->udma_base))->gen.interrupt_regs.main_iofic,
	    AL_INT_GROUP_C, 15);
2795 	/* by default interrupt coalescing is disabled */
2796 	adapter->tx_usecs = 0;
2797 	adapter->rx_usecs = 0;
2798 
2799 	return (0);
2800 }
2801 
2802 /*
2803  * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
2804  * @index: Index in RX flow hash indirection table
2805  * @n_rx_rings: Number of RX rings to use
2806  *
2807  * This function provides the default policy for RX flow hash indirection.
2808  */
2809 static inline uint32_t
2810 ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
2811 {
2812 
2813 	return (index % n_rx_rings);
2814 }
2815 
static void *
2817 al_eth_update_stats(struct al_eth_adapter *adapter)
2818 {
2819 	struct al_eth_mac_stats *mac_stats = &adapter->mac_stats;
2820 
2821 	if (adapter->up == 0)
2822 		return (NULL);
2823 
2824 	al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats);
2825 
2826 	return (NULL);
2827 }
2828 
2829 static uint64_t
2830 al_get_counter(if_t ifp, ift_counter cnt)
2831 {
2832 	struct al_eth_adapter *adapter;
2833 	struct al_eth_mac_stats *mac_stats;
2834 	uint64_t rv;
2835 
2836 	adapter = if_getsoftc(ifp);
2837 	mac_stats = &adapter->mac_stats;
2838 
2839 	switch (cnt) {
2840 	case IFCOUNTER_IPACKETS:
2841 		return (mac_stats->aFramesReceivedOK); /* including pause frames */
2842 	case IFCOUNTER_OPACKETS:
2843 		return (mac_stats->aFramesTransmittedOK);
2844 	case IFCOUNTER_IBYTES:
2845 		return (mac_stats->aOctetsReceivedOK);
2846 	case IFCOUNTER_OBYTES:
2847 		return (mac_stats->aOctetsTransmittedOK);
2848 	case IFCOUNTER_IMCASTS:
2849 		return (mac_stats->ifInMulticastPkts);
2850 	case IFCOUNTER_OMCASTS:
2851 		return (mac_stats->ifOutMulticastPkts);
2852 	case IFCOUNTER_COLLISIONS:
2853 		return (0);
2854 	case IFCOUNTER_IQDROPS:
2855 		return (mac_stats->etherStatsDropEvents);
2856 	case IFCOUNTER_IERRORS:
2857 		rv = mac_stats->ifInErrors +
2858 		    mac_stats->etherStatsUndersizePkts + /* good but short */
2859 		    mac_stats->etherStatsFragments + /* short and bad*/
2860 		    mac_stats->etherStatsJabbers + /* with crc errors */
2861 		    mac_stats->etherStatsOversizePkts +
2862 		    mac_stats->aFrameCheckSequenceErrors +
2863 		    mac_stats->aAlignmentErrors;
2864 		return (rv);
2865 	case IFCOUNTER_OERRORS:
2866 		return (mac_stats->ifOutErrors);
2867 	default:
2868 		return (if_get_counter_default(ifp, cnt));
2869 	}
2870 }
2871 
2872 static u_int
2873 al_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2874 {
2875 	unsigned char *mac;
2876 
2877 	mac = LLADDR(sdl);
	/* count every address except the default MC address (..:00:00:01) */
	if (mac[3] != 0 || mac[4] != 0 || mac[5] != 1)
2880 		return (1);
2881 	else
2882 		return (0);
2883 }
2884 
2885 static u_int
2886 al_program_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2887 {
2888 	struct al_eth_adapter *adapter = arg;
2889 
2890 	al_eth_mac_table_unicast_add(adapter,
2891 	    AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 + cnt, 1);
2892 
2893 	return (1);
2894 }
2895 
2896 /*
2897  *  Unicast, Multicast and Promiscuous mode set
2898  *
2899  *  The set_rx_mode entry point is called whenever the unicast or multicast
2900  *  address lists or the network interface flags are updated.  This routine is
2901  *  responsible for configuring the hardware for proper unicast, multicast,
2902  *  promiscuous mode, and all-multi behavior.
2903  */
2904 static void
2905 al_eth_set_rx_mode(struct al_eth_adapter *adapter)
2906 {
2907 	if_t ifp = adapter->netdev;
2908 	int mc, uc;
2909 	uint8_t i;
2910 
2911 	/* XXXGL: why generic count won't work? */
2912 	mc = if_foreach_llmaddr(ifp, al_count_maddr, NULL);
2913 	uc = if_lladdr_count(ifp);
2914 
2915 	if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
2916 		al_eth_mac_table_promiscuous_set(adapter, true);
2917 	} else {
2918 		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
			/* This interface is in all-multicast mode (used by multicast routers). */
2920 			al_eth_mac_table_all_multicast_add(adapter,
2921 			    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2922 		} else {
2923 			if (mc == 0) {
2924 				al_eth_mac_table_entry_clear(adapter,
2925 				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
2926 			} else {
2927 				al_eth_mac_table_all_multicast_add(adapter,
2928 				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2929 			}
2930 		}
2931 		if (uc != 0) {
2932 			i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
2933 			if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) {
2934 				/*
				 * There are more addresses than entries in
				 * the mac table - set promiscuous mode.
2937 				 */
2938 				al_eth_mac_table_promiscuous_set(adapter, true);
2939 				return;
2940 			}
2941 
2942 			/* clear the last configuration */
2943 			while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE +
2944 				    AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
2945 				al_eth_mac_table_entry_clear(adapter, i);
2946 				i++;
2947 			}
2948 
2949 			/* set new addresses */
2950 			if_foreach_lladdr(ifp, al_program_addr, adapter);
2951 		}
2952 		al_eth_mac_table_promiscuous_set(adapter, false);
2953 	}
2954 }
2955 
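/*
 * al_eth_config_rx_fwd - set up the Rx forwarding and RSS tables
 *
 * Program identity pbits/priority mappings, point the default control
 * table entry at the Toeplitz hash table, allow unicast to our MAC
 * address plus broadcast through the MAC table (everything else is
 * dropped), and seed the RSS hash key and indirection table with a
 * round-robin spread over the queues.
 */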
2956 static void
2957 al_eth_config_rx_fwd(struct al_eth_adapter *adapter)
2958 {
2959 	struct al_eth_fwd_ctrl_table_entry entry;
2960 	int i;
2961 
2962 	/* let priority be equal to pbits */
2963 	for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++)
2964 		al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i);
2965 
2966 	/* map priority to queue index, queue id = priority/2 */
2967 	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
2968 		al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1);
2969 
2970 	entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0;
2971 	entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE;
2972 	entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO;
2973 	entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE;
2974 	entry.filter = false;
2975 
2976 	al_eth_ctrl_table_def_set(&adapter->hal_adapter, AL_FALSE, &entry);
2977 
2978 	/*
2979 	 * By default set the mac table to forward all unicast packets to our
2980 	 * MAC address and all broadcast. all the rest will be dropped.
2981 	 */
2982 	al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
2983 	    1);
2984 	al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1);
2985 	al_eth_mac_table_promiscuous_set(adapter, false);
2986 
2987 	/* set toeplitz hash keys */
2988 	for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++)
2989 		*((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random();
2990 
2991 	for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++)
2992 		al_eth_hash_key_set(&adapter->hal_adapter, i,
2993 		    htonl(adapter->toeplitz_hash_key[i]));
2994 
2995 	for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) {
2996 		adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i,
2997 		    AL_ETH_NUM_QUEUES);
2998 		al_eth_set_thash_table_entry(adapter, i, 0,
2999 		    adapter->rss_ind_tbl[i]);
3000 	}
3001 
3002 	al_eth_fsm_table_init(adapter);
3003 }
3004 
3005 static void
3006 al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size)
3007 {
3008 
	/*
	 * Determine the correct mbuf pool for doing jumbo frames.
	 * Try from the smallest size up to the maximum supported.
	 */
3014 	adapter->rx_mbuf_sz = MCLBYTES;
3015 	if (size > 2048) {
3016 		if (adapter->max_rx_buff_alloc_size > 2048)
3017 			adapter->rx_mbuf_sz = MJUMPAGESIZE;
3018 		else
3019 			return;
3020 	}
3021 	if (size > 4096) {
3022 		if (adapter->max_rx_buff_alloc_size > 4096)
3023 			adapter->rx_mbuf_sz = MJUM9BYTES;
3024 		else
3025 			return;
3026 	}
3027 	if (size > 9216) {
3028 		if (adapter->max_rx_buff_alloc_size > 9216)
3029 			adapter->rx_mbuf_sz = MJUM16BYTES;
3030 		else
3031 			return;
3032 	}
3033 }
3034 
3035 static int
3036 al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu)
3037 {
3038 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
3039 	    ETHER_VLAN_ENCAP_LEN;
3040 
3041 	al_eth_req_rx_buff_size(adapter, new_mtu);
3042 
3043 	device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu);
3044 	al_eth_rx_pkt_limit_config(&adapter->hal_adapter,
3045 	    AL_ETH_MIN_FRAME_LEN, max_frame);
3046 
3047 	al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100);
3048 
3049 	return (0);
3050 }
3051 
3052 static int
3053 al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu)
3054 {
3055 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
3056 
3057 	if ((new_mtu < AL_ETH_MIN_FRAME_LEN) ||
3058 	    (max_frame > AL_ETH_MAX_FRAME_LEN)) {
3059 		return (EINVAL);
3060 	}
3061 
3062 	return (0);
3063 }
3064 
3065 static int
3066 al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type,
3067     int qid)
3068 {
3069 	int rc = 0;
3070 	char *name = (type == UDMA_TX) ? "Tx" : "Rx";
3071 	struct al_udma_q_params *q_params;
3072 
3073 	if (type == UDMA_TX)
3074 		q_params = &adapter->tx_ring[qid].q_params;
3075 	else
3076 		q_params = &adapter->rx_ring[qid].q_params;
3077 
3078 	rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params);
3079 	if (rc < 0) {
3080 		device_printf(adapter->dev, "config %s queue %u failed\n", name,
3081 		    qid);
3082 		return (rc);
3083 	}
3084 	return (rc);
3085 }
3086 
3087 static int
3088 al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter)
3089 {
3090 	int i;
3091 
3092 	for (i = 0; i < adapter->num_tx_queues; i++)
3093 		al_eth_udma_queue_enable(adapter, UDMA_TX, i);
3094 
3095 	for (i = 0; i < adapter->num_rx_queues; i++)
3096 		al_eth_udma_queue_enable(adapter, UDMA_RX, i);
3097 
3098 	return (0);
3099 }
3100 
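/*
 * al_eth_up_complete - final stage of bringing the interface up
 *
 * The ordering below matters: the queues are enabled and the Rx rings
 * refilled before interrupts are unmasked, and the MAC is started only
 * after flow control is configured and the statistics callout is armed.
 */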
3101 static void
3102 al_eth_up_complete(struct al_eth_adapter *adapter)
3103 {
3104 
3105 	al_eth_configure_int_mode(adapter);
3106 	al_eth_config_rx_fwd(adapter);
3107 	al_eth_change_mtu(adapter, if_getmtu(adapter->netdev));
3108 	al_eth_udma_queues_enable_all(adapter);
3109 	al_eth_refill_all_rx_bufs(adapter);
3110 	al_eth_interrupts_unmask(adapter);
3111 
	/* enable forwarding interrupts from eth through the PCI endpoint */
3113 	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
3114 	    (adapter->board_type == ALPINE_NIC)) {
3115 		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
3116 		    AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR);
3117 	}
3118 
3119 	al_eth_flow_ctrl_enable(adapter);
3120 
3121 	mtx_lock(&adapter->stats_mtx);
3122 	callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter);
3123 	mtx_unlock(&adapter->stats_mtx);
3124 
3125 	al_eth_mac_start(&adapter->hal_adapter);
3126 }
3127 
3128 static int
3129 al_media_update(if_t ifp)
3130 {
3131 	struct al_eth_adapter *adapter = if_getsoftc(ifp);
3132 
3133 	if ((if_getflags(ifp) & IFF_UP) != 0)
3134 		mii_mediachg(adapter->mii);
3135 
3136 	return (0);
3137 }
3138 
3139 static void
3140 al_media_status(if_t ifp, struct ifmediareq *ifmr)
3141 {
3142 	struct al_eth_adapter *sc = if_getsoftc(ifp);
3143 	struct mii_data *mii;
3144 
3145 	if (sc->mii == NULL) {
3146 		ifmr->ifm_active = IFM_ETHER | IFM_NONE;
3147 		ifmr->ifm_status = 0;
3148 
3149 		return;
3150 	}
3151 
3152 	mii = sc->mii;
3153 	mii_pollstat(mii);
3154 
3155 	ifmr->ifm_active = mii->mii_media_active;
3156 	ifmr->ifm_status = mii->mii_media_status;
3157 }
3158 
3159 static void
3160 al_tick(void *arg)
3161 {
3162 	struct al_eth_adapter *adapter = arg;
3163 
3164 	mii_tick(adapter->mii);
3165 
3166 	/* Schedule another timeout one second from now */
3167 	callout_schedule(&adapter->wd_callout, hz);
3168 }
3169 
3170 static void
3171 al_tick_stats(void *arg)
3172 {
3173 	struct al_eth_adapter *adapter = arg;
3174 
3175 	al_eth_update_stats(adapter);
3176 
3177 	callout_schedule(&adapter->stats_callout, hz);
3178 }
3179 
3180 static int
3181 al_eth_up(struct al_eth_adapter *adapter)
3182 {
3183 	if_t ifp = adapter->netdev;
3184 	int rc;
3185 
3186 	if (adapter->up)
3187 		return (0);
3188 
3189 	if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) {
3190 		al_eth_function_reset(adapter);
3191 		adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED;
3192 	}
3193 
3194 	if_sethwassist(ifp, 0);
3195 	if ((if_getcapenable(ifp) & IFCAP_TSO) != 0)
3196 		if_sethwassistbits(ifp, CSUM_TSO, 0);
3197 	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
3198 		if_sethwassistbits(ifp, (CSUM_TCP | CSUM_UDP), 0);
3199 	if ((if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) != 0)
3200 		if_sethwassistbits(ifp, (CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0);
3201 
3202 	al_eth_serdes_init(adapter);
3203 
3204 	rc = al_eth_hw_init(adapter);
3205 	if (rc != 0)
3206 		goto err_hw_init_open;
3207 
3208 	rc = al_eth_setup_int_mode(adapter);
3209 	if (rc != 0) {
3210 		device_printf(adapter->dev,
3211 		    "%s failed at setup interrupt mode!\n", __func__);
3212 		goto err_setup_int;
3213 	}
3214 
3215 	/* allocate transmit descriptors */
3216 	rc = al_eth_setup_all_tx_resources(adapter);
3217 	if (rc != 0)
3218 		goto err_setup_tx;
3219 
3220 	/* allocate receive descriptors */
3221 	rc = al_eth_setup_all_rx_resources(adapter);
3222 	if (rc != 0)
3223 		goto err_setup_rx;
3224 
3225 	rc = al_eth_request_irq(adapter);
3226 	if (rc != 0)
3227 		goto err_req_irq;
3228 
3229 	al_eth_up_complete(adapter);
3230 
3231 	adapter->up = true;
3232 
3233 	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial)
3234 		if_link_state_change(adapter->netdev, LINK_STATE_UP);
3235 
3236 	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
3237 		mii_mediachg(adapter->mii);
3238 
3239 		/* Schedule watchdog timeout */
3240 		mtx_lock(&adapter->wd_mtx);
3241 		callout_reset(&adapter->wd_callout, hz, al_tick, adapter);
3242 		mtx_unlock(&adapter->wd_mtx);
3243 
3244 		mii_pollstat(adapter->mii);
3245 	}
3246 
3247 	return (rc);
3248 
3249 err_req_irq:
3250 	al_eth_free_all_rx_resources(adapter);
3251 err_setup_rx:
3252 	al_eth_free_all_tx_resources(adapter);
3253 err_setup_tx:
3254 	al_eth_free_irq(adapter);
3255 err_setup_int:
3256 	al_eth_hw_stop(adapter);
3257 err_hw_init_open:
3258 	al_eth_function_reset(adapter);
3259 
3260 	return (rc);
3261 }
3262 
3263 static int
3264 al_shutdown(device_t dev)
3265 {
3266 	struct al_eth_adapter *adapter = device_get_softc(dev);
3267 
3268 	al_eth_down(adapter);
3269 
3270 	return (0);
3271 }
3272 
3273 static void
3274 al_eth_down(struct al_eth_adapter *adapter)
3275 {
3276 
3277 	device_printf_dbg(adapter->dev, "al_eth_down: begin\n");
3278 
3279 	adapter->up = false;
3280 
3281 	mtx_lock(&adapter->wd_mtx);
3282 	callout_stop(&adapter->wd_callout);
3283 	mtx_unlock(&adapter->wd_mtx);
3284 
3285 	al_eth_disable_int_sync(adapter);
3286 
3287 	mtx_lock(&adapter->stats_mtx);
3288 	callout_stop(&adapter->stats_callout);
3289 	mtx_unlock(&adapter->stats_mtx);
3290 
3291 	al_eth_free_irq(adapter);
3292 	al_eth_hw_stop(adapter);
3293 
3294 	al_eth_free_all_tx_resources(adapter);
3295 	al_eth_free_all_rx_resources(adapter);
3296 }
3297 
3298 static int
3299 al_ioctl(if_t ifp, u_long command, caddr_t data)
3300 {
3301 	struct al_eth_adapter	*adapter = if_getsoftc(ifp);
3302 	struct ifreq		*ifr = (struct ifreq *)data;
3303 	int			error = 0;
3304 
3305 	switch (command) {
3306 	case SIOCSIFMTU:
3307 	{
3308 		error = al_eth_check_mtu(adapter, ifr->ifr_mtu);
3309 		if (error != 0) {
			device_printf(adapter->dev, "ioctl wrong mtu %d\n",
			    ifr->ifr_mtu);
3312 			break;
3313 		}
3314 
3315 		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3316 		if_setmtu(adapter->netdev, ifr->ifr_mtu);
3317 		al_init(adapter);
3318 		break;
3319 	}
3320 	case SIOCSIFFLAGS:
3321 		if ((if_getflags(ifp) & IFF_UP) != 0) {
3322 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3323 				if (((if_getflags(ifp) ^ adapter->if_flags) &
3324 				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3325 					device_printf_dbg(adapter->dev,
3326 					    "ioctl promisc/allmulti\n");
3327 					al_eth_set_rx_mode(adapter);
3328 				}
3329 			} else {
3330 				error = al_eth_up(adapter);
3331 				if (error == 0)
3332 					if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
3333 			}
3334 		} else {
3335 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3336 				al_eth_down(adapter);
3337 				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3338 			}
3339 		}
3340 
3341 		adapter->if_flags = if_getflags(ifp);
3342 		break;
3343 
3344 	case SIOCADDMULTI:
3345 	case SIOCDELMULTI:
3346 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3347 			device_printf_dbg(adapter->dev,
3348 			    "ioctl add/del multi before\n");
3349 			al_eth_set_rx_mode(adapter);
3353 		}
3354 		break;
3355 	case SIOCSIFMEDIA:
3356 	case SIOCGIFMEDIA:
3357 		if (adapter->mii != NULL)
3358 			error = ifmedia_ioctl(ifp, ifr,
3359 			    &adapter->mii->mii_media, command);
3360 		else
3361 			error = ifmedia_ioctl(ifp, ifr,
3362 			    &adapter->media, command);
3363 		break;
3364 	case SIOCSIFCAP:
3365 	    {
3366 		int mask, reinit;
3367 
3368 		reinit = 0;
3369 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
3370 #ifdef DEVICE_POLLING
3371 		if ((mask & IFCAP_POLLING) != 0) {
3372 			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
3373 				if (error != 0)
3374 					return (error);
3375 				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
3376 			} else {
3377 				error = ether_poll_deregister(ifp);
3378 				/* Enable interrupt even in error case */
3379 				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
3380 			}
3381 		}
3382 #endif
3383 		if ((mask & IFCAP_HWCSUM) != 0) {
3384 			/* apply to both rx and tx */
3385 			if_togglecapenable(ifp, IFCAP_HWCSUM);
3386 			reinit = 1;
3387 		}
3388 		if ((mask & IFCAP_HWCSUM_IPV6) != 0) {
3389 			if_togglecapenable(ifp, IFCAP_HWCSUM_IPV6);
3390 			reinit = 1;
3391 		}
3392 		if ((mask & IFCAP_TSO) != 0) {
3393 			if_togglecapenable(ifp, IFCAP_TSO);
3394 			reinit = 1;
3395 		}
3396 		if ((mask & IFCAP_LRO) != 0) {
3397 			if_togglecapenable(ifp, IFCAP_LRO);
3398 		}
3399 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
3400 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
3401 			reinit = 1;
3402 		}
3403 		if ((mask & IFCAP_VLAN_HWFILTER) != 0) {
3404 			if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
3405 			reinit = 1;
3406 		}
3407 		if ((mask & IFCAP_VLAN_HWTSO) != 0) {
3408 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
3409 			reinit = 1;
3410 		}
		if ((reinit != 0) &&
		    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
3414 			al_init(adapter);
3415 		}
3416 		break;
3417 	    }
3418 
3419 	default:
3420 		error = ether_ioctl(ifp, command, data);
3421 		break;
3422 	}
3423 
3424 	return (error);
3425 }
3426 
3427 static int
3428 al_is_device_supported(device_t dev)
3429 {
3430 	uint16_t pci_vendor_id = pci_get_vendor(dev);
3431 	uint16_t pci_device_id = pci_get_device(dev);
3432 
3433 	return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS &&
3434 	    (pci_device_id == PCI_DEVICE_ID_AL_ETH ||
3435 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED ||
3436 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC ||
3437 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC));
3438 }
3439 
/* Time in msec to keep trying to read / write from MDIO in case of error */
3441 #define	MDIO_TIMEOUT_MSEC	100
3442 #define	MDIO_PAUSE_MSEC		10
3443 
3444 static int
3445 al_miibus_readreg(device_t dev, int phy, int reg)
3446 {
3447 	struct al_eth_adapter *adapter = device_get_softc(dev);
3448 	uint16_t value = 0;
3449 	int rc;
3450 	int timeout = MDIO_TIMEOUT_MSEC;
3451 
3452 	while (timeout > 0) {
3453 		rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr,
3454 		    -1, reg, &value);
3455 
3456 		if (rc == 0)
3457 			return (value);
3458 
3459 		device_printf_dbg(adapter->dev,
3460 		    "mdio read failed. try again in 10 msec\n");
3461 
3462 		timeout -= MDIO_PAUSE_MSEC;
		pause("readreg pause", MDIO_PAUSE_MSEC);
3464 	}
3465 
3466 	if (rc != 0)
3467 		device_printf(adapter->dev, "MDIO read failed on timeout\n");
3468 
3469 	return (value);
3470 }
3471 
3472 static int
3473 al_miibus_writereg(device_t dev, int phy, int reg, int value)
3474 {
3475 	struct al_eth_adapter *adapter = device_get_softc(dev);
3476 	int rc;
3477 	int timeout = MDIO_TIMEOUT_MSEC;
3478 
3479 	while (timeout > 0) {
3480 		rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr,
3481 		    -1, reg, value);
3482 
3483 		if (rc == 0)
3484 			return (0);
3485 
3486 		device_printf(adapter->dev,
3487 		    "mdio write failed. try again in 10 msec\n");
3488 
3489 		timeout -= MDIO_PAUSE_MSEC;
3490 		pause("miibus writereg", MDIO_PAUSE_MSEC);
3491 	}
3492 
3493 	if (rc != 0)
3494 		device_printf(adapter->dev, "MDIO write failed on timeout\n");
3495 
3496 	return (rc);
3497 }
3498 
3499 static void
3500 al_miibus_statchg(device_t dev)
3501 {
3502 	struct al_eth_adapter *adapter = device_get_softc(dev);
3503 
3504 	device_printf_dbg(adapter->dev,
3505 	    "al_miibus_statchg: state has changed!\n");
3506 	device_printf_dbg(adapter->dev,
3507 	    "al_miibus_statchg: active = 0x%x status = 0x%x\n",
3508 	    adapter->mii->mii_media_active, adapter->mii->mii_media_status);
3509 
3510 	if (adapter->up == 0)
3511 		return;
3512 
3513 	if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) {
3514 		if (adapter->mii->mii_media_status & IFM_ACTIVE) {
3515 			device_printf(adapter->dev, "link is UP\n");
3516 			if_link_state_change(adapter->netdev, LINK_STATE_UP);
3517 		} else {
3518 			device_printf(adapter->dev, "link is DOWN\n");
3519 			if_link_state_change(adapter->netdev, LINK_STATE_DOWN);
3520 		}
3521 	}
3522 }
3523 
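/*
 * al_miibus_linkchg - propagate a PHY link change to the MAC
 *
 * Once the PHY reports a valid, active link, reprogram the MAC link
 * configuration (10/100/1000, half or full duplex) to match the active
 * media word; autonegotiation stays enabled on the MAC side.
 */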
3524 static void
3525 al_miibus_linkchg(device_t dev)
3526 {
3527 	struct al_eth_adapter *adapter = device_get_softc(dev);
3528 	uint8_t duplex = 0;
3529 	uint8_t speed = 0;
3530 
3531 	if (adapter->mii == NULL)
3532 		return;
3533 
3534 	if ((if_getflags(adapter->netdev) & IFF_UP) == 0)
3535 		return;
3536 
3537 	/* Ignore link changes when link is not ready */
3538 	if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
3539 	    (IFM_AVALID | IFM_ACTIVE)) {
3540 		return;
3541 	}
3542 
3543 	if ((adapter->mii->mii_media_active & IFM_FDX) != 0)
3544 		duplex = 1;
3545 
3546 	speed = IFM_SUBTYPE(adapter->mii->mii_media_active);
3547 
3548 	if (speed == IFM_10_T) {
3549 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3550 		    AL_10BASE_T_SPEED, duplex);
3551 		return;
3552 	}
3553 
3554 	if (speed == IFM_100_TX) {
3555 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3556 		    AL_100BASE_TX_SPEED, duplex);
3557 		return;
3558 	}
3559 
3560 	if (speed == IFM_1000_T) {
3561 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3562 		    AL_1000BASE_T_SPEED, duplex);
3563 		return;
3564 	}
3565 
3566 	device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n",
3567 	    adapter->mii->mii_media_active);
3568 }
3569