xref: /freebsd/sys/dev/al_eth/al_eth.c (revision 0b45d36510d8c629fcc49805bc64e5893f4ba63c)
1 /*-
2  * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
3  * All rights reserved.
4  *
5  * Developed by Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/bus.h>
32 #include <sys/kernel.h>
33 #include <sys/kthread.h>
34 #include <sys/lock.h>
35 #include <sys/mbuf.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/rman.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <sys/sysctl.h>
42 #include <sys/taskqueue.h>
43 
44 #include <machine/atomic.h>
45 
46 #include "opt_inet.h"
47 #include "opt_inet6.h"
48 
49 #include <net/ethernet.h>
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_arp.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56 #include <netinet/in.h>
57 #include <net/if_vlan_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_lro.h>
60 
61 #ifdef INET
62 #include <netinet/in.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in_var.h>
65 #include <netinet/ip.h>
66 #endif
67 
68 #ifdef INET6
69 #include <netinet/ip6.h>
70 #endif
71 
72 #include <sys/sockio.h>
73 
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76 
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79 
80 #include <al_hal_common.h>
81 #include <al_hal_plat_services.h>
82 #include <al_hal_udma_config.h>
83 #include <al_hal_udma_iofic.h>
84 #include <al_hal_udma_debug.h>
85 #include <al_hal_eth.h>
86 
87 #include "al_eth.h"
88 #include "al_init_eth_lm.h"
89 #include "arm/annapurna/alpine/alpine_serdes.h"
90 
91 #include "miibus_if.h"
92 
93 #define	device_printf_dbg(fmt, ...) do {				\
94 	if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK();		\
95 	    device_printf(fmt, __VA_ARGS__); AL_DBG_UNLOCK();}		\
96 	} while (0)
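
/*
 * Note: despite the parameter name, the first argument passed to
 * device_printf_dbg() is the device_t, matching device_printf(9);
 * the format string follows in the variadic list.
 */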
97 
98 MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");
99 
100 /* TODO: move these IDs out to a shared PCI header file */
101 #define	PCI_VENDOR_ID_ANNAPURNA_LABS	0x1c36
102 #define	PCI_DEVICE_ID_AL_ETH		0x0001
103 #define	PCI_DEVICE_ID_AL_ETH_ADVANCED	0x0002
104 #define	PCI_DEVICE_ID_AL_ETH_NIC	0x0003
105 #define	PCI_DEVICE_ID_AL_ETH_FPGA_NIC	0x0030
106 #define	PCI_DEVICE_ID_AL_CRYPTO		0x0011
107 #define	PCI_DEVICE_ID_AL_CRYPTO_VF	0x8011
108 #define	PCI_DEVICE_ID_AL_RAID_DMA	0x0021
109 #define	PCI_DEVICE_ID_AL_RAID_DMA_VF	0x8021
110 #define	PCI_DEVICE_ID_AL_USB		0x0041
111 
112 #define	MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
113 #define	MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]
114 
115 #define	AL_ETH_MAC_TABLE_UNICAST_IDX_BASE	0
116 #define	AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT	4
117 #define	AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX	(AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
118 						 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)
119 
120 #define	AL_ETH_MAC_TABLE_DROP_IDX		(AL_ETH_FWD_MAC_NUM - 1)
121 #define	AL_ETH_MAC_TABLE_BROADCAST_IDX		(AL_ETH_MAC_TABLE_DROP_IDX - 1)
122 
123 #define	AL_ETH_THASH_UDMA_SHIFT		0
124 #define	AL_ETH_THASH_UDMA_MASK		(0xF << AL_ETH_THASH_UDMA_SHIFT)
125 
126 #define	AL_ETH_THASH_Q_SHIFT		4
127 #define	AL_ETH_THASH_Q_MASK		(0x3 << AL_ETH_THASH_Q_SHIFT)
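
/*
 * A sketch (our reading of the field names; "udma" and "queue" are
 * hypothetical variables) of how a thash entry packs its fields with
 * the shifts/masks above:
 *
 *	val = ((udma << AL_ETH_THASH_UDMA_SHIFT) & AL_ETH_THASH_UDMA_MASK) |
 *	    ((queue << AL_ETH_THASH_Q_SHIFT) & AL_ETH_THASH_Q_MASK);
 */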
128 
129 /* TODO: the following defines should be moved to the HAL */
130 #define	AL_ETH_FSM_ENTRY_IPV4_TCP		0
131 #define	AL_ETH_FSM_ENTRY_IPV4_UDP		1
132 #define	AL_ETH_FSM_ENTRY_IPV6_TCP		2
133 #define	AL_ETH_FSM_ENTRY_IPV6_UDP		3
134 #define	AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP	4
135 #define	AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP	5
136 
137 /* FSM DATA format */
138 #define	AL_ETH_FSM_DATA_OUTER_2_TUPLE	0
139 #define	AL_ETH_FSM_DATA_OUTER_4_TUPLE	1
140 #define	AL_ETH_FSM_DATA_INNER_2_TUPLE	2
141 #define	AL_ETH_FSM_DATA_INNER_4_TUPLE	3
142 
143 #define	AL_ETH_FSM_DATA_HASH_SEL	(1 << 2)
144 
145 #define	AL_ETH_FSM_DATA_DEFAULT_Q	0
146 #define	AL_ETH_FSM_DATA_DEFAULT_UDMA	0
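
/*
 * Example FSM data word, as composed in al_eth_fsm_table_init() below:
 * a TCP or UDP over IPv4/IPv6 entry hashes on the outer 4-tuple, i.e.
 * AL_ETH_FSM_DATA_OUTER_4_TUPLE | AL_ETH_FSM_DATA_HASH_SEL == 0x5.
 */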
147 
148 #define	AL_BR_SIZE	512
149 #define	AL_TSO_SIZE	65500
150 #define	AL_DEFAULT_MTU	1500
151 
152 #define	CSUM_OFFLOAD		(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
153 
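/*
 * Shifting the payload by 2 bytes 4-byte-aligns the IP header that
 * follows the 14-byte Ethernet header (14 + 2 == 16).
 */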
154 #define	AL_IP_ALIGNMENT_OFFSET	2
155 
156 #define	SFP_I2C_ADDR		0x50
157 
158 #define	AL_MASK_GROUP_A_INT	0x7
159 #define	AL_MASK_GROUP_B_INT	0xF
160 #define	AL_MASK_GROUP_C_INT	0xF
161 #define	AL_MASK_GROUP_D_INT	0xFFFFFFFF
162 
163 #define	AL_REG_OFFSET_FORWARD_INTR	(0x1800000 + 0x1210)
164 #define	AL_EN_FORWARD_INTR	0x1FFFF
165 #define	AL_DIS_FORWARD_INTR	0
166 
167 #define	AL_M2S_MASK_INIT	0x480
168 #define	AL_S2M_MASK_INIT	0x1E0
169 #define	AL_M2S_S2M_MASK_NOT_INT	(0x3f << 25)
170 
171 #define	AL_10BASE_T_SPEED	10
172 #define	AL_100BASE_TX_SPEED	100
173 #define	AL_1000BASE_T_SPEED	1000
174 
175 #define	AL_RX_LOCK_INIT(_sc)	mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
176 #define	AL_RX_LOCK(_sc)		mtx_lock(&((_sc)->if_rx_lock))
177 #define	AL_RX_UNLOCK(_sc)	mtx_unlock(&((_sc)->if_rx_lock))
178 
179 /* helper functions */
180 static int al_is_device_supported(device_t);
181 
182 static void al_eth_init_rings(struct al_eth_adapter *);
183 static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
184 int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
185 int al_eth_fpga_write_pci_config(void *, int, uint32_t);
186 int al_eth_read_pci_config(void *, int, uint32_t *);
187 int al_eth_write_pci_config(void *, int, uint32_t);
188 void al_eth_irq_config(uint32_t *, uint32_t);
189 void al_eth_forward_int_config(uint32_t *, uint32_t);
190 static void al_eth_start_xmit(void *, int);
191 static void al_eth_rx_recv_work(void *, int);
192 static int al_eth_up(struct al_eth_adapter *);
193 static void al_eth_down(struct al_eth_adapter *);
194 static void al_eth_interrupts_unmask(struct al_eth_adapter *);
195 static void al_eth_interrupts_mask(struct al_eth_adapter *);
196 static int al_eth_check_mtu(struct al_eth_adapter *, int);
197 static uint64_t al_get_counter(if_t, ift_counter);
198 static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
199 static int al_eth_board_params_init(struct al_eth_adapter *);
200 static int al_media_update(if_t);
201 static void al_media_status(if_t, struct ifmediareq *);
202 static int al_eth_function_reset(struct al_eth_adapter *);
203 static int al_eth_hw_init_adapter(struct al_eth_adapter *);
204 static void al_eth_serdes_init(struct al_eth_adapter *);
205 static void al_eth_lm_config(struct al_eth_adapter *);
206 static int al_eth_hw_init(struct al_eth_adapter *);
207 
208 static void al_tick_stats(void *);
209 
210 /* ifnet entry points */
211 static void al_init(void *);
212 static int al_mq_start(if_t, struct mbuf *);
213 static void al_qflush(if_t);
214 static int al_ioctl(if_t ifp, u_long, caddr_t);
215 
216 /* bus entry points */
217 static int al_probe(device_t);
218 static int al_attach(device_t);
219 static int al_detach(device_t);
220 static int al_shutdown(device_t);
221 
222 /* mii bus support routines */
223 static int al_miibus_readreg(device_t, int, int);
224 static int al_miibus_writereg(device_t, int, int, int);
225 static void al_miibus_statchg(device_t);
226 static void al_miibus_linkchg(device_t);
227 
228 struct al_eth_adapter* g_adapters[16];
229 uint32_t g_adapters_count;
230 
231 /* flag for napi-like mbuf processing, controlled from sysctl */
232 static int napi = 0;
233 
234 static device_method_t al_methods[] = {
235 	/* Device interface */
236 	DEVMETHOD(device_probe,		al_probe),
237 	DEVMETHOD(device_attach,	al_attach),
238 	DEVMETHOD(device_detach,	al_detach),
239 	DEVMETHOD(device_shutdown,	al_shutdown),
240 
241 	DEVMETHOD(miibus_readreg,	al_miibus_readreg),
242 	DEVMETHOD(miibus_writereg,	al_miibus_writereg),
243 	DEVMETHOD(miibus_statchg,	al_miibus_statchg),
244 	DEVMETHOD(miibus_linkchg,	al_miibus_linkchg),
245 	{ 0, 0 }
246 };
247 
248 static driver_t al_driver = {
249 	"al",
250 	al_methods,
251 	sizeof(struct al_eth_adapter),
252 };
253 
254 DRIVER_MODULE(al, pci, al_driver, 0, 0);
255 DRIVER_MODULE(miibus, al, miibus_driver, 0, 0);
256 
257 static int
258 al_probe(device_t dev)
259 {
260 	if ((al_is_device_supported(dev)) != 0) {
261 		device_set_desc(dev, "al");
262 		return (BUS_PROBE_DEFAULT);
263 	}
264 	return (ENXIO);
265 }
266 
267 static int
268 al_attach(device_t dev)
269 {
270 	struct al_eth_adapter *adapter;
271 	struct sysctl_oid_list *child;
272 	struct sysctl_ctx_list *ctx;
273 	struct sysctl_oid *tree;
274 	if_t ifp;
275 	uint32_t dev_id;
276 	uint32_t rev_id;
277 	int bar_udma;
278 	int bar_mac;
279 	int bar_ec;
280 	int err;
281 
282 	err = 0;
283 	ifp = NULL;
284 	dev_id = rev_id = 0;
285 	ctx = device_get_sysctl_ctx(dev);
286 	tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
287 	child = SYSCTL_CHILDREN(tree);
288 
289 	if (g_adapters_count == 0) {
290 		SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi",
291 		    CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism");
292 	}
293 	adapter = device_get_softc(dev);
294 	adapter->dev = dev;
295 	adapter->board_type = ALPINE_INTEGRATED;
296 	snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
297 	    device_get_nameunit(dev));
298 	AL_RX_LOCK_INIT(adapter);
299 
300 	g_adapters[g_adapters_count] = adapter;
301 
302 	bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
303 	adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
304 	    &bar_udma, RF_ACTIVE);
305 	if (adapter->udma_res == NULL) {
306 		device_printf(adapter->dev,
307 		    "could not allocate memory resources for DMA.\n");
308 		err = ENOMEM;
309 		goto err_res_dma;
310 	}
311 	adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
312 	    rman_get_bushandle(adapter->udma_res));
313 	bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
314 	adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
315 	    &bar_mac, RF_ACTIVE);
316 	if (adapter->mac_res == NULL) {
317 		device_printf(adapter->dev,
318 		    "could not allocate memory resources for MAC.\n");
319 		err = ENOMEM;
320 		goto err_res_mac;
321 	}
322 	adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
323 	    rman_get_bushandle(adapter->mac_res));
324 
325 	bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
326 	adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
327 	    RF_ACTIVE);
328 	if (adapter->ec_res == NULL) {
329 		device_printf(adapter->dev,
330 		    "could not allocate memory resources for EC.\n");
331 		err = ENOMEM;
332 		goto err_res_ec;
333 	}
334 	adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
335 	    rman_get_bushandle(adapter->ec_res));
336 
337 	adapter->netdev = ifp = if_alloc(IFT_ETHER);
338 
339 	if_setsoftc(ifp, adapter);
340 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
341 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
342 	if_setflags(ifp, if_getdrvflags(ifp));
343 	if_setflagbits(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI, 0);
344 	if_settransmitfn(ifp, al_mq_start);
345 	if_setqflushfn(ifp, al_qflush);
346 	if_setioctlfn(ifp, al_ioctl);
347 	if_setinitfn(ifp, al_init);
348 	if_setgetcounterfn(ifp, al_get_counter);
349 	if_setmtu(ifp, AL_DEFAULT_MTU);
350 
351 	adapter->if_flags = if_getflags(ifp);
352 
353 	if_setcapabilities(ifp, if_getcapenable(ifp));
354 
355 	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM |
356 	    IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
357 	    IFCAP_LRO | IFCAP_JUMBO_MTU, 0);
358 
359 	if_setcapenable(ifp, if_getcapabilities(ifp));
360 
361 	adapter->id_number = g_adapters_count;
362 
363 	if (adapter->board_type == ALPINE_INTEGRATED) {
364 		dev_id = pci_get_device(adapter->dev);
365 		rev_id = pci_get_revid(adapter->dev);
366 	} else {
367 		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
368 		    PCIR_DEVICE, &dev_id);
369 		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
370 		    PCIR_REVID, &rev_id);
371 	}
372 
373 	adapter->dev_id = dev_id;
374 	adapter->rev_id = rev_id;
375 
376 	/* set default ring sizes */
377 	adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
378 	adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
379 	adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
380 	adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;
381 
382 	adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
383 	adapter->num_rx_queues = AL_ETH_NUM_QUEUES;
384 
385 	adapter->small_copy_len	= AL_ETH_DEFAULT_SMALL_PACKET_LEN;
386 	adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
387 	adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;
388 
389 	al_eth_req_rx_buff_size(adapter, if_getmtu(adapter->netdev));
390 
391 	adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;
392 
393 	err = al_eth_board_params_init(adapter);
394 	if (err != 0)
395 		goto err;
396 
397 	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
398 		ifmedia_init(&adapter->media, IFM_IMASK,
399 		    al_media_update, al_media_status);
400 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
401 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
402 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
403 		ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
404 	}
405 
406 	al_eth_function_reset(adapter);
407 
408 	err = al_eth_hw_init_adapter(adapter);
409 	if (err != 0)
410 		goto err;
411 
412 	al_eth_init_rings(adapter);
413 	g_adapters_count++;
414 
415 	al_eth_lm_config(adapter);
416 	mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
417 	mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
418 	callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
419 	callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);
420 
421 	ether_ifattach(ifp, adapter->mac_addr);
422 	if_setmtu(ifp, AL_DEFAULT_MTU);
423 
424 	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
425 		al_eth_hw_init(adapter);
426 
427 		/* Attach PHY(s) */
428 		err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
429 		    al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
430 		    MII_OFFSET_ANY, 0);
431 		if (err != 0) {
432 			device_printf(adapter->dev, "attaching PHYs failed\n");
433 			return (err);
434 		}
435 
436 		adapter->mii = device_get_softc(adapter->miibus);
437 	}
438 
439 	return (err);
440 
441 err:
442 	bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
443 err_res_ec:
444 	bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
445 err_res_mac:
446 	bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
447 err_res_dma:
448 	return (err);
449 }
450 
451 static int
452 al_detach(device_t dev)
453 {
454 	struct al_eth_adapter *adapter;
455 
456 	adapter = device_get_softc(dev);
457 	ether_ifdetach(adapter->netdev);
458 
459 	mtx_destroy(&adapter->stats_mtx);
460 	mtx_destroy(&adapter->wd_mtx);
461 
462 	al_eth_down(adapter);
463 
464 	bus_release_resource(dev, SYS_RES_IRQ,    0, adapter->irq_res);
465 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
466 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res);
467 	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);
468 
469 	return (0);
470 }
471 
472 int
473 al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
474 {
475 
476 	/* handle is the base address of the adapter */
477 	*val = al_reg_read32((void*)((u_long)handle + where));
478 
479 	return (0);
480 }
481 
482 int
483 al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
484 {
485 
486 	/* handle is the base address of the adapter */
487 	al_reg_write32((void*)((u_long)handle + where), val);
488 	return (0);
489 }
490 
491 int
492 al_eth_read_pci_config(void *handle, int where, uint32_t *val)
493 {
494 
495 	/* handle is a pci_dev */
496 	*val = pci_read_config((device_t)handle, where, sizeof(*val));
497 	return (0);
498 }
499 
500 int
501 al_eth_write_pci_config(void *handle, int where, uint32_t val)
502 {
503 
504 	/* handle is a pci_dev */
505 	pci_write_config((device_t)handle, where, val, sizeof(val));
506 	return (0);
507 }
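
/*
 * The four config-space accessors above are handed to al_eth_flr_rmn()
 * as read/write callbacks (see al_eth_function_reset() below); the FPGA
 * pair is used when the adapter is not an integrated Alpine device.
 */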
508 
509 void
510 al_eth_irq_config(uint32_t *offset, uint32_t value)
511 {
512 
513 	al_reg_write32_relaxed(offset, value);
514 }
515 
516 void
517 al_eth_forward_int_config(uint32_t *offset, uint32_t value)
518 {
519 
520 	al_reg_write32(offset, value);
521 }
522 
523 static void
524 al_eth_serdes_init(struct al_eth_adapter *adapter)
525 {
526 	void __iomem	*serdes_base;
527 
528 	adapter->serdes_init = false;
529 
530 	serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
531 	if (serdes_base == NULL) {
532 		device_printf(adapter->dev, "serdes_base get failed!\n");
533 		return;
534 	}
535 
536 	serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);
537 
538 	al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
539 	    &adapter->serdes_obj);
540 
541 	adapter->serdes_init = true;
542 }
543 
544 static void
545 al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
546 {
547 	bus_addr_t *paddr;
548 
549 	paddr = arg;
550 	*paddr = segs->ds_addr;
551 }
552 
553 static int
554 al_dma_alloc_coherent(device_t dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
555     bus_addr_t *baddr, void **vaddr, uint32_t size)
556 {
557 	int ret;
558 	uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE;
559 
560 	ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
561 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
562 	    maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag);
563 	if (ret != 0) {
564 		device_printf(dev,
565 		    "failed to create bus tag, ret = %d\n", ret);
566 		return (ret);
567 	}
568 
569 	ret = bus_dmamem_alloc(*tag, vaddr,
570 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
571 	if (ret != 0) {
572 		device_printf(dev,
573 		    "failed to allocate dmamem, ret = %d\n", ret);
574 		return (ret);
575 	}
576 
577 	ret = bus_dmamap_load(*tag, *map, *vaddr,
578 	    size, al_dma_map_addr, baddr, 0);
579 	if (ret != 0) {
580 		device_printf(dev,
581 		    "failed to allocate bus_dmamap_load, ret = %d\n", ret);
582 		return (ret);
583 	}
584 
585 	return (0);
586 }
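
/*
 * A minimal usage sketch of this helper and al_dma_free_coherent()
 * below (hypothetical variables, error handling omitted):
 *
 *	bus_dma_tag_t tag;
 *	bus_dmamap_t map;
 *	bus_addr_t baddr;
 *	void *vaddr;
 *
 *	if (al_dma_alloc_coherent(dev, &tag, &map, &baddr, &vaddr,
 *	    size) == 0) {
 *		... use vaddr (CPU view) and baddr (device view) ...
 *		al_dma_free_coherent(tag, map, vaddr);
 *	}
 */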
587 
588 static void
589 al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
590 {
591 
592 	bus_dmamap_unload(tag, map);
593 	bus_dmamem_free(tag, vaddr, map);
594 	bus_dma_tag_destroy(tag);
595 }
596 
597 static void
598 al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter,
599     uint8_t idx, uint8_t udma_mask)
600 {
601 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
602 
603 	memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr));
604 
605 	memset(entry.mask, 0xff, sizeof(entry.mask));
606 	entry.rx_valid = true;
607 	entry.tx_valid = false;
608 	entry.udma_mask = udma_mask;
609 	entry.filter = false;
610 
611 	device_printf_dbg(adapter->dev,
612 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
613 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
614 
615 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
616 }
617 
618 static void
619 al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx,
620     uint8_t udma_mask)
621 {
622 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
623 
624 	memset(entry.addr, 0x00, sizeof(entry.addr));
625 	memset(entry.mask, 0x00, sizeof(entry.mask));
626 	entry.mask[0] |= 1;
627 	entry.addr[0] |= 1;
628 
629 	entry.rx_valid = true;
630 	entry.tx_valid = false;
631 	entry.udma_mask = udma_mask;
632 	entry.filter = false;
633 
634 	device_printf_dbg(adapter->dev,
635 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
636 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
637 
638 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
639 }
640 
641 static void
642 al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter,
643     uint8_t idx, uint8_t udma_mask)
644 {
645 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
646 
647 	memset(entry.addr, 0xff, sizeof(entry.addr));
648 	memset(entry.mask, 0xff, sizeof(entry.mask));
649 
650 	entry.rx_valid = true;
651 	entry.tx_valid = false;
652 	entry.udma_mask = udma_mask;
653 	entry.filter = false;
654 
655 	device_printf_dbg(adapter->dev,
656 	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
657 	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
658 
659 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
660 }
661 
662 static void
663 al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter,
664     bool promiscuous)
665 {
666 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
667 
668 	memset(entry.addr, 0x00, sizeof(entry.addr));
669 	memset(entry.mask, 0x00, sizeof(entry.mask));
670 
671 	entry.rx_valid = true;
672 	entry.tx_valid = false;
673 	entry.udma_mask = (promiscuous) ? 1 : 0;
674 	entry.filter = (promiscuous) ? false : true;
675 
676 	device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n",
677 	    __func__, (promiscuous) ? "enter" : "exit");
678 
679 	al_eth_fwd_mac_table_set(&adapter->hal_adapter,
680 	    AL_ETH_MAC_TABLE_DROP_IDX, &entry);
681 }
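
/*
 * Resulting forwarding MAC table layout (indices defined near the top
 * of this file): unicast entries at 0..3, the all-multicast entry at 4,
 * and the last two slots hold broadcast (AL_ETH_FWD_MAC_NUM - 2) and
 * drop/promiscuous (AL_ETH_FWD_MAC_NUM - 1).
 */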
682 
683 static void
684 al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx,
685     uint8_t udma, uint32_t queue)
686 {
687 
688 	if (udma != 0)
689 		panic("only UDMA0 is supported");
690 
691 	if (queue >= AL_ETH_NUM_QUEUES)
692 		panic("invalid queue number");
693 
694 	al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue);
695 }
696 
697 /* Init FSM. No tunneling supported yet; if the packet is TCP/UDP over IPv4/IPv6, use a 4-tuple hash. */
698 static void
699 al_eth_fsm_table_init(struct al_eth_adapter *adapter)
700 {
701 	uint32_t val;
702 	int i;
703 
704 	for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) {
705 		uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i);
706 		switch (outer_type) {
707 		case AL_ETH_FSM_ENTRY_IPV4_TCP:
708 		case AL_ETH_FSM_ENTRY_IPV4_UDP:
709 		case AL_ETH_FSM_ENTRY_IPV6_TCP:
710 		case AL_ETH_FSM_ENTRY_IPV6_UDP:
711 			val = AL_ETH_FSM_DATA_OUTER_4_TUPLE |
712 			    AL_ETH_FSM_DATA_HASH_SEL;
713 			break;
714 		case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP:
715 		case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP:
716 			val = AL_ETH_FSM_DATA_OUTER_2_TUPLE |
717 			    AL_ETH_FSM_DATA_HASH_SEL;
718 			break;
719 		default:
720 			val = AL_ETH_FSM_DATA_DEFAULT_Q |
721 			    AL_ETH_FSM_DATA_DEFAULT_UDMA;
722 		}
723 		al_eth_fsm_table_set(&adapter->hal_adapter, i, val);
724 	}
725 }
726 
727 static void
728 al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter,
729     uint8_t idx)
730 {
731 	struct al_eth_fwd_mac_table_entry entry = { { 0 } };
732 
733 	device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx);
734 
735 	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
736 }
737 
738 static int
739 al_eth_hw_init_adapter(struct al_eth_adapter *adapter)
740 {
741 	struct al_eth_adapter_params *params = &adapter->eth_hal_params;
742 	int rc;
743 
744 	/* params->dev_id = adapter->dev_id; */
745 	params->rev_id = adapter->rev_id;
746 	params->udma_id = 0;
747 	params->enable_rx_parser = 1; /* enable RX EPE parser */
748 	params->udma_regs_base = adapter->udma_base; /* UDMA register base address */
749 	params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */
750 	params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */
751 	params->name = adapter->name;
752 	params->serdes_lane = adapter->serdes_lane;
753 
754 	rc = al_eth_adapter_init(&adapter->hal_adapter, params);
755 	if (rc != 0)
756 		device_printf(adapter->dev, "%s failed at hal init!\n",
757 		    __func__);
758 
759 	if ((adapter->board_type == ALPINE_NIC) ||
760 	    (adapter->board_type == ALPINE_FPGA_NIC)) {
761 		/* In PCIe NIC mode, force the eth UDMA to access PCIE0 using the tgtid. */
762 		struct al_udma_gen_tgtid_conf conf;
763 		int i;
764 		for (i = 0; i < DMA_MAX_Q; i++) {
765 			conf.tx_q_conf[i].queue_en = AL_TRUE;
766 			conf.tx_q_conf[i].desc_en = AL_FALSE;
767 			conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
768 			conf.rx_q_conf[i].queue_en = AL_TRUE;
769 			conf.rx_q_conf[i].desc_en = AL_FALSE;
770 			conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
771 		}
772 		al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf);
773 	}
774 
775 	return (rc);
776 }
777 
778 static void
779 al_eth_lm_config(struct al_eth_adapter *adapter)
780 {
781 	struct al_eth_lm_init_params params = {0};
782 
783 	params.adapter = &adapter->hal_adapter;
784 	params.serdes_obj = &adapter->serdes_obj;
785 	params.lane = adapter->serdes_lane;
786 	params.sfp_detection = adapter->sfp_detection_needed;
787 	if (adapter->sfp_detection_needed == true) {
788 		params.sfp_bus_id = adapter->i2c_adapter_id;
789 		params.sfp_i2c_addr = SFP_I2C_ADDR;
790 	}
791 
792 	if (adapter->sfp_detection_needed == false) {
793 		switch (adapter->mac_mode) {
794 		case AL_ETH_MAC_MODE_10GbE_Serial:
795 			if ((adapter->lt_en != 0) && (adapter->an_en != 0))
796 				params.default_mode = AL_ETH_LM_MODE_10G_DA;
797 			else
798 				params.default_mode = AL_ETH_LM_MODE_10G_OPTIC;
799 			break;
800 		case AL_ETH_MAC_MODE_SGMII:
801 			params.default_mode = AL_ETH_LM_MODE_1G;
802 			break;
803 		default:
804 			params.default_mode = AL_ETH_LM_MODE_10G_DA;
805 		}
806 	} else
807 		params.default_mode = AL_ETH_LM_MODE_10G_DA;
808 
809 	params.link_training = adapter->lt_en;
810 	params.rx_equal = true;
811 	params.static_values = !adapter->dont_override_serdes;
812 	params.i2c_context = adapter;
813 	params.kr_fec_enable = false;
814 
815 	params.retimer_exist = adapter->retimer.exist;
816 	params.retimer_bus_id = adapter->retimer.bus_id;
817 	params.retimer_i2c_addr = adapter->retimer.i2c_addr;
818 	params.retimer_channel = adapter->retimer.channel;
819 
820 	al_eth_lm_init(&adapter->lm_context, &params);
821 }
822 
823 static int
824 al_eth_board_params_init(struct al_eth_adapter *adapter)
825 {
826 
827 	if (adapter->board_type == ALPINE_NIC) {
828 		adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
829 		adapter->sfp_detection_needed = false;
830 		adapter->phy_exist = false;
831 		adapter->an_en = false;
832 		adapter->lt_en = false;
833 		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
834 		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
835 	} else if (adapter->board_type == ALPINE_FPGA_NIC) {
836 		adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
837 		adapter->sfp_detection_needed = false;
838 		adapter->phy_exist = false;
839 		adapter->an_en = false;
840 		adapter->lt_en = false;
841 		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
842 		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
843 	} else {
844 		struct al_eth_board_params params;
845 		int rc;
846 
847 		adapter->auto_speed = false;
848 
849 		rc = al_eth_board_params_get(adapter->mac_base, &params);
850 		if (rc != 0) {
851 			device_printf(adapter->dev,
852 			    "board info not available\n");
853 			return (-1);
854 		}
855 
856 		adapter->phy_exist = params.phy_exist == true;
857 		adapter->phy_addr = params.phy_mdio_addr;
858 		adapter->an_en = params.autoneg_enable;
859 		adapter->lt_en = params.kr_lt_enable;
860 		adapter->serdes_grp = params.serdes_grp;
861 		adapter->serdes_lane = params.serdes_lane;
862 		adapter->sfp_detection_needed = params.sfp_plus_module_exist;
863 		adapter->i2c_adapter_id = params.i2c_adapter_id;
864 		adapter->ref_clk_freq = params.ref_clk_freq;
865 		adapter->dont_override_serdes = params.dont_override_serdes;
866 		adapter->link_config.active_duplex = !params.half_duplex;
867 		adapter->link_config.autoneg = !params.an_disable;
868 		adapter->link_config.force_1000_base_x = params.force_1000_base_x;
869 		adapter->retimer.exist = params.retimer_exist;
870 		adapter->retimer.bus_id = params.retimer_bus_id;
871 		adapter->retimer.i2c_addr = params.retimer_i2c_addr;
872 		adapter->retimer.channel = params.retimer_channel;
873 
874 		switch (params.speed) {
875 		default:
876 			device_printf(adapter->dev,
877 			    "%s: invalid speed (%d)\n", __func__, params.speed);
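			/* FALLTHROUGH */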
878 		case AL_ETH_BOARD_1G_SPEED_1000M:
879 			adapter->link_config.active_speed = 1000;
880 			break;
881 		case AL_ETH_BOARD_1G_SPEED_100M:
882 			adapter->link_config.active_speed = 100;
883 			break;
884 		case AL_ETH_BOARD_1G_SPEED_10M:
885 			adapter->link_config.active_speed = 10;
886 			break;
887 		}
888 
889 		switch (params.mdio_freq) {
890 		default:
891 			device_printf(adapter->dev,
892 			    "%s: invalid mdio freq (%d)\n", __func__,
893 			    params.mdio_freq);
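			/* FALLTHROUGH */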
894 		case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ:
895 			adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
896 			break;
897 		case AL_ETH_BOARD_MDIO_FREQ_1_MHZ:
898 			adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ;
899 			break;
900 		}
901 
902 		switch (params.media_type) {
903 		case AL_ETH_BOARD_MEDIA_TYPE_RGMII:
904 			if (params.sfp_plus_module_exist == true)
905 				/* Backward compatibility */
906 				adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
907 			else
908 				adapter->mac_mode = AL_ETH_MAC_MODE_RGMII;
909 
910 			adapter->use_lm = false;
911 			break;
912 		case AL_ETH_BOARD_MEDIA_TYPE_SGMII:
913 			adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
914 			adapter->use_lm = true;
915 			break;
916 		case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR:
917 			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
918 			adapter->use_lm = true;
919 			break;
920 		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT:
921 			adapter->sfp_detection_needed = true;
922 			adapter->auto_speed = false;
923 			adapter->use_lm = true;
924 			break;
925 		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED:
926 			adapter->sfp_detection_needed = true;
927 			adapter->auto_speed = true;
928 			adapter->mac_mode_set = false;
929 			adapter->use_lm = true;
930 
931 			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
932 			break;
933 		default:
934 			device_printf(adapter->dev,
935 			    "%s: unsupported media type %d\n",
936 			    __func__, params.media_type);
937 			return (-1);
938 		}
939 
940 		device_printf(adapter->dev,
941 		    "Board info: phy exist %s. phy addr %d. mdio freq %u kHz. "
942 		    "SFP connected %s. media %d\n",
943 		    params.phy_exist ? "Yes" : "No",
944 		    params.phy_mdio_addr, adapter->mdio_freq,
945 		    params.sfp_plus_module_exist ? "Yes" : "No",
946 		    params.media_type);
947 	}
948 
949 	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
950 
951 	return (0);
952 }
953 
954 static int
955 al_eth_function_reset(struct al_eth_adapter *adapter)
956 {
957 	struct al_eth_board_params params;
958 	int rc;
959 
960 	/* save board params so we can restore them after reset */
961 	al_eth_board_params_get(adapter->mac_base, &params);
962 	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
963 	if (adapter->board_type == ALPINE_INTEGRATED)
964 		rc = al_eth_flr_rmn(&al_eth_read_pci_config,
965 		    &al_eth_write_pci_config,
966 		    adapter->dev, adapter->mac_base);
967 	else
968 		rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config,
969 		    &al_eth_fpga_write_pci_config,
970 		    adapter->internal_pcie_base, adapter->mac_base);
971 
972 	/* restore params */
973 	al_eth_board_params_set(adapter->mac_base, &params);
974 	al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr);
975 
976 	return (rc);
977 }
978 
979 static void
980 al_eth_init_rings(struct al_eth_adapter *adapter)
981 {
982 	int i;
983 
984 	for (i = 0; i < adapter->num_tx_queues; i++) {
985 		struct al_eth_ring *ring = &adapter->tx_ring[i];
986 
987 		ring->ring_id = i;
988 		ring->dev = adapter->dev;
989 		ring->adapter = adapter;
990 		ring->netdev = adapter->netdev;
991 		al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
992 		    &ring->dma_q);
993 		ring->sw_count = adapter->tx_ring_count;
994 		ring->hw_count = adapter->tx_descs_count;
995 		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get((struct unit_regs *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
996 		ring->unmask_val = ~(1 << i);
997 	}
998 
999 	for (i = 0; i < adapter->num_rx_queues; i++) {
1000 		struct al_eth_ring *ring = &adapter->rx_ring[i];
1001 
1002 		ring->ring_id = i;
1003 		ring->dev = adapter->dev;
1004 		ring->adapter = adapter;
1005 		ring->netdev = adapter->netdev;
1006 		al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q);
1007 		ring->sw_count = adapter->rx_ring_count;
1008 		ring->hw_count = adapter->rx_descs_count;
1009 		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
1010 		    (struct unit_regs *)adapter->udma_base,
1011 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1012 		ring->unmask_val = ~(1 << i);
1013 	}
1014 }
1015 
1016 static void
1017 al_init_locked(void *arg)
1018 {
1019 	struct al_eth_adapter *adapter = arg;
1020 	if_t ifp = adapter->netdev;
1021 	int rc = 0;
1022 
1023 	al_eth_down(adapter);
1024 	rc = al_eth_up(adapter);
1025 
1026 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1027 	if (rc == 0)
1028 		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1029 }
1030 
1031 static void
1032 al_init(void *arg)
1033 {
1034 	struct al_eth_adapter *adapter = arg;
1035 
1036 	al_init_locked(adapter);
1037 }
1038 
1039 static inline int
1040 al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
1041     struct al_eth_ring *rx_ring,
1042     struct al_eth_rx_buffer *rx_info)
1043 {
1044 	struct al_buf *al_buf;
1045 	bus_dma_segment_t segs[2];
1046 	int error;
1047 	int nsegs;
1048 
1049 	if (rx_info->m != NULL)
1050 		return (0);
1051 
1052 	rx_info->data_size = adapter->rx_mbuf_sz;
1053 
1054 	AL_RX_LOCK(adapter);
1055 
1056 	/* Get mbuf using UMA allocator */
1057 	rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1058 	    rx_info->data_size);
1059 	AL_RX_UNLOCK(adapter);
1060 
1061 	if (rx_info->m == NULL)
1062 		return (ENOMEM);
1063 
1064 	rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;
1065 
1066 	/* Map packets for DMA */
1067 	error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
1068 	    rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
1069 	if (__predict_false(error)) {
1070 		device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
1071 		    error);
1072 		m_freem(rx_info->m);
1073 		rx_info->m = NULL;
1074 		return (EFAULT);
1075 	}
1076 
1077 	al_buf = &rx_info->al_buf;
1078 	al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
1079 	al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;
1080 
1081 	return (0);
1082 }
1083 
1084 static int
1085 al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
1086     unsigned int num)
1087 {
1088 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
1089 	uint16_t next_to_use;
1090 	unsigned int i;
1091 
1092 	next_to_use = rx_ring->next_to_use;
1093 
1094 	for (i = 0; i < num; i++) {
1095 		int rc;
1096 		struct al_eth_rx_buffer *rx_info =
1097 		    &rx_ring->rx_buffer_info[next_to_use];
1098 
1099 		if (__predict_false(al_eth_alloc_rx_buf(adapter,
1100 		    rx_ring, rx_info) < 0)) {
1101 			device_printf(adapter->dev,
1102 			    "failed to alloc buffer for rx queue %d\n", qid);
1103 			break;
1104 		}
1105 
1106 		rc = al_eth_rx_buffer_add(rx_ring->dma_q,
1107 		    &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
1108 		if (__predict_false(rc)) {
1109 			device_printf(adapter->dev,
1110 			    "failed to add buffer for rx queue %d\n", qid);
1111 			break;
1112 		}
1113 
1114 		next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
1115 	}
1116 
1117 	if (__predict_false(i < num))
1118 		device_printf(adapter->dev,
1119 		    "refilled rx queue %d with %d pages only - available %d\n",
1120 		    qid, i, al_udma_available_get(rx_ring->dma_q));
1121 
1122 	if (__predict_true(i))
1123 		al_eth_rx_buffer_action(rx_ring->dma_q, i);
1124 
1125 	rx_ring->next_to_use = next_to_use;
1126 
1127 	return (i);
1128 }
1129 
1130 /*
1131  * al_eth_refill_all_rx_bufs - allocate Rx buffers for all queues
1132  * @adapter: board private structure
1133  */
1134 static void
1135 al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
1136 {
1137 	int i;
1138 
1139 	for (i = 0; i < adapter->num_rx_queues; i++)
1140 		al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
1141 }
1142 
1143 static void
1144 al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
1145 {
1146 	unsigned int total_done;
1147 	uint16_t next_to_clean;
1148 	int qid = tx_ring->ring_id;
1149 
1150 	total_done = al_eth_comp_tx_get(tx_ring->dma_q);
1151 	device_printf_dbg(tx_ring->dev,
1152 	    "tx_poll: q %d total completed descs %x\n", qid, total_done);
1153 	next_to_clean = tx_ring->next_to_clean;
1154 
1155 	while (total_done != 0) {
1156 		struct al_eth_tx_buffer *tx_info;
1157 		struct mbuf *mbuf;
1158 
1159 		tx_info = &tx_ring->tx_buffer_info[next_to_clean];
1160 		/* stop if not all descriptors of the packet are completed */
1161 		if (tx_info->tx_descs > total_done)
1162 			break;
1163 
1164 		mbuf = tx_info->m;
1165 
1166 		tx_info->m = NULL;
1167 
1168 		device_printf_dbg(tx_ring->dev,
1169 		    "tx_poll: q %d mbuf %p completed\n", qid, mbuf);
1170 
1171 		/* map is no longer required */
1172 		bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);
1173 
1174 		m_freem(mbuf);
1175 		total_done -= tx_info->tx_descs;
1176 		next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
1177 	}
1178 
1179 	tx_ring->next_to_clean = next_to_clean;
1180 
1181 	device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
1182 	    qid, next_to_clean);
1183 
1184 	/*
1185 	 * Make the circular ring update visible to the transmit path
1186 	 * before it checks whether the queue is stalled.
1187 	 */
1188 	al_smp_data_memory_barrier();
1189 }
1190 
1191 static void
1192 al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
1193     struct al_eth_pkt *hal_pkt, struct mbuf *m)
1194 {
1195 	uint32_t mss = m->m_pkthdr.tso_segsz;
1196 	struct ether_vlan_header *eh;
1197 	uint16_t etype;
1198 #ifdef INET
1199 	struct ip *ip;
1200 #endif
1201 #ifdef INET6
1202 	struct ip6_hdr *ip6;
1203 #endif
1204 	struct tcphdr *th = NULL;
1205 	int	ehdrlen, ip_hlen = 0;
1206 	uint8_t	ipproto = 0;
1207 	uint32_t offload = 0;
1208 
1209 	if (mss != 0)
1210 		offload = 1;
1211 
1212 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0)
1213 		offload = 1;
1214 
1215 	if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
1216 		offload = 1;
1217 
1218 	if (offload != 0) {
1219 		struct al_eth_meta_data *meta = &tx_ring->hal_meta;
1220 
1221 		if (mss != 0)
1222 			hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO |
1223 			    AL_ETH_TX_FLAGS_L4_CSUM);
1224 		else
1225 			hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM |
1226 			    AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM);
1227 
1228 		/*
1229 		 * Determine where frame payload starts.
1230 		 * Jump over vlan headers if already present,
1231 		 * helpful for QinQ too.
1232 		 */
1233 		eh = mtod(m, struct ether_vlan_header *);
1234 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1235 			etype = ntohs(eh->evl_proto);
1236 			ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1237 		} else {
1238 			etype = ntohs(eh->evl_encap_proto);
1239 			ehdrlen = ETHER_HDR_LEN;
1240 		}
1241 
1242 		switch (etype) {
1243 #ifdef INET
1244 		case ETHERTYPE_IP:
1245 			ip = (struct ip *)(m->m_data + ehdrlen);
1246 			ip_hlen = ip->ip_hl << 2;
1247 			ipproto = ip->ip_p;
1248 			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4;
1249 			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1250 			if (mss != 0)
1251 				hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM;
1252 			if (ipproto == IPPROTO_TCP)
1253 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
1254 			else
1255 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
1256 			break;
1257 #endif /* INET */
1258 #ifdef INET6
1259 		case ETHERTYPE_IPV6:
1260 			ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1261 			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6;
1262 			ip_hlen = sizeof(struct ip6_hdr);
1263 			th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1264 			ipproto = ip6->ip6_nxt;
1265 			if (ipproto == IPPROTO_TCP)
1266 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
1267 			else
1268 				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
1269 			break;
1270 #endif /* INET6 */
1271 		default:
1272 			break;
1273 		}
1274 
1275 		meta->words_valid = 4;
1276 		meta->l3_header_len = ip_hlen;
1277 		meta->l3_header_offset = ehdrlen;
1278 		if (th != NULL)
1279 			meta->l4_header_len = th->th_off; /* this param needed only for TSO */
1280 		meta->mss_idx_sel = 0;			/* check how to select MSS */
1281 		meta->mss_val = mss;
1282 		hal_pkt->meta = meta;
1283 	} else
1284 		hal_pkt->meta = NULL;
1285 }
1286 
1287 #define	XMIT_QUEUE_TIMEOUT	100
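
/*
 * Each iteration of the stall-wait loop below sleeps one tick via
 * pause(9), so a stalled queue gets roughly XMIT_QUEUE_TIMEOUT ticks
 * to drain before the transmit attempt is abandoned.
 */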
1288 
1289 static void
1290 al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m)
1291 {
1292 	struct al_eth_tx_buffer *tx_info;
1293 	int error;
1294 	int nsegs, a;
1295 	uint16_t next_to_use;
1296 	bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1];
1297 	struct al_eth_pkt *hal_pkt;
1298 	struct al_buf *al_buf;
1299 	bool remap;
1300 
1301 	/* Check if queue is ready */
1302 	if (unlikely(tx_ring->stall != 0)) {
1303 		for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) {
1304 			if (al_udma_available_get(tx_ring->dma_q) >=
1305 			    (AL_ETH_DEFAULT_TX_HW_DESCS -
1306 			    AL_ETH_TX_WAKEUP_THRESH)) {
1307 				tx_ring->stall = 0;
1308 				break;
1309 			}
1310 			pause("stall", 1);
1311 		}
1312 		if (a == XMIT_QUEUE_TIMEOUT) {
1313 			device_printf(tx_ring->dev,
1314 			    "timeout waiting for queue %d ready!\n",
1315 			    tx_ring->ring_id);
1316 			return;
1317 		} else {
1318 			device_printf_dbg(tx_ring->dev,
1319 			    "queue %d is ready!\n", tx_ring->ring_id);
1320 		}
1321 	}
1322 
1323 	next_to_use = tx_ring->next_to_use;
1324 	tx_info = &tx_ring->tx_buffer_info[next_to_use];
1325 	tx_info->m = m;
1326 	hal_pkt = &tx_info->hal_pkt;
1327 
1328 	if (m == NULL) {
1329 		device_printf(tx_ring->dev, "mbuf is NULL\n");
1330 		return;
1331 	}
1332 
1333 	remap = true;
1334 	/* Map packets for DMA */
1335 retry:
1336 	error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map,
1337 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
1338 	if (__predict_false(error)) {
1339 		struct mbuf *m_new;
1340 
1341 		if (error == EFBIG) {
1342 			/* retry the mapping once after defragmenting the mbuf */
1343 			if (remap == true) {
1344 				remap = false;
1345 				m_new = m_defrag(m, M_NOWAIT);
1346 				if (m_new == NULL) {
1347 					device_printf(tx_ring->dev,
1348 					    "failed to defrag mbuf\n");
1349 					goto exit;
1350 				}
1351 				m = m_new;
1352 				goto retry;
1353 			} else {
1354 				device_printf(tx_ring->dev,
1355 				    "failed to map mbuf, error %d\n", error);
1356 				goto exit;
1357 			}
1358 		} else {
1359 			device_printf(tx_ring->dev,
1360 			    "failed to map mbuf, error %d\n", error);
1361 			goto exit;
1362 		}
1363 	}
1364 
1365 	/* set flags and meta data */
1366 	hal_pkt->flags = AL_ETH_TX_FLAGS_INT;
1367 	al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m);
1368 
1369 	al_buf = hal_pkt->bufs;
1370 	for (a = 0; a < nsegs; a++) {
1371 		al_buf->addr = segs[a].ds_addr;
1372 		al_buf->len = segs[a].ds_len;
1373 
1374 		al_buf++;
1375 	}
1376 
1377 	hal_pkt->num_of_bufs = nsegs;
1378 
1379 	/* prepare the packet's descriptors for the DMA engine */
1380 	tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);
1381 
1382 	if (tx_info->tx_descs == 0)
1383 		goto exit;
1384 
1385 	/*
1386 	 * stop the queue when no more space available, the packet can have up
1387 	 * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor
1388 	 */
1389 	if (unlikely(al_udma_available_get(tx_ring->dma_q) <
1390 	    (AL_ETH_PKT_MAX_BUFS + 2))) {
1391 		tx_ring->stall = 1;
1392 		device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n",
1393 		    tx_ring->ring_id);
1394 		al_data_memory_barrier();
1395 	}
1396 
1397 	tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);
1398 
1399 	/* trigger the dma engine */
1400 	al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
1401 	return;
1402 
1403 exit:
1404 	m_freem(m);
1405 }
1406 
1407 static void
1408 al_eth_tx_cmpl_work(void *arg, int pending)
1409 {
1410 	struct al_eth_ring *tx_ring = arg;
1411 
1412 	if (napi != 0) {
1413 		tx_ring->cmpl_is_running = 1;
1414 		al_data_memory_barrier();
1415 	}
1416 
1417 	al_eth_tx_do_cleanup(tx_ring);
1418 
1419 	if (napi != 0) {
1420 		tx_ring->cmpl_is_running = 0;
1421 		al_data_memory_barrier();
1422 	}
1423 	/* all work done, enable IRQs */
1424 	al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
1425 }
1426 
1427 static int
1428 al_eth_tx_cmlp_irq_filter(void *arg)
1429 {
1430 	struct al_eth_ring *tx_ring = arg;
1431 
1432 	/* Interrupt should be auto-masked upon arrival */
1433 
1434 	device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
1435 	    tx_ring->ring_id);
1436 
1437 	/*
1438 	 * For napi, schedule the work only if it is not already running.
1439 	 * In the normal (non-napi) mode, always schedule.
1440 	 */
1441 	if ((napi == 0) || (napi && tx_ring->cmpl_is_running == 0))
1442 		taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
1443 
1444 	/* Do not run bottom half */
1445 	return (FILTER_HANDLED);
1446 }
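
/*
 * Both the TX and RX IRQ filters follow the same deferred-work pattern:
 * the interrupt arrives auto-masked, the filter merely enqueues a
 * taskqueue item and returns FILTER_HANDLED, and the task re-enables
 * the interrupt via al_eth_irq_config() once the cleanup is done.
 */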
1447 
1448 static int
1449 al_eth_rx_recv_irq_filter(void *arg)
1450 {
1451 	struct al_eth_ring *rx_ring = arg;
1452 
1453 	/* Interrupt should be auto-masked upon arrival */
1454 
1455 	device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__,
1456 	    rx_ring->ring_id);
1457 
1458 	/*
1459 	 * For napi, schedule the work only if it is not already running.
1460 	 * In the normal (non-napi) mode, always schedule.
1461 	 */
1462 	if ((napi == 0) || (napi && rx_ring->enqueue_is_running == 0))
1463 		taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
1464 
1465 	/* Do not run bottom half */
1466 	return (FILTER_HANDLED);
1467 }
1468 
1469 /*
1470  * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum
1471  * @adapter: structure containing adapter specific data
1472  * @hal_pkt: HAL structure for the packet
1473  * @mbuf: mbuf currently being received and modified
1474  */
1475 static inline void
1476 al_eth_rx_checksum(struct al_eth_adapter *adapter,
1477     struct al_eth_pkt *hal_pkt, struct mbuf *mbuf)
1478 {
1479 
1480 	/* if IPv4 and error */
1481 	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM) &&
1482 	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) &&
1483 	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
1484 		device_printf(adapter->dev, "rx ipv4 header checksum error\n");
1485 		return;
1486 	}
1487 
1488 	/* if IPv6 and error */
1489 	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM_IPV6) &&
1490 	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) &&
1491 	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
1492 		device_printf(adapter->dev, "rx ipv6 header checksum error\n");
1493 		return;
1494 	}
1495 
1496 	/* if TCP/UDP */
1497 	if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) ||
1498 	   (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) {
1499 		if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) {
1500 			device_printf_dbg(adapter->dev, "rx L4 checksum error\n");
1501 
1502 			/* TCP/UDP checksum error */
1503 			mbuf->m_pkthdr.csum_flags = 0;
1504 		} else {
1505 			device_printf_dbg(adapter->dev, "rx checksum correct\n");
1506 
1507 			/* IP Checksum Good */
1508 			mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1509 			mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1510 		}
1511 	}
1512 }
1513 
1514 static struct mbuf*
1515 al_eth_rx_mbuf(struct al_eth_adapter *adapter,
1516     struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt,
1517     unsigned int descs, uint16_t *next_to_clean)
1518 {
1519 	struct mbuf *mbuf;
1520 	struct al_eth_rx_buffer *rx_info =
1521 	    &rx_ring->rx_buffer_info[*next_to_clean];
1522 	unsigned int len;
1523 
1524 	len = hal_pkt->bufs[0].len;
1525 	device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info,
1526 	   rx_info->m);
1527 
1528 	if (rx_info->m == NULL) {
1529 		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1530 		    *next_to_clean);
1531 		return (NULL);
1532 	}
1533 
1534 	mbuf = rx_info->m;
1535 	mbuf->m_pkthdr.len = len;
1536 	mbuf->m_len = len;
1537 	mbuf->m_pkthdr.rcvif = rx_ring->netdev;
1538 	mbuf->m_flags |= M_PKTHDR;
1539 
1540 	if (len <= adapter->small_copy_len) {
1541 		struct mbuf *smbuf;
1542 		device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len);
1543 
1544 		AL_RX_LOCK(adapter);
1545 		smbuf = m_gethdr(M_NOWAIT, MT_DATA);
1546 		AL_RX_UNLOCK(adapter);
1547 		if (__predict_false(smbuf == NULL)) {
1548 			device_printf(adapter->dev, "smbuf is NULL\n");
1549 			return (NULL);
1550 		}
1551 
1552 		smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1553 		memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len);
1554 
1555 		smbuf->m_len = len;
1556 		smbuf->m_pkthdr.rcvif = rx_ring->netdev;
1557 
1558 		/* first desc of a non-ps chain */
1559 		smbuf->m_flags |= M_PKTHDR;
1560 		smbuf->m_pkthdr.len = smbuf->m_len;
1561 
1562 		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1563 		    *next_to_clean);
1564 
1565 		return (smbuf);
1566 	}
1567 	mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1568 
1569 	/* Unmap the buffer */
1570 	bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map);
1571 
1572 	rx_info->m = NULL;
1573 	*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean);
1574 
1575 	return (mbuf);
1576 }
1577 
1578 static void
1579 al_eth_rx_recv_work(void *arg, int pending)
1580 {
1581 	struct al_eth_ring *rx_ring = arg;
1582 	struct mbuf *mbuf;
1583 	unsigned int qid = rx_ring->ring_id;
1584 	struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
1585 	uint16_t next_to_clean = rx_ring->next_to_clean;
1586 	uint32_t refill_required;
1587 	uint32_t refill_actual;
1588 	uint32_t do_if_input;
1589 
1590 	if (napi != 0) {
1591 		rx_ring->enqueue_is_running = 1;
1592 		al_data_memory_barrier();
1593 	}
1594 
1595 	do {
1596 		unsigned int descs;
1597 
1598 		descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt);
1599 		if (unlikely(descs == 0))
1600 			break;
1601 
1602 		device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet "
1603 		    "from hal. descs %d\n", qid, descs);
1604 		device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. "
1605 		    "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags,
1606 		    hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx);
1607 
1608 		/* drop the packet if DMA or eth controller errors were detected */
1609 		if ((hal_pkt->flags & (AL_ETH_RX_ERROR |
1610 		    AL_UDMA_CDESC_ERROR)) != 0) {
1611 			device_printf(rx_ring->dev, "receive packet with error. "
1612 			    "flags = 0x%x\n", hal_pkt->flags);
1613 			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1614 			    next_to_clean, descs);
1615 			continue;
1616 		}
1617 
1618 		/* allocate mbuf and fill it */
1619 		mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs,
1620 		    &next_to_clean);
1621 
1622 		/* exit if we failed to retrieve a buffer */
1623 		if (unlikely(mbuf == NULL)) {
1624 			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1625 			    next_to_clean, descs);
1626 			break;
1627 		}
1628 
1629 		if (__predict_true(if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM ||
1630 		    if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM_IPV6)) {
1631 			al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf);
1632 		}
1633 
1634 		mbuf->m_pkthdr.flowid = qid;
1635 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
1636 
1637 		/*
1638 		 * LRO is only for IP/TCP packets and TCP checksum of the packet
1639 		 * should be computed by hardware.
1640 		 */
1641 		do_if_input = 1;
1642 		if ((rx_ring->lro_enabled != 0) &&
1643 		    ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
1644 		    hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) {
1645 			/*
1646 			 * Send to the stack if:
1647 			 *  - LRO not enabled, or
1648 			 *  - no LRO resources, or
1649 			 *  - lro enqueue fails
1650 			 */
1651 			if (rx_ring->lro.lro_cnt != 0) {
1652 				if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
1653 					do_if_input = 0;
1654 			}
1655 		}
1656 
1657 		if (do_if_input)
1658 			if_input(rx_ring->netdev, mbuf);
1659 
1660 	} while (1);
1661 
1662 	rx_ring->next_to_clean = next_to_clean;
1663 
1664 	refill_required = al_udma_available_get(rx_ring->dma_q);
1665 	refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid,
1666 	    refill_required);
1667 
1668 	if (unlikely(refill_actual < refill_required)) {
1669 		device_printf_dbg(rx_ring->dev,
1670 		    "%s: not filling rx queue %d\n", __func__, qid);
1671 	}
1672 
1673 	tcp_lro_flush_all(&rx_ring->lro);
1674 
1675 	if (napi != 0) {
1676 		rx_ring->enqueue_is_running = 0;
1677 		al_data_memory_barrier();
1678 	}
1679 	/* unmask irq */
1680 	al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val);
1681 }
1682 
1683 static void
1684 al_eth_start_xmit(void *arg, int pending)
1685 {
1686 	struct al_eth_ring *tx_ring = arg;
1687 	struct mbuf *mbuf;
1688 
1689 	if (napi != 0) {
1690 		tx_ring->enqueue_is_running = 1;
1691 		al_data_memory_barrier();
1692 	}
1693 
1694 	while (1) {
1695 		mtx_lock(&tx_ring->br_mtx);
1696 		mbuf = drbr_dequeue(NULL, tx_ring->br);
1697 		mtx_unlock(&tx_ring->br_mtx);
1698 
1699 		if (mbuf == NULL)
1700 			break;
1701 
1702 		al_eth_xmit_mbuf(tx_ring, mbuf);
1703 	}
1704 
1705 	if (napi != 0) {
1706 		tx_ring->enqueue_is_running = 0;
1707 		al_data_memory_barrier();
1708 		while (1) {
1709 			mtx_lock(&tx_ring->br_mtx);
1710 			mbuf = drbr_dequeue(NULL, tx_ring->br);
1711 			mtx_unlock(&tx_ring->br_mtx);
1712 			if (mbuf == NULL)
1713 				break;
1714 			al_eth_xmit_mbuf(tx_ring, mbuf);
1715 		}
1716 	}
1717 }
1718 
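/*
 * Queue selection for transmit: when the mbuf carries a flow id (an RSS
 * hash from the stack), the ring is chosen as flowid % num_tx_queues so
 * that a flow stays on one queue; otherwise the current CPU id is used.
 * For example, with 4 TX queues a flowid of 6 selects tx_ring[2].
 */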
1719 static int
1720 al_mq_start(if_t ifp, struct mbuf *m)
1721 {
1722 	struct al_eth_adapter *adapter = if_getsoftc(ifp);
1723 	struct al_eth_ring *tx_ring;
1724 	int i;
1725 	int ret;
1726 
1727 	/* Which queue to use */
1728 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1729 		i = m->m_pkthdr.flowid % adapter->num_tx_queues;
1730 	else
1731 		i = curcpu % adapter->num_tx_queues;
1732 
1733 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1734 	    IFF_DRV_RUNNING) {
1735 		return (EFAULT);
1736 	}
1737 
1738 	tx_ring = &adapter->tx_ring[i];
1739 
1740 	device_printf_dbg(adapter->dev, "dbg start() - assuming link is active, "
1741 	    "sending packet to queue %d\n", i);
1742 
1743 	ret = drbr_enqueue(ifp, tx_ring->br, m);
1744 
1745 	/*
1746 	 * In napi mode, schedule the enqueue task only when it is not
1747 	 * already running; in non-napi mode, always schedule it.
1748 	 */
1749 	if (napi == 0 || tx_ring->enqueue_is_running == 0)
1750 		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
1751 
1752 	return (ret);
1753 }
1754 
1755 static void
1756 al_qflush(if_t ifp)
1757 {
1758 
1759 	/* unused */
1760 }
1761 
1762 static inline void
1763 al_eth_flow_ctrl_init(struct al_eth_adapter *adapter)
1764 {
1765 	uint8_t default_flow_ctrl;
1766 
1767 	default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE;
1768 	default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE;
1769 
1770 	adapter->link_config.flow_ctrl_supported = default_flow_ctrl;
1771 }
1772 
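/*
 * prio_q_map[0][i] below is a one-hot queue mask implementing the
 * "queue id = priority/2" mapping: 1 << (i >> 1) yields 0x1 for
 * priorities 0-1 (queue 0), 0x2 for 2-3 (queue 1), 0x4 for 4-5 and
 * 0x8 for 6-7.
 */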
1773 static int
1774 al_eth_flow_ctrl_config(struct al_eth_adapter *adapter)
1775 {
1776 	struct al_eth_flow_control_params *flow_ctrl_params;
1777 	uint8_t active = adapter->link_config.flow_ctrl_active;
1778 	int i;
1779 
1780 	flow_ctrl_params = &adapter->flow_ctrl_params;
1781 
1782 	flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
1783 	flow_ctrl_params->obay_enable =
1784 	    ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
1785 	flow_ctrl_params->gen_enable =
1786 	    ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);
1787 
1788 	flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH;
1789 	flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW;
1790 	flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA;
1791 	flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH;
1792 
1793 	/* map priority to queue index, queue id = priority/2 */
1794 	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
1795 		flow_ctrl_params->prio_q_map[0][i] =  1 << (i >> 1);
1796 
1797 	al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params);
1798 
1799 	return (0);
1800 }
1801 
1802 static void
1803 al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter)
1804 {
1805 
1806 	/*
1807 	 * change the active configuration to the default / force by ethtool
1808 	 * and call to configure
1809 	 */
1810 	adapter->link_config.flow_ctrl_active =
1811 	    adapter->link_config.flow_ctrl_supported;
1812 
1813 	al_eth_flow_ctrl_config(adapter);
1814 }
1815 
1816 static void
1817 al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter)
1818 {
1819 
1820 	adapter->link_config.flow_ctrl_active = 0;
1821 	al_eth_flow_ctrl_config(adapter);
1822 }
1823 
1824 static int
1825 al_eth_hw_init(struct al_eth_adapter *adapter)
1826 {
1827 	int rc;
1828 
1829 	rc = al_eth_hw_init_adapter(adapter);
1830 	if (rc != 0)
1831 		return (rc);
1832 
1833 	rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
1834 	if (rc < 0) {
1835 		device_printf(adapter->dev, "%s failed to configure mac!\n",
1836 		    __func__);
1837 		return (rc);
1838 	}
1839 
1840 	if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
1841 	    (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
1842 	     adapter->phy_exist == false)) {
1843 		rc = al_eth_mac_link_config(&adapter->hal_adapter,
1844 		    adapter->link_config.force_1000_base_x,
1845 		    adapter->link_config.autoneg,
1846 		    adapter->link_config.active_speed,
1847 		    adapter->link_config.active_duplex);
1848 		if (rc != 0) {
1849 			device_printf(adapter->dev,
1850 			    "%s failed to configure link parameters!\n",
1851 			    __func__);
1852 			return (rc);
1853 		}
1854 	}
1855 
1856 	rc = al_eth_mdio_config(&adapter->hal_adapter,
1857 	    AL_ETH_MDIO_TYPE_CLAUSE_22, AL_TRUE /* shared_mdio_if */,
1858 	    adapter->ref_clk_freq, adapter->mdio_freq);
1859 	if (rc != 0) {
1860 		device_printf(adapter->dev, "%s failed at mdio config!\n",
1861 		    __func__);
1862 		return (rc);
1863 	}
1864 
1865 	al_eth_flow_ctrl_init(adapter);
1866 
1867 	return (rc);
1868 }
1869 
1870 static int
1871 al_eth_hw_stop(struct al_eth_adapter *adapter)
1872 {
1873 
1874 	al_eth_mac_stop(&adapter->hal_adapter);
1875 
1876 	/*
1877 	 * Wait until pending rx packets are written out and the UDMA becomes
1878 	 * idle; the MAC has a ~10KB FIFO, so 10us should be enough time for
1879 	 * the UDMA to write to memory.
1880 	 */
1881 	DELAY(10);
1882 
1883 	al_eth_adapter_stop(&adapter->hal_adapter);
1884 
1885 	adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED;
1886 
1887 	/* disable flow ctrl to avoid pause packets*/
1888 	/* disable flow ctrl to avoid pause packets */
1889 
1890 	return (0);
1891 }
1892 
1893 /*
1894  * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts
1895  * @data: pointer to the adapter private structure
1897  */
1898 static int
1899 al_eth_intr_intx_all(void *data)
1900 {
1901 	struct al_eth_adapter *adapter = data;
1902 
1903 	struct unit_regs __iomem *regs_base =
1904 	    (struct unit_regs __iomem *)adapter->udma_base;
1905 	uint32_t reg;
1906 
1907 	reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
1908 	    AL_INT_GROUP_A);
1909 	if (likely(reg))
1910 		device_printf_dbg(adapter->dev, "%s group A cause %x\n",
1911 		    __func__, reg);
1912 
1913 	if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) {
1914 		struct al_iofic_grp_ctrl __iomem *sec_ints_base;
1915 		uint32_t cause_d =  al_udma_iofic_read_cause(regs_base,
1916 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);
1917 
1918 		sec_ints_base =
1919 		    &regs_base->gen.interrupt_regs.secondary_iofic_ctrl[0];
1920 		if (cause_d != 0) {
1921 			device_printf_dbg(adapter->dev,
1922 			    "got interrupt from group D. cause %x\n", cause_d);
1923 
1924 			cause_d = al_iofic_read_cause(sec_ints_base,
1925 			    AL_INT_GROUP_A);
1926 			device_printf(adapter->dev,
1927 			    "secondary A cause %x\n", cause_d);
1928 
1929 			cause_d = al_iofic_read_cause(sec_ints_base,
1930 			    AL_INT_GROUP_B);
1931 
1932 			device_printf_dbg(adapter->dev,
1933 			    "secondary B cause %x\n", cause_d);
1934 		}
1935 	}
1936 	if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0) {
1937 		uint32_t cause_b = al_udma_iofic_read_cause(regs_base,
1938 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1939 		int qid;
1940 		device_printf_dbg(adapter->dev, "group B cause %x\n",
1941 		    cause_b);
1942 		for (qid = 0; qid < adapter->num_rx_queues; qid++) {
1943 			if (cause_b & (1 << qid)) {
1944 				/* mask */
1945 				al_udma_iofic_mask(
1946 				    (struct unit_regs __iomem *)adapter->udma_base,
1947 				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
1948 				    AL_INT_GROUP_B, 1 << qid);
1949 			}
1950 		}
1951 	}
1952 	if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) {
1953 		uint32_t cause_c = al_udma_iofic_read_cause(regs_base,
1954 		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
1955 		int qid;
1956 		device_printf_dbg(adapter->dev, "group C cause %x\n", cause_c);
1957 		for (qid = 0; qid < adapter->num_tx_queues; qid++) {
1958 			if ((cause_c & (1 << qid)) != 0) {
1959 				al_udma_iofic_mask(
1960 				    (struct unit_regs __iomem *)adapter->udma_base,
1961 				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
1962 				    AL_INT_GROUP_C, 1 << qid);
1963 			}
1964 		}
1965 	}
1966 
1967 	al_eth_tx_cmlp_irq_filter(adapter->tx_ring);
1968 
1969 	return (0);
1970 }
1971 
1972 static int
1973 al_eth_intr_msix_all(void *data)
1974 {
1975 	struct al_eth_adapter *adapter = data;
1976 
1977 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1978 	return (0);
1979 }
1980 
1981 static int
1982 al_eth_intr_msix_mgmt(void *data)
1983 {
1984 	struct al_eth_adapter *adapter = data;
1985 
1986 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1987 	return (0);
1988 }
1989 
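/*
 * MSI-X table layout programmed below (table entries 0 and 1 are left
 * unused, hence the "+ 2" passed to pci_alloc_msix()):
 *   entry 2                                  - management (group A)
 *   entries 3 .. 3 + num_rx_queues - 1       - RX completion (group B)
 *   entries 3 + AL_ETH_MAX_HW_QUEUES onwards - TX completion (group C)
 */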
1990 static int
1991 al_eth_enable_msix(struct al_eth_adapter *adapter)
1992 {
1993 	int i, msix_vecs, rc, count;
1994 
1995 	device_printf_dbg(adapter->dev, "%s\n", __func__);
1996 	msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues;
1997 
1998 	device_printf_dbg(adapter->dev,
1999 	    "Try to enable MSIX, vector numbers = %d\n", msix_vecs);
2000 
2001 	adapter->msix_entries = malloc(msix_vecs * sizeof(*adapter->msix_entries),
2002 	    M_IFAL, M_ZERO | M_WAITOK);
2003 	/* management vector (GROUP_A) @2 */
2004 	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
2005 	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2006 
2007 	/* rx queues start @3 */
2008 	for (i = 0; i < adapter->num_rx_queues; i++) {
2009 		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2010 
2011 		adapter->msix_entries[irq_idx].entry = 3 + i;
2012 		adapter->msix_entries[irq_idx].vector = 0;
2013 	}
2014 	/* tx queues start @7 */
2015 	for (i = 0; i < adapter->num_tx_queues; i++) {
2016 		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2017 
2018 		adapter->msix_entries[irq_idx].entry = 3 +
2019 		    AL_ETH_MAX_HW_QUEUES + i;
2020 		adapter->msix_entries[irq_idx].vector = 0;
2021 	}
2022 
2023 	count = msix_vecs + 2; /* entries start from 2 */
2024 	rc = pci_alloc_msix(adapter->dev, &count);
2025 
2026 	if (rc != 0) {
2027 		device_printf_dbg(adapter->dev, "failed to allocate MSIX "
2028 		    "vectors %d\n", msix_vecs + 2);
2029 		device_printf_dbg(adapter->dev, "ret = %d\n", rc);
2030 		goto msix_entries_exit;
2031 	}
2032 
2033 	if (count != msix_vecs + 2) {
2034 		device_printf_dbg(adapter->dev, "failed to allocate all MSIX "
2035 		    "vectors %d, allocated %d\n", msix_vecs + 2, count);
2036 		rc = ENOSPC;
2037 		goto msix_entries_exit;
2038 	}
2039 
2040 	for (i = 0; i < msix_vecs; i++)
2041 		adapter->msix_entries[i].vector = 2 + 1 + i;
2042 
2043 	device_printf_dbg(adapter->dev, "successfully enabled MSIX,"
2044 	    " vectors %d\n", msix_vecs);
2045 
2046 	adapter->msix_vecs = msix_vecs;
2047 	adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED;
2048 	goto exit;
2049 
2050 msix_entries_exit:
2051 	adapter->msix_vecs = 0;
2052 	free(adapter->msix_entries, M_IFAL);
2053 	adapter->msix_entries = NULL;
2054 
2055 exit:
2056 	return (rc);
2057 }
2058 
2059 static int
2060 al_eth_setup_int_mode(struct al_eth_adapter *adapter)
2061 {
2062 	int i, rc;
2063 
2064 	rc = al_eth_enable_msix(adapter);
2065 	if (rc != 0) {
2066 		device_printf(adapter->dev, "Failed to enable MSIX mode.\n");
2067 		return (rc);
2068 	}
2069 
2070 	adapter->irq_vecs = max(1, adapter->msix_vecs);
2071 	/* single INTX mode */
2072 	if (adapter->msix_vecs == 0) {
2073 		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2074 		    AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s",
2075 		    device_get_name(adapter->dev));
2076 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2077 		    al_eth_intr_intx_all;
2078 		/* IRQ vector will be resolved from device resources */
2079 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2080 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2081 
2082 		device_printf(adapter->dev, "%s: using INTx, vector %d\n", __func__,
2083 		    adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector);
2084 
2085 		return (0);
2086 	}
2087 	/* single MSI-X mode */
2088 	if (adapter->msix_vecs == 1) {
2089 		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2090 		    AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s",
2091 		    device_get_name(adapter->dev));
2092 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2093 		    al_eth_intr_msix_all;
2094 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2095 		    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2096 		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2097 
2098 		return (0);
2099 	}
2100 	/* MSI-X per queue */
2101 	snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE,
2102 	    "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev));
2103 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt;
2104 
2105 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2106 	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2107 	    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2108 
2109 	for (i = 0; i < adapter->num_rx_queues; i++) {
2110 		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2111 
2112 		snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE,
2113 		    "al-eth-rx-comp-%d@pci:%s", i,
2114 		    device_get_name(adapter->dev));
2115 		adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter;
2116 		adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i];
2117 		adapter->irq_tbl[irq_idx].vector =
2118 		    adapter->msix_entries[irq_idx].vector;
2119 	}
2120 
2121 	for (i = 0; i < adapter->num_tx_queues; i++) {
2122 		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2123 
2124 		snprintf(adapter->irq_tbl[irq_idx].name,
2125 		    AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i,
2126 		    device_get_name(adapter->dev));
2127 		adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter;
2128 		adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i];
2129 		adapter->irq_tbl[irq_idx].vector =
2130 		    adapter->msix_entries[irq_idx].vector;
2131 	}
2132 
2133 	return (0);
2134 }
2135 
2136 static void
2137 __al_eth_free_irq(struct al_eth_adapter *adapter)
2138 {
2139 	struct al_eth_irq *irq;
2140 	int i, rc;
2141 
2142 	for (i = 0; i < adapter->irq_vecs; i++) {
2143 		irq = &adapter->irq_tbl[i];
2144 		if (irq->requested != 0) {
2145 			device_printf_dbg(adapter->dev, "tear down irq: %d\n",
2146 			    irq->vector);
2147 			rc = bus_teardown_intr(adapter->dev, irq->res,
2148 			    irq->cookie);
2149 			if (rc != 0)
2150 				device_printf(adapter->dev, "failed to tear "
2151 				    "down irq: %d\n", irq->vector);
2152 		}
2153 		irq->requested = 0;
2154 	}
2155 }
2156 
2157 static void
2158 al_eth_free_irq(struct al_eth_adapter *adapter)
2159 {
2160 	struct al_eth_irq *irq;
2161 	int i, rc;
2162 #ifdef CONFIG_RFS_ACCEL
2163 	if (adapter->msix_vecs >= 1) {
2164 		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2165 		adapter->netdev->rx_cpu_rmap = NULL;
2166 	}
2167 #endif
2168 
2169 	__al_eth_free_irq(adapter);
2170 
2171 	for (i = 0; i < adapter->irq_vecs; i++) {
2172 		irq = &adapter->irq_tbl[i];
2173 		if (irq->res == NULL)
2174 			continue;
2175 		device_printf_dbg(adapter->dev, "release resource irq: %d\n",
2176 		    irq->vector);
2177 		rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector,
2178 		    irq->res);
2179 		irq->res = NULL;
2180 		if (rc != 0)
2181 			device_printf(adapter->dev, "dev has no parent while "
2182 			    "releasing res for irq: %d\n", irq->vector);
2183 	}
2184 
2185 	pci_release_msi(adapter->dev);
2186 
2187 	adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;
2188 
2189 	adapter->msix_vecs = 0;
2190 	free(adapter->msix_entries, M_IFAL);
2191 	adapter->msix_entries = NULL;
2192 }
2193 
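/*
 * Allocate and wire up the interrupt resources described by irq_tbl.
 * INTx resources are allocated RF_SHAREABLE since the line may be shared;
 * MSI-X vectors are exclusive. On failure the exit_intr / exit_res labels
 * unwind in reverse: tear down the handlers already set up, then release
 * the bus resources already allocated.
 */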
2194 static int
2195 al_eth_request_irq(struct al_eth_adapter *adapter)
2196 {
2197 	unsigned long flags;
2198 	struct al_eth_irq *irq;
2199 	int rc = 0, i, v;
2200 
2201 	if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0)
2202 		flags = RF_ACTIVE;
2203 	else
2204 		flags = RF_ACTIVE | RF_SHAREABLE;
2205 
2206 	for (i = 0; i < adapter->irq_vecs; i++) {
2207 		irq = &adapter->irq_tbl[i];
2208 
2209 		if (irq->requested != 0)
2210 			continue;
2211 
2212 		irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ,
2213 		    &irq->vector, flags);
2214 		if (irq->res == NULL) {
2215 			device_printf(adapter->dev, "could not allocate "
2216 			    "irq vector=%d\n", irq->vector);
2217 			rc = ENXIO;
2218 			goto exit_res;
2219 		}
2220 
2221 		if ((rc = bus_setup_intr(adapter->dev, irq->res,
2222 		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler,
2223 		    NULL, irq->data, &irq->cookie)) != 0) {
2224 			device_printf(adapter->dev, "failed to register "
2225 			    "interrupt handler for irq %ju: %d\n",
2226 			    (uintmax_t)rman_get_start(irq->res), rc);
2227 			goto exit_intr;
2228 		}
2229 		irq->requested = 1;
2230 	}
2231 	goto exit;
2232 
2233 exit_intr:
2234 	v = i; /* entries 0 .. i-1 succeeded; entry i is the one that failed */
2235 	while (v-- > 0) {
2236 		int bti;
2237 		irq = &adapter->irq_tbl[v];
2238 		bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie);
2239 		if (bti != 0) {
2240 			device_printf(adapter->dev, "failed to tear "
2241 			    "down irq: %d\n", irq->vector);
2242 		}
2243 
2244 		irq->requested = 0;
2245 		device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n",
2246 		    irq->vector);
2247 	}
2248 
2249 exit_res:
2250 	v = i; /* release the resources of entries 0 .. i-1 */
2251 	while (v-- > 0) {
2252 		int brr;
2253 		irq = &adapter->irq_tbl[v];
2254 		device_printf_dbg(adapter->dev, "exit_res: releasing resource"
2255 		    " for irq %d\n", irq->vector);
2256 		brr = bus_release_resource(adapter->dev, SYS_RES_IRQ,
2257 		    irq->vector, irq->res);
2258 		if (brr != 0)
2259 			device_printf(adapter->dev, "dev has no parent while "
2260 			    "releasing res for irq: %d\n", irq->vector);
2261 		irq->res = NULL;
2262 	}
2263 
2264 exit:
2265 	return (rc);
2266 }
2267 
2268 /**
2269  * al_eth_setup_tx_resources - allocate Tx resources (Descriptors)
2270  * @adapter: network interface device structure
2271  * @qid: queue index
2272  *
2273  * Return 0 on success, errno otherwise.
2274  */
2275 static int
2276 al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
2277 {
2278 	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2279 	device_t dev = tx_ring->dev;
2280 	struct al_udma_q_params *q_params = &tx_ring->q_params;
2281 	int size;
2282 	int ret;
2283 
2284 	if (adapter->up)
2285 		return (0);
2286 
2287 	size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;
2288 
2289 	tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2290 	tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
2291 	q_params->size = tx_ring->hw_count;
2292 
2293 	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2294 	    (bus_dmamap_t *)&q_params->desc_phy_base_map,
2295 	    (bus_addr_t *)&q_params->desc_phy_base,
2296 	    (void**)&q_params->desc_base, tx_ring->descs_size);
2297 	if (ret != 0) {
2298 		device_printf(dev, "failed to al_dma_alloc_coherent,"
2299 		    " ret = %d\n", ret);
2300 		return (ENOMEM);
2301 	}
2302 
2303 	if (q_params->desc_base == NULL)
2304 		return (ENOMEM);
2305 
2306 	device_printf_dbg(dev, "Initializing ring queues %d\n", qid);
2307 
2308 	/* Allocate Ring Queue */
2309 	mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
2310 	tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
2311 	    &tx_ring->br_mtx);
2312 
2313 	/* Allocate taskqueues */
2314 	TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
2315 	tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT,
2316 	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
2317 	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq",
2318 	    device_get_nameunit(adapter->dev));
2319 	TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring);
2320 	tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT,
2321 	    taskqueue_thread_enqueue, &tx_ring->cmpl_tq);
2322 	taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq",
2323 	    device_get_nameunit(adapter->dev));
2324 
2325 	/* Setup DMA descriptor areas. */
2326 	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2327 	    1, 0,			/* alignment, bounds */
2328 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2329 	    BUS_SPACE_MAXADDR,		/* highaddr */
2330 	    NULL, NULL,			/* filter, filterarg */
2331 	    AL_TSO_SIZE,		/* maxsize */
2332 	    AL_ETH_PKT_MAX_BUFS,	/* nsegments */
2333 	    PAGE_SIZE,			/* maxsegsize */
2334 	    0,				/* flags */
2335 	    NULL,			/* lockfunc */
2336 	    NULL,			/* lockfuncarg */
2337 	    &tx_ring->dma_buf_tag);
2338 
2339 	if (ret != 0) {
2340 		device_printf(dev, "Unable to allocate dma_buf_tag, ret = %d\n",
2341 		    ret);
2342 		return (ret);
2343 	}
2344 
2345 	for (size = 0; size < tx_ring->sw_count; size++) {
2346 		ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0,
2347 		    &tx_ring->tx_buffer_info[size].dma_map);
2348 		if (ret != 0) {
2349 			device_printf(dev, "Unable to map DMA TX "
2350 			    "buffer memory [iter=%d]\n", size);
2351 			return (ret);
2352 		}
2353 	}
2354 
2355 	/* completion queue not used for tx */
2356 	q_params->cdesc_base = NULL;
2357 	/* size in bytes of the udma completion ring descriptor */
2358 	q_params->cdesc_size = 8;
2359 	tx_ring->next_to_use = 0;
2360 	tx_ring->next_to_clean = 0;
2361 
2362 	return (0);
2363 }
2364 
2365 /*
2366  * al_eth_free_tx_resources - Free Tx Resources per Queue
2367  * @adapter: network interface device structure
2368  * @qid: queue index
2369  *
2370  * Free all transmit software resources
2371  */
2372 static void
2373 al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid)
2374 {
2375 	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2376 	struct al_udma_q_params *q_params = &tx_ring->q_params;
2377 	int size;
2378 
2379 	/* At this point interrupts' handlers must be deactivated */
2380 	while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL))
2381 		taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
2382 
2383 	taskqueue_free(tx_ring->cmpl_tq);
2384 	while (taskqueue_cancel(tx_ring->enqueue_tq,
2385 	    &tx_ring->enqueue_task, NULL)) {
2386 		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2387 	}
2388 
2389 	taskqueue_free(tx_ring->enqueue_tq);
2390 
2391 	if (tx_ring->br != NULL) {
2392 		drbr_flush(adapter->netdev, tx_ring->br);
2393 		buf_ring_free(tx_ring->br, M_DEVBUF);
2394 	}
2395 
2396 	for (size = 0; size < tx_ring->sw_count; size++) {
2397 		m_freem(tx_ring->tx_buffer_info[size].m);
2398 		tx_ring->tx_buffer_info[size].m = NULL;
2399 
2400 		bus_dmamap_unload(tx_ring->dma_buf_tag,
2401 		    tx_ring->tx_buffer_info[size].dma_map);
2402 		bus_dmamap_destroy(tx_ring->dma_buf_tag,
2403 		    tx_ring->tx_buffer_info[size].dma_map);
2404 	}
2405 	bus_dma_tag_destroy(tx_ring->dma_buf_tag);
2406 
2407 	free(tx_ring->tx_buffer_info, M_IFAL);
2408 	tx_ring->tx_buffer_info = NULL;
2409 
2410 	mtx_destroy(&tx_ring->br_mtx);
2411 
2412 	/* if not set, then don't free */
2413 	if (q_params->desc_base == NULL)
2414 		return;
2415 
2416 	al_dma_free_coherent(q_params->desc_phy_base_tag,
2417 	    q_params->desc_phy_base_map, q_params->desc_base);
2418 
2419 	q_params->desc_base = NULL;
2420 }
2421 
2422 /*
2423  * al_eth_free_all_tx_resources - Free Tx Resources for All Queues
2424  * @adapter: board private structure
2425  *
2426  * Free all transmit software resources
2427  */
2428 static void
2429 al_eth_free_all_tx_resources(struct al_eth_adapter *adapter)
2430 {
2431 	int i;
2432 
2433 	for (i = 0; i < adapter->num_tx_queues; i++)
2434 		if (adapter->tx_ring[i].q_params.desc_base)
2435 			al_eth_free_tx_resources(adapter, i);
2436 }
2437 
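/*
 * Unlike a TX queue (where the completion queue is not used and
 * cdesc_base stays NULL), each RX queue needs two DMA areas: the
 * submission descriptor ring and a completion ring with 16-byte
 * descriptors, both allocated below with al_dma_alloc_coherent().
 */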
2438 /*
2439  * al_eth_setup_rx_resources - allocate Rx resources (Descriptors)
2440  * @adapter: network interface device structure
2441  * @qid: queue index
2442  *
2443  * Returns 0 on success, errno otherwise.
2444  */
2445 static int
2446 al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2447 {
2448 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2449 	device_t dev = rx_ring->dev;
2450 	struct al_udma_q_params *q_params = &rx_ring->q_params;
2451 	int size;
2452 	int ret;
2453 
2454 	size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count;
2455 
2456 	/* alloc extra element so in rx path we can always prefetch rx_info + 1 */
2457 	size += 1;
2458 
2459 	rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2460 	rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
2461 	q_params->size = rx_ring->hw_count;
2462 
2463 	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2464 	    &q_params->desc_phy_base_map,
2465 	    (bus_addr_t *)&q_params->desc_phy_base,
2466 	    (void**)&q_params->desc_base, rx_ring->descs_size);
2467 
2468 	if ((q_params->desc_base == NULL) || (ret != 0))
2469 		return (ENOMEM);
2470 
2471 	/* size in bytes of the udma completion ring descriptor */
2472 	q_params->cdesc_size = 16;
2473 	rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size;
2474 	ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag,
2475 	    &q_params->cdesc_phy_base_map,
2476 	    (bus_addr_t *)&q_params->cdesc_phy_base,
2477 	    (void**)&q_params->cdesc_base, rx_ring->cdescs_size);
2478 
2479 	if ((q_params->cdesc_base == NULL) || (ret != 0))
2480 		return (ENOMEM);
2481 
2482 	/* Allocate taskqueues */
2483 	NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
2484 	rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
2485 	    taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
2486 	taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
2487 	    device_get_nameunit(adapter->dev));
2488 
2489 	/* Setup DMA descriptor areas. */
2490 	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2491 	    1, 0,			/* alignment, bounds */
2492 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2493 	    BUS_SPACE_MAXADDR,		/* highaddr */
2494 	    NULL, NULL,			/* filter, filterarg */
2495 	    AL_TSO_SIZE,		/* maxsize */
2496 	    1,				/* nsegments */
2497 	    AL_TSO_SIZE,		/* maxsegsize */
2498 	    0,				/* flags */
2499 	    NULL,			/* lockfunc */
2500 	    NULL,			/* lockfuncarg */
2501 	    &rx_ring->dma_buf_tag);
2502 
2503 	if (ret != 0) {
2504 		device_printf(dev, "Unable to allocate RX dma_buf_tag\n");
2505 		return (ret);
2506 	}
2507 
2508 	for (size = 0; size < rx_ring->sw_count; size++) {
2509 		ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0,
2510 		    &rx_ring->rx_buffer_info[size].dma_map);
2511 		if (ret != 0) {
2512 			device_printf(dev, "Unable to map DMA RX buffer memory\n");
2513 			return (ret);
2514 		}
2515 	}
2516 
2517 	/* Zero out the descriptor ring */
2518 	/* Zero out the completion descriptor ring */
2519 
2520 	/* Create LRO for the ring */
2521 	if ((if_getcapenable(adapter->netdev) & IFCAP_LRO) != 0) {
2522 		int err = tcp_lro_init(&rx_ring->lro);
2523 		if (err != 0) {
2524 			device_printf(adapter->dev,
2525 			    "LRO[%d] Initialization failed!\n", qid);
2526 		} else {
2527 			device_printf_dbg(adapter->dev,
2528 			    "RX Soft LRO[%d] Initialized\n", qid);
2529 			rx_ring->lro_enabled = true;
2530 			rx_ring->lro.ifp = adapter->netdev;
2531 		}
2532 	}
2533 
2534 	rx_ring->next_to_clean = 0;
2535 	rx_ring->next_to_use = 0;
2536 
2537 	return (0);
2538 }
2539 
2540 /*
2541  * al_eth_free_rx_resources - Free Rx Resources
2542  * @adapter: network interface device structure
2543  * @qid: queue index
2544  *
2545  * Free all receive software resources
2546  */
2547 static void
2548 al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2549 {
2550 	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2551 	struct al_udma_q_params *q_params = &rx_ring->q_params;
2552 	int size;
2553 
2554 	/* At this point interrupts' handlers must be deactivated */
2555 	while (taskqueue_cancel(rx_ring->enqueue_tq,
2556 	    &rx_ring->enqueue_task, NULL)) {
2557 		taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
2558 	}
2559 
2560 	taskqueue_free(rx_ring->enqueue_tq);
2561 
2562 	for (size = 0; size < rx_ring->sw_count; size++) {
2563 		m_freem(rx_ring->rx_buffer_info[size].m);
2564 		rx_ring->rx_buffer_info[size].m = NULL;
2565 		bus_dmamap_unload(rx_ring->dma_buf_tag,
2566 		    rx_ring->rx_buffer_info[size].dma_map);
2567 		bus_dmamap_destroy(rx_ring->dma_buf_tag,
2568 		    rx_ring->rx_buffer_info[size].dma_map);
2569 	}
2570 	bus_dma_tag_destroy(rx_ring->dma_buf_tag);
2571 
2572 	free(rx_ring->rx_buffer_info, M_IFAL);
2573 	rx_ring->rx_buffer_info = NULL;
2574 
2575 	/* if not set, then don't free */
2576 	if (q_params->desc_base == NULL)
2577 		return;
2578 
2579 	al_dma_free_coherent(q_params->desc_phy_base_tag,
2580 	    q_params->desc_phy_base_map, q_params->desc_base);
2581 
2582 	q_params->desc_base = NULL;
2583 
2584 	/* if not set, then don't free */
2585 	if (q_params->cdesc_base == NULL)
2586 		return;
2587 
2588 	al_dma_free_coherent(q_params->cdesc_phy_base_tag,
2589 	    q_params->cdesc_phy_base_map, q_params->cdesc_base);
2590 
2591 	q_params->cdesc_phy_base = 0;
2592 
2593 	/* Free LRO resources */
2594 	tcp_lro_free(&rx_ring->lro);
2595 }
2596 
2597 /*
2598  * al_eth_free_all_rx_resources - Free Rx Resources for All Queues
2599  * @adapter: board private structure
2600  *
2601  * Free all receive software resources
2602  */
2603 static void
2604 al_eth_free_all_rx_resources(struct al_eth_adapter *adapter)
2605 {
2606 	int i;
2607 
2608 	for (i = 0; i < adapter->num_rx_queues; i++)
2609 		if (adapter->rx_ring[i].q_params.desc_base != 0)
2610 			al_eth_free_rx_resources(adapter, i);
2611 }
2612 
2613 /*
2614  * al_eth_setup_all_rx_resources - allocate all queues Rx resources
2615  * @adapter: board private structure
2616  *
2617  * Return 0 on success, errno otherwise.
2618  */
2619 static int
2620 al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter)
2621 {
2622 	int i, rc = 0;
2623 
2624 	for (i = 0; i < adapter->num_rx_queues; i++) {
2625 		rc = al_eth_setup_rx_resources(adapter, i);
2626 		if (rc == 0)
2627 			continue;
2628 
2629 		device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i);
2630 		goto err_setup_rx;
2631 	}
2632 	return (0);
2633 
2634 err_setup_rx:
2635 	/* rewind the index freeing the rings as we go */
2636 	while (i--)
2637 		al_eth_free_rx_resources(adapter, i);
2638 	return (rc);
2639 }
2640 
2641 /*
2642  * al_eth_setup_all_tx_resources - allocate all queues Tx resources
2643  * @adapter: private structure
2644  *
2645  * Return 0 on success, errno otherwise.
2646  */
2647 static int
2648 al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
2649 {
2650 	int i, rc = 0;
2651 
2652 	for (i = 0; i < adapter->num_tx_queues; i++) {
2653 		rc = al_eth_setup_tx_resources(adapter, i);
2654 		if (rc == 0)
2655 			continue;
2656 
2657 		device_printf(adapter->dev,
2658 		    "Allocation for Tx Queue %u failed\n", i);
2659 		goto err_setup_tx;
2660 	}
2661 
2662 	return (0);
2663 
2664 err_setup_tx:
2665 	/* rewind the index freeing the rings as we go */
2666 	while (i--)
2667 		al_eth_free_tx_resources(adapter, i);
2668 
2669 	return (rc);
2670 }
2671 
2672 static void
2673 al_eth_disable_int_sync(struct al_eth_adapter *adapter)
2674 {
2675 
2676 	/* disable forwarding interrupts from eth through pci end point */
2677 	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
2678 	    (adapter->board_type == ALPINE_NIC)) {
2679 		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
2680 		    AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR);
2681 	}
2682 
2683 	/* mask hw interrupts */
2684 	al_eth_interrupts_mask(adapter);
2685 }
2686 
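/*
 * The per-queue unmask values are plain bit masks, one bit per queue:
 * (1 << nqueues) - 1, e.g. 0xf for four RX queues in group B. Group D
 * unmasks bits 8-9 only (3 << 8). In legacy INTx mode the group A
 * summary bits for groups B/C/D are unmasked as well, so queue
 * interrupts are forwarded to the single interrupt line.
 */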
2687 static void
2688 al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
2689 {
2690 	uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */
2691 	uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1; /* bit per Rx q */
2692 	uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1; /* bit per Tx q */
2693 	uint32_t group_d_mask = 3 << 8;
2694 	struct unit_regs __iomem *regs_base =
2695 	    (struct unit_regs __iomem *)adapter->udma_base;
2696 
2697 	if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
2698 		group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
2699 		    AL_INT_GROUP_A_GROUP_C_SUM |
2700 		    AL_INT_GROUP_A_GROUP_D_SUM;
2701 
2702 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2703 	    AL_INT_GROUP_A, group_a_mask);
2704 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2705 	    AL_INT_GROUP_B, group_b_mask);
2706 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2707 	    AL_INT_GROUP_C, group_c_mask);
2708 	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2709 	    AL_INT_GROUP_D, group_d_mask);
2710 }
2711 
2712 static void
2713 al_eth_interrupts_mask(struct al_eth_adapter *adapter)
2714 {
2715 	struct unit_regs __iomem *regs_base =
2716 	    (struct unit_regs __iomem *)adapter->udma_base;
2717 
2718 	/* mask all interrupts */
2719 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2720 	    AL_INT_GROUP_A, AL_MASK_GROUP_A_INT);
2721 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2722 	    AL_INT_GROUP_B, AL_MASK_GROUP_B_INT);
2723 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2724 	    AL_INT_GROUP_C, AL_MASK_GROUP_C_INT);
2725 	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2726 	    AL_INT_GROUP_D, AL_MASK_GROUP_D_INT);
2727 }
2728 
2729 static int
2730 al_eth_configure_int_mode(struct al_eth_adapter *adapter)
2731 {
2732 	enum al_iofic_mode int_mode;
2733 	uint32_t m2s_errors_disable = AL_M2S_MASK_INIT;
2734 	uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT;
2735 	uint32_t s2m_errors_disable = AL_S2M_MASK_INIT;
2736 	uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT;
2737 
2738 	/* single INTX mode */
2739 	if (adapter->msix_vecs == 0)
2740 		int_mode = AL_IOFIC_MODE_LEGACY;
2741 	else if (adapter->msix_vecs > 1)
2742 		int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
2743 	else {
2744 		device_printf(adapter->dev,
2745 		    "udma doesn't support single MSI-X mode yet.\n");
2746 		return (EIO);
2747 	}
2748 
2749 	if (adapter->board_type != ALPINE_INTEGRATED) {
2750 		m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
2751 		m2s_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2752 		s2m_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
2753 		s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2754 	}
2755 
2756 	if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
2757 	    int_mode, m2s_errors_disable, m2s_aborts_disable,
2758 	    s2m_errors_disable, s2m_aborts_disable)) {
2759 		device_printf(adapter->dev,
2760 		    "al_udma_iofic_config failed!\n");
2761 		return (EIO);
2762 	}
2763 	adapter->int_mode = int_mode;
2764 	device_printf_dbg(adapter->dev, "using %s interrupt mode\n",
2765 	    int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" :
2766 	    int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown");
2767 	/* set interrupt moderation resolution to 15us */
2768 	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15);
2769 	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15);
2770 	/* by default interrupt coalescing is disabled */
2771 	adapter->tx_usecs = 0;
2772 	adapter->rx_usecs = 0;
2773 
2774 	return (0);
2775 }
2776 
2777 /*
2778  * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
2779  * @index: Index in RX flow hash indirection table
2780  * @n_rx_rings: Number of RX rings to use
2781  *
2782  * This function provides the default policy for RX flow hash indirection.
2783  */
2784 static inline uint32_t
2785 ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
2786 {
2787 
2788 	return (index % n_rx_rings);
2789 }
2790 
2791 static void*
2792 al_eth_update_stats(struct al_eth_adapter *adapter)
2793 {
2794 	struct al_eth_mac_stats *mac_stats = &adapter->mac_stats;
2795 
2796 	if (adapter->up == 0)
2797 		return (NULL);
2798 
2799 	al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats);
2800 
2801 	return (NULL);
2802 }
2803 
2804 static uint64_t
2805 al_get_counter(if_t ifp, ift_counter cnt)
2806 {
2807 	struct al_eth_adapter *adapter;
2808 	struct al_eth_mac_stats *mac_stats;
2809 	uint64_t rv;
2810 
2811 	adapter = if_getsoftc(ifp);
2812 	mac_stats = &adapter->mac_stats;
2813 
2814 	switch (cnt) {
2815 	case IFCOUNTER_IPACKETS:
2816 		return (mac_stats->aFramesReceivedOK); /* including pause frames */
2817 	case IFCOUNTER_OPACKETS:
2818 		return (mac_stats->aFramesTransmittedOK);
2819 	case IFCOUNTER_IBYTES:
2820 		return (mac_stats->aOctetsReceivedOK);
2821 	case IFCOUNTER_OBYTES:
2822 		return (mac_stats->aOctetsTransmittedOK);
2823 	case IFCOUNTER_IMCASTS:
2824 		return (mac_stats->ifInMulticastPkts);
2825 	case IFCOUNTER_OMCASTS:
2826 		return (mac_stats->ifOutMulticastPkts);
2827 	case IFCOUNTER_COLLISIONS:
2828 		return (0);
2829 	case IFCOUNTER_IQDROPS:
2830 		return (mac_stats->etherStatsDropEvents);
2831 	case IFCOUNTER_IERRORS:
2832 		rv = mac_stats->ifInErrors +
2833 		    mac_stats->etherStatsUndersizePkts + /* good but short */
2834 		    mac_stats->etherStatsFragments + /* short and bad*/
2835 		    mac_stats->etherStatsFragments + /* short and bad */
2836 		    mac_stats->etherStatsOversizePkts +
2837 		    mac_stats->aFrameCheckSequenceErrors +
2838 		    mac_stats->aAlignmentErrors;
2839 		return (rv);
2840 	case IFCOUNTER_OERRORS:
2841 		return (mac_stats->ifOutErrors);
2842 	default:
2843 		return (if_get_counter_default(ifp, cnt));
2844 	}
2845 }
2846 
2847 static u_int
2848 al_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2849 {
2850 	unsigned char *mac;
2851 
2852 	mac = LLADDR(sdl);
2853 	/* default mc address inside mac address */
2854 	if (mac[3] != 0 && mac[4] != 0 && mac[5] != 1)
2855 		return (1);
2856 	else
2857 		return (0);
2858 }
2859 
2860 static u_int
2861 al_program_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2862 {
2863 	struct al_eth_adapter *adapter = arg;
2864 
2865 	al_eth_mac_table_unicast_add(adapter,
2866 	    AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 + cnt, 1);
2867 
2868 	return (1);
2869 }
2870 
2871 /*
2872  *  Unicast, Multicast and Promiscuous mode set
2873  *
2874  *  The set_rx_mode entry point is called whenever the unicast or multicast
2875  *  address lists or the network interface flags are updated.  This routine is
2876  *  responsible for configuring the hardware for proper unicast, multicast,
2877  *  promiscuous mode, and all-multi behavior.
2878  */
2879 static void
2880 al_eth_set_rx_mode(struct al_eth_adapter *adapter)
2881 {
2882 	if_t ifp = adapter->netdev;
2883 	int mc, uc;
2884 	uint8_t i;
2885 
2886 	/* XXXGL: why generic count won't work? */
2887 	mc = if_foreach_llmaddr(ifp, al_count_maddr, NULL);
2888 	uc = if_lladdr_count(ifp);
2889 
2890 	if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
2891 		al_eth_mac_table_promiscuous_set(adapter, true);
2892 	} else {
2893 		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
2894 			/* This interface is in all-multicasts mode (used by multicast routers). */
2895 			al_eth_mac_table_all_multicast_add(adapter,
2896 			    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2897 		} else {
2898 			if (mc == 0) {
2899 				al_eth_mac_table_entry_clear(adapter,
2900 				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
2901 			} else {
2902 				al_eth_mac_table_all_multicast_add(adapter,
2903 				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2904 			}
2905 		}
2906 		if (uc != 0) {
2907 			i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
2908 			if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) {
2909 				/*
2910 				 * In this case there are more addresses than
2911 				 * entries in the mac table - set promiscuous
2912 				 */
2913 				al_eth_mac_table_promiscuous_set(adapter, true);
2914 				return;
2915 			}
2916 
2917 			/* clear the last configuration */
2918 			while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE +
2919 				    AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
2920 				al_eth_mac_table_entry_clear(adapter, i);
2921 				i++;
2922 			}
2923 
2924 			/* set new addresses */
2925 			if_foreach_lladdr(ifp, al_program_addr, adapter);
2926 		}
2927 		al_eth_mac_table_promiscuous_set(adapter, false);
2928 	}
2929 }
2930 
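/*
 * Default RX forwarding setup: VLAN pbits map 1:1 to priorities,
 * priorities map to queue prio/2, unicast to our MAC plus broadcast are
 * accepted and everything else is dropped, the Toeplitz hash key is
 * randomized, and the RSS indirection table defaults to round-robin
 * (entry i -> queue i % AL_ETH_NUM_QUEUES, so with 4 queues the table
 * reads 0,1,2,3,0,1,...).
 */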
2931 static void
2932 al_eth_config_rx_fwd(struct al_eth_adapter *adapter)
2933 {
2934 	struct al_eth_fwd_ctrl_table_entry entry;
2935 	int i;
2936 
2937 	/* let priority be equal to pbits */
2938 	for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++)
2939 		al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i);
2940 
2941 	/* map priority to queue index, queue id = priority/2 */
2942 	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
2943 		al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1);
2944 
2945 	entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0;
2946 	entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE;
2947 	entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO;
2948 	entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE;
2949 	entry.filter = false;
2950 
2951 	al_eth_ctrl_table_def_set(&adapter->hal_adapter, AL_FALSE, &entry);
2952 
2953 	/*
2954 	 * By default set the mac table to forward all unicast packets to our
2955 	 * MAC address and all broadcasts; all the rest will be dropped.
2956 	 */
2957 	al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
2958 	    1);
2959 	al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1);
2960 	al_eth_mac_table_promiscuous_set(adapter, false);
2961 
2962 	/* set toeplitz hash keys */
2963 	for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++)
2964 		*((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random();
2965 
2966 	for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++)
2967 		al_eth_hash_key_set(&adapter->hal_adapter, i,
2968 		    htonl(adapter->toeplitz_hash_key[i]));
2969 
2970 	for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) {
2971 		adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i,
2972 		    AL_ETH_NUM_QUEUES);
2973 		al_eth_set_thash_table_entry(adapter, i, 0,
2974 		    adapter->rss_ind_tbl[i]);
2975 	}
2976 
2977 	al_eth_fsm_table_init(adapter);
2978 }
2979 
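/*
 * Pick the smallest mbuf cluster size that fits the requested RX buffer,
 * never exceeding max_rx_buff_alloc_size. For example, a 9000-byte
 * request with max_rx_buff_alloc_size of 16384 steps MCLBYTES ->
 * MJUMPAGESIZE -> MJUM9BYTES and stops there, since 9000 <= 9216.
 */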
2980 static void
2981 al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size)
2982 {
2983 
2984 	/*
2985 	 * Determine the correct mbuf pool for doing jumbo frames;
2986 	 * try from the smallest size up to the maximum supported.
2987 	 */
2989 	adapter->rx_mbuf_sz = MCLBYTES;
2990 	if (size > 2048) {
2991 		if (adapter->max_rx_buff_alloc_size > 2048)
2992 			adapter->rx_mbuf_sz = MJUMPAGESIZE;
2993 		else
2994 			return;
2995 	}
2996 	if (size > 4096) {
2997 		if (adapter->max_rx_buff_alloc_size > 4096)
2998 			adapter->rx_mbuf_sz = MJUM9BYTES;
2999 		else
3000 			return;
3001 	}
3002 	if (size > 9216) {
3003 		if (adapter->max_rx_buff_alloc_size > 9216)
3004 			adapter->rx_mbuf_sz = MJUM16BYTES;
3005 		else
3006 			return;
3007 	}
3008 }
3009 
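/*
 * The hardware frame limit is derived from the MTU as
 * max_frame = MTU + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) +
 * ETHER_VLAN_ENCAP_LEN (4), e.g. 1522 for the standard 1500-byte MTU.
 * The TSO MSS is set to MTU - 100, presumably to leave headroom for
 * L3/L4 headers within the segment size.
 */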
3010 static int
3011 al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu)
3012 {
3013 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
3014 	    ETHER_VLAN_ENCAP_LEN;
3015 
3016 	al_eth_req_rx_buff_size(adapter, new_mtu);
3017 
3018 	device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu);
3019 	al_eth_rx_pkt_limit_config(&adapter->hal_adapter,
3020 	    AL_ETH_MIN_FRAME_LEN, max_frame);
3021 
3022 	al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100);
3023 
3024 	return (0);
3025 }
3026 
3027 static int
3028 al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu)
3029 {
3030 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
3031 
3032 	if ((new_mtu < AL_ETH_MIN_FRAME_LEN) ||
3033 	    (max_frame > AL_ETH_MAX_FRAME_LEN)) {
3034 		return (EINVAL);
3035 	}
3036 
3037 	return (0);
3038 }
3039 
3040 static int
3041 al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type,
3042     int qid)
3043 {
3044 	int rc = 0;
3045 	char *name = (type == UDMA_TX) ? "Tx" : "Rx";
3046 	struct al_udma_q_params *q_params;
3047 
3048 	if (type == UDMA_TX)
3049 		q_params = &adapter->tx_ring[qid].q_params;
3050 	else
3051 		q_params = &adapter->rx_ring[qid].q_params;
3052 
3053 	rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params);
3054 	if (rc < 0) {
3055 		device_printf(adapter->dev, "config %s queue %u failed\n", name,
3056 		    qid);
3057 		return (rc);
3058 	}
3059 	return (rc);
3060 }
3061 
3062 static int
3063 al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter)
3064 {
3065 	int i;
3066 
3067 	for (i = 0; i < adapter->num_tx_queues; i++)
3068 		al_eth_udma_queue_enable(adapter, UDMA_TX, i);
3069 
3070 	for (i = 0; i < adapter->num_rx_queues; i++)
3071 		al_eth_udma_queue_enable(adapter, UDMA_RX, i);
3072 
3073 	return (0);
3074 }
3075 
3076 static void
3077 al_eth_up_complete(struct al_eth_adapter *adapter)
3078 {
3079 
3080 	al_eth_configure_int_mode(adapter);
3081 	al_eth_config_rx_fwd(adapter);
3082 	al_eth_change_mtu(adapter, if_getmtu(adapter->netdev));
3083 	al_eth_udma_queues_enable_all(adapter);
3084 	al_eth_refill_all_rx_bufs(adapter);
3085 	al_eth_interrupts_unmask(adapter);
3086 
3087 	/* enable forwarding interrupts from eth through pci end point */
3088 	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
3089 	    (adapter->board_type == ALPINE_NIC)) {
3090 		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
3091 		    AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR);
3092 	}
3093 
3094 	al_eth_flow_ctrl_enable(adapter);
3095 
3096 	mtx_lock(&adapter->stats_mtx);
3097 	callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter);
3098 	mtx_unlock(&adapter->stats_mtx);
3099 
3100 	al_eth_mac_start(&adapter->hal_adapter);
3101 }
3102 
3103 static int
3104 al_media_update(if_t ifp)
3105 {
3106 	struct al_eth_adapter *adapter = if_getsoftc(ifp);
3107 
3108 	if ((if_getflags(ifp) & IFF_UP) != 0)
3109 		mii_mediachg(adapter->mii);
3110 
3111 	return (0);
3112 }
3113 
3114 static void
3115 al_media_status(if_t ifp, struct ifmediareq *ifmr)
3116 {
3117 	struct al_eth_adapter *sc = if_getsoftc(ifp);
3118 	struct mii_data *mii;
3119 
3120 	if (sc->mii == NULL) {
3121 		ifmr->ifm_active = IFM_ETHER | IFM_NONE;
3122 		ifmr->ifm_status = 0;
3123 
3124 		return;
3125 	}
3126 
3127 	mii = sc->mii;
3128 	mii_pollstat(mii);
3129 
3130 	ifmr->ifm_active = mii->mii_media_active;
3131 	ifmr->ifm_status = mii->mii_media_status;
3132 }
3133 
3134 static void
3135 al_tick(void *arg)
3136 {
3137 	struct al_eth_adapter *adapter = arg;
3138 
3139 	mii_tick(adapter->mii);
3140 
3141 	/* Schedule another timeout one second from now */
3142 	callout_schedule(&adapter->wd_callout, hz);
3143 }
3144 
3145 static void
3146 al_tick_stats(void *arg)
3147 {
3148 	struct al_eth_adapter *adapter = arg;
3149 
3150 	al_eth_update_stats(adapter);
3151 
3152 	callout_schedule(&adapter->stats_callout, hz);
3153 }
3154 
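/*
 * Bring-up sequence: serdes init, MAC/MDIO hardware init, interrupt mode
 * selection, TX then RX ring allocation, IRQ hookup, and finally
 * al_eth_up_complete(), which unmasks interrupts and starts the MAC.
 * The err_* labels below unwind these steps in reverse order.
 */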
3155 static int
3156 al_eth_up(struct al_eth_adapter *adapter)
3157 {
3158 	if_t ifp = adapter->netdev;
3159 	int rc;
3160 
3161 	if (adapter->up)
3162 		return (0);
3163 
3164 	if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) {
3165 		al_eth_function_reset(adapter);
3166 		adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED;
3167 	}
3168 
3169 	if_sethwassist(ifp, 0);
3170 	if ((if_getcapenable(ifp) & IFCAP_TSO) != 0)
3171 		if_sethwassistbits(ifp, CSUM_TSO, 0);
3172 	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
3173 		if_sethwassistbits(ifp, (CSUM_TCP | CSUM_UDP), 0);
3174 	if ((if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) != 0)
3175 		if_sethwassistbits(ifp, (CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0);
3176 
3177 	al_eth_serdes_init(adapter);
3178 
3179 	rc = al_eth_hw_init(adapter);
3180 	if (rc != 0)
3181 		goto err_hw_init_open;
3182 
3183 	rc = al_eth_setup_int_mode(adapter);
3184 	if (rc != 0) {
3185 		device_printf(adapter->dev,
3186 		    "%s failed at setup interrupt mode!\n", __func__);
3187 		goto err_setup_int;
3188 	}
3189 
3190 	/* allocate transmit descriptors */
3191 	rc = al_eth_setup_all_tx_resources(adapter);
3192 	if (rc != 0)
3193 		goto err_setup_tx;
3194 
3195 	/* allocate receive descriptors */
3196 	rc = al_eth_setup_all_rx_resources(adapter);
3197 	if (rc != 0)
3198 		goto err_setup_rx;
3199 
3200 	rc = al_eth_request_irq(adapter);
3201 	if (rc != 0)
3202 		goto err_req_irq;
3203 
3204 	al_eth_up_complete(adapter);
3205 
3206 	adapter->up = true;
3207 
3208 	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial)
3209 		if_link_state_change(adapter->netdev, LINK_STATE_UP);
3210 
3211 	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
3212 		mii_mediachg(adapter->mii);
3213 
3214 		/* Schedule watchdog timeout */
3215 		mtx_lock(&adapter->wd_mtx);
3216 		callout_reset(&adapter->wd_callout, hz, al_tick, adapter);
3217 		mtx_unlock(&adapter->wd_mtx);
3218 
3219 		mii_pollstat(adapter->mii);
3220 	}
3221 
3222 	return (rc);
3223 
3224 err_req_irq:
3225 	al_eth_free_all_rx_resources(adapter);
3226 err_setup_rx:
3227 	al_eth_free_all_tx_resources(adapter);
3228 err_setup_tx:
3229 	al_eth_free_irq(adapter);
3230 err_setup_int:
3231 	al_eth_hw_stop(adapter);
3232 err_hw_init_open:
3233 	al_eth_function_reset(adapter);
3234 
3235 	return (rc);
3236 }
3237 
3238 static int
3239 al_shutdown(device_t dev)
3240 {
3241 	struct al_eth_adapter *adapter = device_get_softc(dev);
3242 
3243 	al_eth_down(adapter);
3244 
3245 	return (0);
3246 }
3247 
3248 static void
3249 al_eth_down(struct al_eth_adapter *adapter)
3250 {
3251 
3252 	device_printf_dbg(adapter->dev, "al_eth_down: begin\n");
3253 
3254 	adapter->up = false;
3255 
3256 	mtx_lock(&adapter->wd_mtx);
3257 	callout_stop(&adapter->wd_callout);
3258 	mtx_unlock(&adapter->wd_mtx);
3259 
3260 	al_eth_disable_int_sync(adapter);
3261 
3262 	mtx_lock(&adapter->stats_mtx);
3263 	callout_stop(&adapter->stats_callout);
3264 	mtx_unlock(&adapter->stats_mtx);
3265 
3266 	al_eth_free_irq(adapter);
3267 	al_eth_hw_stop(adapter);
3268 
3269 	al_eth_free_all_tx_resources(adapter);
3270 	al_eth_free_all_rx_resources(adapter);
3271 }
3272 
3273 static int
3274 al_ioctl(if_t ifp, u_long command, caddr_t data)
3275 {
3276 	struct al_eth_adapter	*adapter = if_getsoftc(ifp);
3277 	struct ifreq		*ifr = (struct ifreq *)data;
3278 	int			error = 0;
3279 
3280 	switch (command) {
3281 	case SIOCSIFMTU:
3282 	{
3283 		error = al_eth_check_mtu(adapter, ifr->ifr_mtu);
3284 		if (error != 0) {
3285 			device_printf(adapter->dev, "ioctl wrong mtu %d\n",
3286 			    ifr->ifr_mtu);
3287 			break;
3288 		}
3289 
3290 		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3291 		if_setmtu(adapter->netdev, ifr->ifr_mtu);
3292 		al_init(adapter);
3293 		break;
3294 	}
3295 	case SIOCSIFFLAGS:
3296 		if ((if_getflags(ifp) & IFF_UP) != 0) {
3297 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3298 				if (((if_getflags(ifp) ^ adapter->if_flags) &
3299 				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3300 					device_printf_dbg(adapter->dev,
3301 					    "ioctl promisc/allmulti\n");
3302 					al_eth_set_rx_mode(adapter);
3303 				}
3304 			} else {
3305 				error = al_eth_up(adapter);
3306 				if (error == 0)
3307 					if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
3308 			}
3309 		} else {
3310 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3311 				al_eth_down(adapter);
3312 				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3313 			}
3314 		}
3315 
3316 		adapter->if_flags = if_getflags(ifp);
3317 		break;
3318 
3319 	case SIOCADDMULTI:
3320 	case SIOCDELMULTI:
3321 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3322 			device_printf_dbg(adapter->dev,
3323 			    "ioctl add/del multi before\n");
3324 			al_eth_set_rx_mode(adapter);
3328 		}
3329 		break;
3330 	case SIOCSIFMEDIA:
3331 	case SIOCGIFMEDIA:
3332 		if (adapter->mii != NULL)
3333 			error = ifmedia_ioctl(ifp, ifr,
3334 			    &adapter->mii->mii_media, command);
3335 		else
3336 			error = ifmedia_ioctl(ifp, ifr,
3337 			    &adapter->media, command);
3338 		break;
3339 	case SIOCSIFCAP:
3340 	    {
3341 		int mask, reinit;
3342 
3343 		reinit = 0;
3344 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
3345 #ifdef DEVICE_POLLING
3346 		if ((mask & IFCAP_POLLING) != 0) {
3347 			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
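				/*
				 * Note: no polling handler is registered in
				 * this driver, so error is still 0 here and
				 * the check below cannot fail.
				 */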
3348 				if (error != 0)
3349 					return (error);
3350 				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
3351 			} else {
3352 				error = ether_poll_deregister(ifp);
3353 				/* Enable interrupt even in error case */
3354 				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
3355 			}
3356 		}
3357 #endif
3358 		if ((mask & IFCAP_HWCSUM) != 0) {
3359 			/* apply to both rx and tx */
3360 			if_togglecapenable(ifp, IFCAP_HWCSUM);
3361 			reinit = 1;
3362 		}
3363 		if ((mask & IFCAP_HWCSUM_IPV6) != 0) {
3364 			if_togglecapenable(ifp, IFCAP_HWCSUM_IPV6);
3365 			reinit = 1;
3366 		}
3367 		if ((mask & IFCAP_TSO) != 0) {
3368 			if_togglecapenable(ifp, IFCAP_TSO);
3369 			reinit = 1;
3370 		}
3371 		if ((mask & IFCAP_LRO) != 0) {
3372 			if_togglecapenable(ifp, IFCAP_LRO);
3373 		}
3374 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
3375 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
3376 			reinit = 1;
3377 		}
3378 		if ((mask & IFCAP_VLAN_HWFILTER) != 0) {
3379 			if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
3380 			reinit = 1;
3381 		}
3382 		if ((mask & IFCAP_VLAN_HWTSO) != 0) {
3383 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
3384 			reinit = 1;
3385 		}
3386 		if ((reinit != 0) &&
3387 		    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
3389 			al_init(adapter);
3390 		}
3391 		break;
3392 	    }
3393 
3394 	default:
3395 		error = ether_ioctl(ifp, command, data);
3396 		break;
3397 	}
3398 
3399 	return (error);
3400 }
3401 
3402 static int
3403 al_is_device_supported(device_t dev)
3404 {
3405 	uint16_t pci_vendor_id = pci_get_vendor(dev);
3406 	uint16_t pci_device_id = pci_get_device(dev);
3407 
3408 	return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS &&
3409 	    (pci_device_id == PCI_DEVICE_ID_AL_ETH ||
3410 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED ||
3411 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC ||
3412 	    pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC));
3413 }
3414 
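/*
 * A sketch (illustration only) of the newbus probe method that would consume
 * al_is_device_supported(); the function name and description string are
 * assumptions, not necessarily this driver's actual probe routine.
 */
#if 0
static int
al_probe_example(device_t dev)
{

	if (al_is_device_supported(dev)) {
		device_set_desc(dev, "Annapurna Labs Ethernet controller");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
#endif
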
3415 /* Time in msec to keep retrying failed MDIO reads/writes. */
3416 #define	MDIO_TIMEOUT_MSEC	100
3417 #define	MDIO_PAUSE_MSEC		10
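/*
 * With the values above, the MDIO helpers below make at most
 * MDIO_TIMEOUT_MSEC / MDIO_PAUSE_MSEC == 10 attempts before reporting a
 * hard failure.
 */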
3418 
3419 static int
3420 al_miibus_readreg(device_t dev, int phy, int reg)
3421 {
3422 	struct al_eth_adapter *adapter = device_get_softc(dev);
3423 	uint16_t value = 0;
3424 	int rc;
3425 	int timeout = MDIO_TIMEOUT_MSEC;
3426 
3427 	while (timeout > 0) {
3428 		rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr,
3429 		    -1, reg, &value);
3430 
3431 		if (rc == 0)
3432 			return (value);
3433 
3434 		device_printf_dbg(adapter->dev,
3435 		    "mdio read failed. try again in 10 msec\n");
3436 
3437 		timeout -= MDIO_PAUSE_MSEC;
		/* pause(9) sleeps in ticks; convert from msec. */
3438 		pause("readreg", MDIO_PAUSE_MSEC * hz / 1000);
3439 	}
3440 
3441 	if (rc != 0)
3442 		device_printf(adapter->dev, "MDIO read failed on timeout\n");
3443 
3444 	return (value);
3445 }
3446 
3447 static int
3448 al_miibus_writereg(device_t dev, int phy, int reg, int value)
3449 {
3450 	struct al_eth_adapter *adapter = device_get_softc(dev);
3451 	int rc;
3452 	int timeout = MDIO_TIMEOUT_MSEC;
3453 
3454 	while (timeout > 0) {
3455 		rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr,
3456 		    -1, reg, value);
3457 
3458 		if (rc == 0)
3459 			return (0);
3460 
3461 		device_printf_dbg(adapter->dev,
3462 		    "mdio write failed. try again in 10 msec\n");
3463 
3464 		timeout -= MDIO_PAUSE_MSEC;
		/* pause(9) sleeps in ticks; convert from msec. */
3465 		pause("writereg", MDIO_PAUSE_MSEC * hz / 1000);
3466 	}
3467 
3468 	if (rc != 0)
3469 		device_printf(adapter->dev, "MDIO write failed on timeout\n");
3470 
3471 	return (rc);
3472 }
3473 
3474 static void
3475 al_miibus_statchg(device_t dev)
3476 {
3477 	struct al_eth_adapter *adapter = device_get_softc(dev);
3478 
3479 	device_printf_dbg(adapter->dev,
3480 	    "al_miibus_statchg: state has changed!\n");
3481 	device_printf_dbg(adapter->dev,
3482 	    "al_miibus_statchg: active = 0x%x status = 0x%x\n",
3483 	    adapter->mii->mii_media_active, adapter->mii->mii_media_status);
3484 
3485 	if (adapter->up == 0)
3486 		return;
3487 
3488 	if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) {
3489 		if (adapter->mii->mii_media_status & IFM_ACTIVE) {
3490 			device_printf(adapter->dev, "link is UP\n");
3491 			if_link_state_change(adapter->netdev, LINK_STATE_UP);
3492 		} else {
3493 			device_printf(adapter->dev, "link is DOWN\n");
3494 			if_link_state_change(adapter->netdev, LINK_STATE_DOWN);
3495 		}
3496 	}
3497 }
3498 
3499 static void
3500 al_miibus_linkchg(device_t dev)
3501 {
3502 	struct al_eth_adapter *adapter = device_get_softc(dev);
3503 	uint8_t duplex = 0;
3504 	uint8_t speed = 0;
3505 
3506 	if (adapter->mii == NULL)
3507 		return;
3508 
3509 	if ((if_getflags(adapter->netdev) & IFF_UP) == 0)
3510 		return;
3511 
3512 	/* Ignore link changes when link is not ready */
3513 	if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
3514 	    (IFM_AVALID | IFM_ACTIVE)) {
3515 		return;
3516 	}
3517 
3518 	if ((adapter->mii->mii_media_active & IFM_FDX) != 0)
3519 		duplex = 1;
3520 
3521 	speed = IFM_SUBTYPE(adapter->mii->mii_media_active);
3522 
3523 	switch (speed) {
3524 	case IFM_10_T:
3525 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3526 		    AL_10BASE_T_SPEED, duplex);
3527 		break;
3528 	case IFM_100_TX:
3529 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3530 		    AL_100BASE_TX_SPEED, duplex);
3531 		break;
3532 	case IFM_1000_T:
3533 		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3534 		    AL_1000BASE_T_SPEED, duplex);
3535 		break;
3536 	default:
3537 		device_printf(adapter->dev,
3538 		    "ERROR: unknown MII media active 0x%08x\n",
3539 		    adapter->mii->mii_media_active);
3540 		break;
3541 	}
3543 }
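
/*
 * A sketch (illustration only) of how the MII handlers above are typically
 * exported through the miibus method table; the table name and contents
 * here are abbreviated assumptions, not this file's actual method list.
 */
#if 0
static device_method_t al_miibus_methods_example[] = {
	/* MII interface */
	DEVMETHOD(miibus_readreg,	al_miibus_readreg),
	DEVMETHOD(miibus_writereg,	al_miibus_writereg),
	DEVMETHOD(miibus_statchg,	al_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	al_miibus_linkchg),

	DEVMETHOD_END
};
#endif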
3544