xref: /freebsd/sys/dev/qlnx/qlnxe/qlnx_os.c (revision b9e5884ef786a6c0fd203064baa07ef308025707)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: qlnx_os.c
30  * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "qlnx_os.h"
37 #include "bcm_osal.h"
38 #include "reg_addr.h"
39 #include "ecore_gtt_reg_addr.h"
40 #include "ecore.h"
41 #include "ecore_chain.h"
42 #include "ecore_status.h"
43 #include "ecore_hw.h"
44 #include "ecore_rt_defs.h"
45 #include "ecore_init_ops.h"
46 #include "ecore_int.h"
47 #include "ecore_cxt.h"
48 #include "ecore_spq.h"
49 #include "ecore_init_fw_funcs.h"
50 #include "ecore_sp_commands.h"
51 #include "ecore_dev_api.h"
52 #include "ecore_l2_api.h"
53 #include "ecore_mcp.h"
54 #include "ecore_hw_defs.h"
55 #include "mcp_public.h"
56 #include "ecore_iro.h"
57 #include "nvm_cfg.h"
58 #include "ecore_dev_api.h"
59 #include "ecore_dbg_fw_funcs.h"
60 #include "ecore_iov_api.h"
61 #include "ecore_vf_api.h"
62 
63 #include "qlnx_ioctl.h"
64 #include "qlnx_def.h"
65 #include "qlnx_ver.h"
66 
67 #ifdef QLNX_ENABLE_IWARP
68 #include "qlnx_rdma.h"
69 #endif /* #ifdef QLNX_ENABLE_IWARP */
70 
71 #include <sys/smp.h>
72 
73 /*
74  * static functions
75  */
76 /*
77  * ioctl related functions
78  */
79 static void qlnx_add_sysctls(qlnx_host_t *ha);
80 
81 /*
82  * main driver
83  */
84 static void qlnx_release(qlnx_host_t *ha);
85 static void qlnx_fp_isr(void *arg);
86 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
87 static void qlnx_init(void *arg);
88 static void qlnx_init_locked(qlnx_host_t *ha);
89 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
90 static int qlnx_set_promisc(qlnx_host_t *ha);
91 static int qlnx_set_allmulti(qlnx_host_t *ha);
92 static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
93 static int qlnx_media_change(struct ifnet *ifp);
94 static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
95 static void qlnx_stop(qlnx_host_t *ha);
96 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
97 		struct mbuf **m_headp);
98 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
99 static uint32_t qlnx_get_optics(qlnx_host_t *ha,
100 			struct qlnx_link_output *if_link);
101 static int qlnx_transmit(struct ifnet *ifp, struct mbuf  *mp);
102 static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp,
103 		struct mbuf *mp);
104 static void qlnx_qflush(struct ifnet *ifp);
105 
106 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
107 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
108 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
109 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
110 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
111 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
112 
113 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
114 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
115 
116 static int qlnx_nic_setup(struct ecore_dev *cdev,
117 		struct ecore_pf_params *func_params);
118 static int qlnx_nic_start(struct ecore_dev *cdev);
119 static int qlnx_slowpath_start(qlnx_host_t *ha);
120 static int qlnx_slowpath_stop(qlnx_host_t *ha);
121 static int qlnx_init_hw(qlnx_host_t *ha);
122 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
123 		char ver_str[VER_SIZE]);
124 static void qlnx_unload(qlnx_host_t *ha);
125 static int qlnx_load(qlnx_host_t *ha);
126 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
127 		uint32_t add_mac);
128 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
129 		uint32_t len);
130 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
131 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
132 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
133 		struct qlnx_rx_queue *rxq);
134 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
135 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
136 		int hwfn_index);
137 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
138 		int hwfn_index);
139 static void qlnx_timer(void *arg);
140 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
141 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
142 static void qlnx_trigger_dump(qlnx_host_t *ha);
143 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
144 			struct qlnx_tx_queue *txq);
145 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
146 		struct qlnx_tx_queue *txq);
147 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
148 		int lro_enable);
149 static void qlnx_fp_taskqueue(void *context, int pending);
150 static void qlnx_sample_storm_stats(qlnx_host_t *ha);
151 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
152 		struct qlnx_agg_info *tpa);
153 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
154 
155 #if __FreeBSD_version >= 1100000
156 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
157 #endif
158 
159 /*
160  * Hooks to the Operating Systems
161  */
162 static int qlnx_pci_probe (device_t);
163 static int qlnx_pci_attach (device_t);
164 static int qlnx_pci_detach (device_t);
165 
166 #ifndef QLNX_VF
167 
168 #ifdef CONFIG_ECORE_SRIOV
169 
170 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
171 static void qlnx_iov_uninit(device_t dev);
172 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
173 static void qlnx_initialize_sriov(qlnx_host_t *ha);
174 static void qlnx_pf_taskqueue(void *context, int pending);
175 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
176 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
177 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);
178 
179 #endif /* #ifdef CONFIG_ECORE_SRIOV */
180 
181 static device_method_t qlnx_pci_methods[] = {
182 	/* Device interface */
183 	DEVMETHOD(device_probe, qlnx_pci_probe),
184 	DEVMETHOD(device_attach, qlnx_pci_attach),
185 	DEVMETHOD(device_detach, qlnx_pci_detach),
186 
187 #ifdef CONFIG_ECORE_SRIOV
188 	DEVMETHOD(pci_iov_init, qlnx_iov_init),
189 	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
190 	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
191 #endif /* #ifdef CONFIG_ECORE_SRIOV */
192 	{ 0, 0 }
193 };
194 
195 static driver_t qlnx_pci_driver = {
196 	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
197 };
198 
199 static devclass_t qlnx_devclass;
200 
201 MODULE_VERSION(if_qlnxe,1);
202 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);
203 
204 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
205 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
206 
207 #else
208 
209 static device_method_t qlnxv_pci_methods[] = {
210 	/* Device interface */
211 	DEVMETHOD(device_probe, qlnx_pci_probe),
212 	DEVMETHOD(device_attach, qlnx_pci_attach),
213 	DEVMETHOD(device_detach, qlnx_pci_detach),
214 	{ 0, 0 }
215 };
216 
217 static driver_t qlnxv_pci_driver = {
218 	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
219 };
220 
221 static devclass_t qlnxv_devclass;
222 MODULE_VERSION(if_qlnxev,1);
223 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, qlnxv_devclass, 0, 0);
224 
225 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
226 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);
227 
228 #endif /* #ifdef QLNX_VF */
229 
230 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
231 
232 char qlnx_dev_str[128];
233 char qlnx_ver_str[VER_SIZE];
234 char qlnx_name_str[NAME_SIZE];
235 
236 /*
237  * Some PCI Configuration Space Related Defines
238  */
239 
240 #ifndef PCI_VENDOR_QLOGIC
241 #define PCI_VENDOR_QLOGIC		0x1077
242 #endif
243 
244 /* 40G Adapter QLE45xxx*/
245 #ifndef QLOGIC_PCI_DEVICE_ID_1634
246 #define QLOGIC_PCI_DEVICE_ID_1634	0x1634
247 #endif
248 
249 /* 100G Adapter QLE45xxx*/
250 #ifndef QLOGIC_PCI_DEVICE_ID_1644
251 #define QLOGIC_PCI_DEVICE_ID_1644	0x1644
252 #endif
253 
254 /* 25G Adapter QLE45xxx*/
255 #ifndef QLOGIC_PCI_DEVICE_ID_1656
256 #define QLOGIC_PCI_DEVICE_ID_1656	0x1656
257 #endif
258 
259 /* 50G Adapter QLE45xxx*/
260 #ifndef QLOGIC_PCI_DEVICE_ID_1654
261 #define QLOGIC_PCI_DEVICE_ID_1654	0x1654
262 #endif
263 
264 /* 10G/25G/40G Adapter QLE41xxx*/
265 #ifndef QLOGIC_PCI_DEVICE_ID_8070
266 #define QLOGIC_PCI_DEVICE_ID_8070	0x8070
267 #endif
268 
269 /* SRIOV Device (All Speeds) Adapter QLE41xxx*/
270 #ifndef QLOGIC_PCI_DEVICE_ID_8090
271 #define QLOGIC_PCI_DEVICE_ID_8090	0x8090
272 #endif
273 
274 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
275     "qlnxe driver parameters");
276 
277 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
278 static int qlnxe_queue_count = QLNX_DEFAULT_RSS;
279 
280 #if __FreeBSD_version < 1100000
281 
282 TUNABLE_INT("hw.qlnxe.queue_count", &qlnxe_queue_count);
283 
284 #endif
285 
286 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
287 		&qlnxe_queue_count, 0, "Multi-Queue queue count");
288 
289 /*
290  * Note on RDMA personality setting
291  *
292  * Read the personality configured in NVRAM
293  * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
294  * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT
295  * use the personality in NVRAM.
296 
297  * Otherwise use t the personality configured in sysctl.
298  *
299  */
300 #define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
301 #define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
302 #define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
303 #define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
304 #define QLNX_PERSONALITY_BITS_PER_FUNC	4
305 #define QLNX_PERSONALIY_MASK		0xF
306 
307 /* RDMA configuration; 64bit field allows setting for 16 physical functions*/
308 static uint64_t qlnxe_rdma_configuration = 0x22222222;
309 
310 #if __FreeBSD_version < 1100000
311 
312 TUNABLE_QUAD("hw.qlnxe.rdma_configuration", &qlnxe_rdma_configuration);
313 
314 SYSCTL_UQUAD(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
315                &qlnxe_rdma_configuration, 0, "RDMA Configuration");
316 
317 #else
318 
319 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
320                 &qlnxe_rdma_configuration, 0, "RDMA Configuration");
321 
322 #endif /* #if __FreeBSD_version < 1100000 */
323 
324 int
325 qlnx_vf_device(qlnx_host_t *ha)
326 {
327         uint16_t	device_id;
328 
329         device_id = ha->device_id;
330 
331         if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
332                 return 0;
333 
334         return -1;
335 }
336 
337 static int
338 qlnx_valid_device(qlnx_host_t *ha)
339 {
340         uint16_t device_id;
341 
342         device_id = ha->device_id;
343 
344 #ifndef QLNX_VF
345         if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
346                 (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
347                 (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
348                 (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
349                 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
350                 return 0;
351 #else
352         if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
353 		return 0;
354 
355 #endif /* #ifndef QLNX_VF */
356         return -1;
357 }
358 
359 #ifdef QLNX_ENABLE_IWARP
360 static int
361 qlnx_rdma_supported(struct qlnx_host *ha)
362 {
363 	uint16_t device_id;
364 
365 	device_id = pci_get_device(ha->pci_dev);
366 
367 	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
368 		(device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
369 		(device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
370 		(device_id == QLOGIC_PCI_DEVICE_ID_8070))
371 		return (0);
372 
373 	return (-1);
374 }
375 #endif /* #ifdef QLNX_ENABLE_IWARP */
376 
377 /*
378  * Name:	qlnx_pci_probe
379  * Function:	Validate the PCI device to be a QLA80XX device
380  */
381 static int
382 qlnx_pci_probe(device_t dev)
383 {
384 	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
385 		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
386 	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");
387 
388 	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
389                 return (ENXIO);
390 	}
391 
392         switch (pci_get_device(dev)) {
393 #ifndef QLNX_VF
394 
395         case QLOGIC_PCI_DEVICE_ID_1644:
396 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
397 			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
398 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
399 			QLNX_VERSION_BUILD);
400                 device_set_desc_copy(dev, qlnx_dev_str);
401 
402                 break;
403 
404         case QLOGIC_PCI_DEVICE_ID_1634:
405 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
406 			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
407 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
408 			QLNX_VERSION_BUILD);
409                 device_set_desc_copy(dev, qlnx_dev_str);
410 
411                 break;
412 
413         case QLOGIC_PCI_DEVICE_ID_1656:
414 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
415 			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
416 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
417 			QLNX_VERSION_BUILD);
418                 device_set_desc_copy(dev, qlnx_dev_str);
419 
420                 break;
421 
422         case QLOGIC_PCI_DEVICE_ID_1654:
423 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
424 			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
425 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
426 			QLNX_VERSION_BUILD);
427                 device_set_desc_copy(dev, qlnx_dev_str);
428 
429                 break;
430 
431 	case QLOGIC_PCI_DEVICE_ID_8070:
432 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
433 			"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
434 			" Adapter-Ethernet Function",
435 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
436 			QLNX_VERSION_BUILD);
437 		device_set_desc_copy(dev, qlnx_dev_str);
438 
439 		break;
440 
441 #else
442 	case QLOGIC_PCI_DEVICE_ID_8090:
443 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
444 			"Qlogic SRIOV PCI CNA (AH) "
445 			"Adapter-Ethernet Function",
446 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
447 			QLNX_VERSION_BUILD);
448 		device_set_desc_copy(dev, qlnx_dev_str);
449 
450 		break;
451 
452 #endif /* #ifndef QLNX_VF */
453 
454         default:
455                 return (ENXIO);
456         }
457 
458 #ifdef QLNX_ENABLE_IWARP
459 	qlnx_rdma_init();
460 #endif /* #ifdef QLNX_ENABLE_IWARP */
461 
462         return (BUS_PROBE_DEFAULT);
463 }
464 
465 static uint16_t
466 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
467 	struct qlnx_tx_queue *txq)
468 {
469 	u16 hw_bd_cons;
470 	u16 ecore_cons_idx;
471 	uint16_t diff;
472 
473 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
474 
475 	ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
476 	if (hw_bd_cons < ecore_cons_idx) {
477 		diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
478 	} else {
479 		diff = hw_bd_cons - ecore_cons_idx;
480 	}
481 	return diff;
482 }
483 
484 static void
485 qlnx_sp_intr(void *arg)
486 {
487 	struct ecore_hwfn	*p_hwfn;
488 	qlnx_host_t		*ha;
489 	int			i;
490 
491 	p_hwfn = arg;
492 
493 	if (p_hwfn == NULL) {
494 		printf("%s: spurious slowpath intr\n", __func__);
495 		return;
496 	}
497 
498 	ha = (qlnx_host_t *)p_hwfn->p_dev;
499 
500 	QL_DPRINT2(ha, "enter\n");
501 
502 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
503 		if (&ha->cdev.hwfns[i] == p_hwfn) {
504 			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
505 			break;
506 		}
507 	}
508 	QL_DPRINT2(ha, "exit\n");
509 
510 	return;
511 }
512 
513 static void
514 qlnx_sp_taskqueue(void *context, int pending)
515 {
516 	struct ecore_hwfn	*p_hwfn;
517 
518 	p_hwfn = context;
519 
520 	if (p_hwfn != NULL) {
521 		qlnx_sp_isr(p_hwfn);
522 	}
523 	return;
524 }
525 
526 static int
527 qlnx_create_sp_taskqueues(qlnx_host_t *ha)
528 {
529 	int	i;
530 	uint8_t	tq_name[32];
531 
532 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
533                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
534 
535 		bzero(tq_name, sizeof (tq_name));
536 		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);
537 
538 		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
539 
540 		ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
541 			 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
542 
543 		if (ha->sp_taskqueue[i] == NULL)
544 			return (-1);
545 
546 		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
547 			tq_name);
548 
549 		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
550 	}
551 
552 	return (0);
553 }
554 
555 static void
556 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
557 {
558 	int	i;
559 
560 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
561 		if (ha->sp_taskqueue[i] != NULL) {
562 			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
563 			taskqueue_free(ha->sp_taskqueue[i]);
564 		}
565 	}
566 	return;
567 }
568 
569 static void
570 qlnx_fp_taskqueue(void *context, int pending)
571 {
572         struct qlnx_fastpath	*fp;
573         qlnx_host_t		*ha;
574         struct ifnet		*ifp;
575 
576         fp = context;
577 
578         if (fp == NULL)
579                 return;
580 
581 	ha = (qlnx_host_t *)fp->edev;
582 
583 	ifp = ha->ifp;
584 
585         if(ifp->if_drv_flags & IFF_DRV_RUNNING) {
586                 if (!drbr_empty(ifp, fp->tx_br)) {
587                         if(mtx_trylock(&fp->tx_mtx)) {
588 #ifdef QLNX_TRACE_PERF_DATA
589                                 tx_pkts = fp->tx_pkts_transmitted;
590                                 tx_compl = fp->tx_pkts_completed;
591 #endif
592 
593                                 qlnx_transmit_locked(ifp, fp, NULL);
594 
595 #ifdef QLNX_TRACE_PERF_DATA
596                                 fp->tx_pkts_trans_fp +=
597 					(fp->tx_pkts_transmitted - tx_pkts);
598                                 fp->tx_pkts_compl_fp +=
599 					(fp->tx_pkts_completed - tx_compl);
600 #endif
601                                 mtx_unlock(&fp->tx_mtx);
602                         }
603                 }
604         }
605 
606         QL_DPRINT2(ha, "exit \n");
607         return;
608 }
609 
610 static int
611 qlnx_create_fp_taskqueues(qlnx_host_t *ha)
612 {
613 	int	i;
614 	uint8_t	tq_name[32];
615 	struct qlnx_fastpath *fp;
616 
617 	for (i = 0; i < ha->num_rss; i++) {
618                 fp = &ha->fp_array[i];
619 
620 		bzero(tq_name, sizeof (tq_name));
621 		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
622 
623 		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
624 
625 		fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
626 					taskqueue_thread_enqueue,
627 					&fp->fp_taskqueue);
628 
629 		if (fp->fp_taskqueue == NULL)
630 			return (-1);
631 
632 		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
633 			tq_name);
634 
635 		QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
636 	}
637 
638 	return (0);
639 }
640 
641 static void
642 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
643 {
644 	int			i;
645 	struct qlnx_fastpath	*fp;
646 
647 	for (i = 0; i < ha->num_rss; i++) {
648                 fp = &ha->fp_array[i];
649 
650 		if (fp->fp_taskqueue != NULL) {
651 			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
652 			taskqueue_free(fp->fp_taskqueue);
653 			fp->fp_taskqueue = NULL;
654 		}
655 	}
656 	return;
657 }
658 
659 static void
660 qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
661 {
662 	int			i;
663 	struct qlnx_fastpath	*fp;
664 
665 	for (i = 0; i < ha->num_rss; i++) {
666                 fp = &ha->fp_array[i];
667 
668 		if (fp->fp_taskqueue != NULL) {
669 			QLNX_UNLOCK(ha);
670 			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
671 			QLNX_LOCK(ha);
672 		}
673 	}
674 	return;
675 }
676 
677 static void
678 qlnx_get_params(qlnx_host_t *ha)
679 {
680 	if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
681 		device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
682 			qlnxe_queue_count);
683 		qlnxe_queue_count = 0;
684 	}
685 	return;
686 }
687 
688 static void
689 qlnx_error_recovery_taskqueue(void *context, int pending)
690 {
691         qlnx_host_t *ha;
692 
693         ha = context;
694 
695         QL_DPRINT2(ha, "enter\n");
696 
697         QLNX_LOCK(ha);
698         qlnx_stop(ha);
699         QLNX_UNLOCK(ha);
700 
701 #ifdef QLNX_ENABLE_IWARP
702 	qlnx_rdma_dev_remove(ha);
703 #endif /* #ifdef QLNX_ENABLE_IWARP */
704 
705         qlnx_slowpath_stop(ha);
706         qlnx_slowpath_start(ha);
707 
708 #ifdef QLNX_ENABLE_IWARP
709 	qlnx_rdma_dev_add(ha);
710 #endif /* #ifdef QLNX_ENABLE_IWARP */
711 
712         qlnx_init(ha);
713 
714         callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
715 
716         QL_DPRINT2(ha, "exit\n");
717 
718         return;
719 }
720 
721 static int
722 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
723 {
724         uint8_t tq_name[32];
725 
726         bzero(tq_name, sizeof (tq_name));
727         snprintf(tq_name, sizeof (tq_name), "ql_err_tq");
728 
729         TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);
730 
731         ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
732                                 taskqueue_thread_enqueue, &ha->err_taskqueue);
733 
734         if (ha->err_taskqueue == NULL)
735                 return (-1);
736 
737         taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);
738 
739         QL_DPRINT1(ha, "%p\n",ha->err_taskqueue);
740 
741         return (0);
742 }
743 
744 static void
745 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
746 {
747         if (ha->err_taskqueue != NULL) {
748                 taskqueue_drain(ha->err_taskqueue, &ha->err_task);
749                 taskqueue_free(ha->err_taskqueue);
750         }
751 
752         ha->err_taskqueue = NULL;
753 
754         return;
755 }
756 
757 /*
758  * Name:	qlnx_pci_attach
759  * Function:	attaches the device to the operating system
760  */
761 static int
762 qlnx_pci_attach(device_t dev)
763 {
764 	qlnx_host_t	*ha = NULL;
765 	uint32_t	rsrc_len_reg __unused = 0;
766 	uint32_t	rsrc_len_dbells = 0;
767 	uint32_t	rsrc_len_msix __unused = 0;
768 	int		i;
769 	uint32_t	mfw_ver;
770 	uint32_t	num_sp_msix = 0;
771 	uint32_t	num_rdma_irqs = 0;
772 
773         if ((ha = device_get_softc(dev)) == NULL) {
774                 device_printf(dev, "cannot get softc\n");
775                 return (ENOMEM);
776         }
777 
778         memset(ha, 0, sizeof (qlnx_host_t));
779 
780         ha->device_id = pci_get_device(dev);
781 
782         if (qlnx_valid_device(ha) != 0) {
783                 device_printf(dev, "device is not valid device\n");
784                 return (ENXIO);
785 	}
786         ha->pci_func = pci_get_function(dev);
787 
788         ha->pci_dev = dev;
789 
790 	mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
791 
792         ha->flags.lock_init = 1;
793 
794         pci_enable_busmaster(dev);
795 
796 	/*
797 	 * map the PCI BARs
798 	 */
799 
800         ha->reg_rid = PCIR_BAR(0);
801         ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
802                                 RF_ACTIVE);
803 
804         if (ha->pci_reg == NULL) {
805                 device_printf(dev, "unable to map BAR0\n");
806                 goto qlnx_pci_attach_err;
807         }
808 
809         rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
810                                         ha->reg_rid);
811 
812 	ha->dbells_rid = PCIR_BAR(2);
813 	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
814 					SYS_RES_MEMORY,
815 					ha->dbells_rid);
816 	if (rsrc_len_dbells) {
817 		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
818 					&ha->dbells_rid, RF_ACTIVE);
819 
820 		if (ha->pci_dbells == NULL) {
821 			device_printf(dev, "unable to map BAR1\n");
822 			goto qlnx_pci_attach_err;
823 		}
824 		ha->dbells_phys_addr = (uint64_t)
825 			bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
826 
827 		ha->dbells_size = rsrc_len_dbells;
828 	} else {
829 		if (qlnx_vf_device(ha) != 0) {
830 			device_printf(dev, " BAR1 size is zero\n");
831 			goto qlnx_pci_attach_err;
832 		}
833 	}
834 
835         ha->msix_rid = PCIR_BAR(4);
836         ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
837                         &ha->msix_rid, RF_ACTIVE);
838 
839         if (ha->msix_bar == NULL) {
840                 device_printf(dev, "unable to map BAR2\n");
841                 goto qlnx_pci_attach_err;
842 	}
843 
844         rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
845                                         ha->msix_rid);
846 
847 	ha->dbg_level = 0x0000;
848 
849 	QL_DPRINT1(ha, "\n\t\t\t"
850 		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
851 		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
852 		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
853 		" msix_avail = 0x%x "
854 		"\n\t\t\t[ncpus = %d]\n",
855 		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
856 		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
857 		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
858 		mp_ncpus);
859 	/*
860 	 * allocate dma tags
861 	 */
862 
863 	if (qlnx_alloc_parent_dma_tag(ha))
864                 goto qlnx_pci_attach_err;
865 
866 	if (qlnx_alloc_tx_dma_tag(ha))
867                 goto qlnx_pci_attach_err;
868 
869 	if (qlnx_alloc_rx_dma_tag(ha))
870                 goto qlnx_pci_attach_err;
871 
872 
873 	if (qlnx_init_hw(ha) != 0)
874 		goto qlnx_pci_attach_err;
875 
876         ha->flags.hw_init = 1;
877 
878 	qlnx_get_params(ha);
879 
880 	if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
881 		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
882 		qlnxe_queue_count = QLNX_MAX_RSS;
883 	}
884 
885 	/*
886 	 * Allocate MSI-x vectors
887 	 */
888 	if (qlnx_vf_device(ha) != 0) {
889 		if (qlnxe_queue_count == 0)
890 			ha->num_rss = QLNX_DEFAULT_RSS;
891 		else
892 			ha->num_rss = qlnxe_queue_count;
893 
894 		num_sp_msix = ha->cdev.num_hwfns;
895 	} else {
896 		uint8_t max_rxq;
897 		uint8_t max_txq;
898 
899 		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
900 		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);
901 
902 		if (max_rxq < max_txq)
903 			ha->num_rss = max_rxq;
904 		else
905 			ha->num_rss = max_txq;
906 
907 		if (ha->num_rss > QLNX_MAX_VF_RSS)
908 			ha->num_rss = QLNX_MAX_VF_RSS;
909 
910 		num_sp_msix = 0;
911 	}
912 
913 	if (ha->num_rss > mp_ncpus)
914 		ha->num_rss = mp_ncpus;
915 
916 	ha->num_tc = QLNX_MAX_TC;
917 
918         ha->msix_count = pci_msix_count(dev);
919 
920 #ifdef QLNX_ENABLE_IWARP
921 
922 	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);
923 
924 #endif /* #ifdef QLNX_ENABLE_IWARP */
925 
926         if (!ha->msix_count ||
927 		(ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
928                 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
929                         ha->msix_count);
930                 goto qlnx_pci_attach_err;
931         }
932 
933 	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
934 		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
935 	else
936 		ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);
937 
938 	QL_DPRINT1(ha, "\n\t\t\t"
939 		"pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
940 		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
941 		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
942 		" msix_avail = 0x%x msix_alloc = 0x%x"
943 		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
944 		 ha->pci_reg, rsrc_len_reg,
945 		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
946 		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
947 		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
948 
949         if (pci_alloc_msix(dev, &ha->msix_count)) {
950                 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
951                         ha->msix_count);
952                 ha->msix_count = 0;
953                 goto qlnx_pci_attach_err;
954         }
955 
956 	/*
957 	 * Initialize slow path interrupt and task queue
958 	 */
959 
960 	if (num_sp_msix) {
961 		if (qlnx_create_sp_taskqueues(ha) != 0)
962 			goto qlnx_pci_attach_err;
963 
964 		for (i = 0; i < ha->cdev.num_hwfns; i++) {
965 			struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
966 
967 			ha->sp_irq_rid[i] = i + 1;
968 			ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
969 						&ha->sp_irq_rid[i],
970 						(RF_ACTIVE | RF_SHAREABLE));
971 			if (ha->sp_irq[i] == NULL) {
972                 		device_printf(dev,
973 					"could not allocate mbx interrupt\n");
974 				goto qlnx_pci_attach_err;
975 			}
976 
977 			if (bus_setup_intr(dev, ha->sp_irq[i],
978 				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
979 				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
980 				device_printf(dev,
981 					"could not setup slow path interrupt\n");
982 				goto qlnx_pci_attach_err;
983 			}
984 
985 			QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
986 				" sp_irq %p sp_handle %p\n", p_hwfn,
987 				ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
988 		}
989 	}
990 
991 	/*
992 	 * initialize fast path interrupt
993 	 */
994 	if (qlnx_create_fp_taskqueues(ha) != 0)
995 		goto qlnx_pci_attach_err;
996 
997         for (i = 0; i < ha->num_rss; i++) {
998                 ha->irq_vec[i].rss_idx = i;
999                 ha->irq_vec[i].ha = ha;
1000                 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;
1001 
1002                 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1003                                 &ha->irq_vec[i].irq_rid,
1004                                 (RF_ACTIVE | RF_SHAREABLE));
1005 
1006                 if (ha->irq_vec[i].irq == NULL) {
1007                         device_printf(dev,
1008 				"could not allocate interrupt[%d] irq_rid = %d\n",
1009 				i, ha->irq_vec[i].irq_rid);
1010                         goto qlnx_pci_attach_err;
1011                 }
1012 
1013 		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
1014                         device_printf(dev, "could not allocate tx_br[%d]\n", i);
1015                         goto qlnx_pci_attach_err;
1016 		}
1017 	}
1018 
1019 	if (qlnx_vf_device(ha) != 0) {
1020 		callout_init(&ha->qlnx_callout, 1);
1021 		ha->flags.callout_init = 1;
1022 
1023 		for (i = 0; i < ha->cdev.num_hwfns; i++) {
1024 			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
1025 				goto qlnx_pci_attach_err;
1026 			if (ha->grcdump_size[i] == 0)
1027 				goto qlnx_pci_attach_err;
1028 
1029 			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
1030 			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
1031 				i, ha->grcdump_size[i]);
1032 
1033 			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
1034 			if (ha->grcdump[i] == NULL) {
1035 				device_printf(dev, "grcdump alloc[%d] failed\n", i);
1036 				goto qlnx_pci_attach_err;
1037 			}
1038 
1039 			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
1040 				goto qlnx_pci_attach_err;
1041 			if (ha->idle_chk_size[i] == 0)
1042 				goto qlnx_pci_attach_err;
1043 
1044 			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
1045 			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
1046 				i, ha->idle_chk_size[i]);
1047 
1048 			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
1049 
1050 			if (ha->idle_chk[i] == NULL) {
1051 				device_printf(dev, "idle_chk alloc failed\n");
1052 				goto qlnx_pci_attach_err;
1053 			}
1054 		}
1055 
1056 		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
1057 			goto qlnx_pci_attach_err;
1058 	}
1059 
1060 	if (qlnx_slowpath_start(ha) != 0)
1061 		goto qlnx_pci_attach_err;
1062 	else
1063 		ha->flags.slowpath_start = 1;
1064 
1065 	if (qlnx_vf_device(ha) != 0) {
1066 		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
1067 			qlnx_mdelay(__func__, 1000);
1068 			qlnx_trigger_dump(ha);
1069 
1070 			goto qlnx_pci_attach_err0;
1071 		}
1072 
1073 		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
1074 			qlnx_mdelay(__func__, 1000);
1075 			qlnx_trigger_dump(ha);
1076 
1077 			goto qlnx_pci_attach_err0;
1078 		}
1079 	} else {
1080 		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
1081 		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
1082 	}
1083 
1084 	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
1085 		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
1086 		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
1087 	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
1088 		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
1089 		FW_ENGINEERING_VERSION);
1090 
1091 	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
1092 		 ha->stormfw_ver, ha->mfw_ver);
1093 
1094 	qlnx_init_ifnet(dev, ha);
1095 
1096 	/*
1097 	 * add sysctls
1098 	 */
1099 	qlnx_add_sysctls(ha);
1100 
1101 qlnx_pci_attach_err0:
1102         /*
1103 	 * create ioctl device interface
1104 	 */
1105 	if (qlnx_vf_device(ha) != 0) {
1106 		if (qlnx_make_cdev(ha)) {
1107 			device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
1108 			goto qlnx_pci_attach_err;
1109 		}
1110 
1111 #ifdef QLNX_ENABLE_IWARP
1112 		qlnx_rdma_dev_add(ha);
1113 #endif /* #ifdef QLNX_ENABLE_IWARP */
1114 	}
1115 
1116 #ifndef QLNX_VF
1117 #ifdef CONFIG_ECORE_SRIOV
1118 
1119 	if (qlnx_vf_device(ha) != 0)
1120 		qlnx_initialize_sriov(ha);
1121 
1122 #endif /* #ifdef CONFIG_ECORE_SRIOV */
1123 #endif /* #ifdef QLNX_VF */
1124 
1125 	QL_DPRINT2(ha, "success\n");
1126 
1127         return (0);
1128 
1129 qlnx_pci_attach_err:
1130 
1131 	qlnx_release(ha);
1132 
1133 	return (ENXIO);
1134 }
1135 
1136 /*
1137  * Name:	qlnx_pci_detach
1138  * Function:	Unhooks the device from the operating system
1139  */
1140 static int
1141 qlnx_pci_detach(device_t dev)
1142 {
1143 	qlnx_host_t	*ha = NULL;
1144 
1145         if ((ha = device_get_softc(dev)) == NULL) {
1146                 device_printf(dev, "%s: cannot get softc\n", __func__);
1147                 return (ENOMEM);
1148         }
1149 
1150 	if (qlnx_vf_device(ha) != 0) {
1151 #ifdef CONFIG_ECORE_SRIOV
1152 		int ret;
1153 
1154 		ret = pci_iov_detach(dev);
1155 		if (ret) {
1156                 	device_printf(dev, "%s: SRIOV in use\n", __func__);
1157 			return (ret);
1158 		}
1159 
1160 #endif /* #ifdef CONFIG_ECORE_SRIOV */
1161 
1162 #ifdef QLNX_ENABLE_IWARP
1163 		if (qlnx_rdma_dev_remove(ha) != 0)
1164 			return (EBUSY);
1165 #endif /* #ifdef QLNX_ENABLE_IWARP */
1166 	}
1167 
1168 	QLNX_LOCK(ha);
1169 	qlnx_stop(ha);
1170 	QLNX_UNLOCK(ha);
1171 
1172 	qlnx_release(ha);
1173 
1174         return (0);
1175 }
1176 
1177 #ifdef QLNX_ENABLE_IWARP
1178 
1179 static uint8_t
1180 qlnx_get_personality(uint8_t pci_func)
1181 {
1182 	uint8_t personality;
1183 
1184 	personality = (qlnxe_rdma_configuration >>
1185 				(pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
1186 				QLNX_PERSONALIY_MASK;
1187 	return (personality);
1188 }
1189 
1190 static void
1191 qlnx_set_personality(qlnx_host_t *ha)
1192 {
1193 	uint8_t personality;
1194 
1195 	personality = qlnx_get_personality(ha->pci_func);
1196 
1197 	switch (personality) {
1198 	case QLNX_PERSONALITY_DEFAULT:
1199                	device_printf(ha->pci_dev, "%s: DEFAULT\n",
1200 			__func__);
1201 		ha->personality = ECORE_PCI_DEFAULT;
1202 		break;
1203 
1204 	case QLNX_PERSONALITY_ETH_ONLY:
1205                	device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
1206 			__func__);
1207 		ha->personality = ECORE_PCI_ETH;
1208 		break;
1209 
1210 	case QLNX_PERSONALITY_ETH_IWARP:
1211                	device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
1212 			__func__);
1213 		ha->personality = ECORE_PCI_ETH_IWARP;
1214 		break;
1215 
1216 	case QLNX_PERSONALITY_ETH_ROCE:
1217                	device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
1218 			__func__);
1219 		ha->personality = ECORE_PCI_ETH_ROCE;
1220 		break;
1221 	}
1222 
1223 	return;
1224 }
1225 
1226 #endif /* #ifdef QLNX_ENABLE_IWARP */
1227 
1228 static int
1229 qlnx_init_hw(qlnx_host_t *ha)
1230 {
1231 	int				rval = 0;
1232 	struct ecore_hw_prepare_params	params;
1233 
1234 	ecore_init_struct(&ha->cdev);
1235 
1236 	/* ha->dp_module = ECORE_MSG_PROBE |
1237 				ECORE_MSG_INTR |
1238 				ECORE_MSG_SP |
1239 				ECORE_MSG_LINK |
1240 				ECORE_MSG_SPQ |
1241 				ECORE_MSG_RDMA;
1242 	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
1243 	//ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
1244 	ha->dp_level = ECORE_LEVEL_NOTICE;
1245 	//ha->dp_level = ECORE_LEVEL_VERBOSE;
1246 
1247 	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
1248 
1249 	ha->cdev.regview = ha->pci_reg;
1250 
1251 	ha->personality = ECORE_PCI_DEFAULT;
1252 
1253 	if (qlnx_vf_device(ha) == 0) {
1254 		ha->cdev.b_is_vf = true;
1255 
1256 		if (ha->pci_dbells != NULL) {
1257 			ha->cdev.doorbells = ha->pci_dbells;
1258 			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1259 			ha->cdev.db_size = ha->dbells_size;
1260 		} else {
1261 			ha->pci_dbells = ha->pci_reg;
1262 		}
1263 	} else {
1264 		ha->cdev.doorbells = ha->pci_dbells;
1265 		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1266 		ha->cdev.db_size = ha->dbells_size;
1267 
1268 #ifdef QLNX_ENABLE_IWARP
1269 
1270 		if (qlnx_rdma_supported(ha) == 0)
1271 			qlnx_set_personality(ha);
1272 
1273 #endif /* #ifdef QLNX_ENABLE_IWARP */
1274 	}
1275 	QL_DPRINT2(ha, "%s: %s\n", __func__,
1276 		(ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));
1277 
1278 	bzero(&params, sizeof (struct ecore_hw_prepare_params));
1279 
1280 	params.personality = ha->personality;
1281 
1282 	params.drv_resc_alloc = false;
1283 	params.chk_reg_fifo = false;
1284 	params.initiate_pf_flr = true;
1285 	params.epoch = 0;
1286 
1287 	ecore_hw_prepare(&ha->cdev, &params);
1288 
1289 	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
1290 
1291 	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
1292 		ha, &ha->cdev, &ha->cdev.hwfns[0]);
1293 
1294 	return (rval);
1295 }
1296 
1297 static void
1298 qlnx_release(qlnx_host_t *ha)
1299 {
1300         device_t	dev;
1301         int		i;
1302 
1303         dev = ha->pci_dev;
1304 
1305 	QL_DPRINT2(ha, "enter\n");
1306 
1307 	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
1308 		if (ha->idle_chk[i] != NULL) {
1309 			free(ha->idle_chk[i], M_QLNXBUF);
1310 			ha->idle_chk[i] = NULL;
1311 		}
1312 
1313 		if (ha->grcdump[i] != NULL) {
1314 			free(ha->grcdump[i], M_QLNXBUF);
1315 			ha->grcdump[i] = NULL;
1316 		}
1317 	}
1318 
1319         if (ha->flags.callout_init)
1320                 callout_drain(&ha->qlnx_callout);
1321 
1322 	if (ha->flags.slowpath_start) {
1323 		qlnx_slowpath_stop(ha);
1324 	}
1325 
1326         if (ha->flags.hw_init)
1327 		ecore_hw_remove(&ha->cdev);
1328 
1329         qlnx_del_cdev(ha);
1330 
1331         if (ha->ifp != NULL)
1332                 ether_ifdetach(ha->ifp);
1333 
1334 	qlnx_free_tx_dma_tag(ha);
1335 
1336 	qlnx_free_rx_dma_tag(ha);
1337 
1338 	qlnx_free_parent_dma_tag(ha);
1339 
1340 	if (qlnx_vf_device(ha) != 0) {
1341 		qlnx_destroy_error_recovery_taskqueue(ha);
1342 	}
1343 
1344         for (i = 0; i < ha->num_rss; i++) {
1345 		struct qlnx_fastpath *fp = &ha->fp_array[i];
1346 
1347                 if (ha->irq_vec[i].handle) {
1348                         (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1349                                         ha->irq_vec[i].handle);
1350                 }
1351 
1352                 if (ha->irq_vec[i].irq) {
1353                         (void)bus_release_resource(dev, SYS_RES_IRQ,
1354                                 ha->irq_vec[i].irq_rid,
1355                                 ha->irq_vec[i].irq);
1356                 }
1357 
1358 		qlnx_free_tx_br(ha, fp);
1359         }
1360 	qlnx_destroy_fp_taskqueues(ha);
1361 
1362  	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1363         	if (ha->sp_handle[i])
1364                 	(void)bus_teardown_intr(dev, ha->sp_irq[i],
1365 				ha->sp_handle[i]);
1366 
1367         	if (ha->sp_irq[i])
1368 			(void) bus_release_resource(dev, SYS_RES_IRQ,
1369 				ha->sp_irq_rid[i], ha->sp_irq[i]);
1370 	}
1371 
1372 	qlnx_destroy_sp_taskqueues(ha);
1373 
1374         if (ha->msix_count)
1375                 pci_release_msi(dev);
1376 
1377         if (ha->flags.lock_init) {
1378                 mtx_destroy(&ha->hw_lock);
1379         }
1380 
1381         if (ha->pci_reg)
1382                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1383                                 ha->pci_reg);
1384 
1385         if (ha->dbells_size && ha->pci_dbells)
1386                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1387                                 ha->pci_dbells);
1388 
1389         if (ha->msix_bar)
1390                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1391                                 ha->msix_bar);
1392 
1393 	QL_DPRINT2(ha, "exit\n");
1394 	return;
1395 }
1396 
1397 static void
1398 qlnx_trigger_dump(qlnx_host_t *ha)
1399 {
1400 	int	i;
1401 
1402 	if (ha->ifp != NULL)
1403 		ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1404 
1405 	QL_DPRINT2(ha, "enter\n");
1406 
1407 	if (qlnx_vf_device(ha) == 0)
1408 		return;
1409 
1410 	ha->error_recovery = 1;
1411 
1412 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1413 		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1414 		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1415 	}
1416 
1417 	QL_DPRINT2(ha, "exit\n");
1418 
1419 	return;
1420 }
1421 
1422 static int
1423 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1424 {
1425         int		err, ret = 0;
1426         qlnx_host_t	*ha;
1427 
1428         err = sysctl_handle_int(oidp, &ret, 0, req);
1429 
1430         if (err || !req->newptr)
1431                 return (err);
1432 
1433         if (ret == 1) {
1434                 ha = (qlnx_host_t *)arg1;
1435                 qlnx_trigger_dump(ha);
1436         }
1437         return (err);
1438 }
1439 
1440 static int
1441 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
1442 {
1443         int			err, i, ret = 0, usecs = 0;
1444         qlnx_host_t		*ha;
1445 	struct ecore_hwfn	*p_hwfn;
1446 	struct qlnx_fastpath	*fp;
1447 
1448         err = sysctl_handle_int(oidp, &usecs, 0, req);
1449 
1450         if (err || !req->newptr || !usecs || (usecs > 255))
1451                 return (err);
1452 
1453         ha = (qlnx_host_t *)arg1;
1454 
1455 	if (qlnx_vf_device(ha) == 0)
1456 		return (-1);
1457 
1458 	for (i = 0; i < ha->num_rss; i++) {
1459 		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1460 
1461         	fp = &ha->fp_array[i];
1462 
1463 		if (fp->txq[0]->handle != NULL) {
1464 			ret = ecore_set_queue_coalesce(p_hwfn, 0,
1465 					(uint16_t)usecs, fp->txq[0]->handle);
1466 		}
1467         }
1468 
1469 	if (!ret)
1470 		ha->tx_coalesce_usecs = (uint8_t)usecs;
1471 
1472         return (err);
1473 }
1474 
1475 static int
1476 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
1477 {
1478         int			err, i, ret = 0, usecs = 0;
1479         qlnx_host_t		*ha;
1480 	struct ecore_hwfn	*p_hwfn;
1481 	struct qlnx_fastpath	*fp;
1482 
1483         err = sysctl_handle_int(oidp, &usecs, 0, req);
1484 
1485         if (err || !req->newptr || !usecs || (usecs > 255))
1486                 return (err);
1487 
1488         ha = (qlnx_host_t *)arg1;
1489 
1490 	if (qlnx_vf_device(ha) == 0)
1491 		return (-1);
1492 
1493 	for (i = 0; i < ha->num_rss; i++) {
1494 		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1495 
1496         	fp = &ha->fp_array[i];
1497 
1498 		if (fp->rxq->handle != NULL) {
1499 			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
1500 					 0, fp->rxq->handle);
1501 		}
1502 	}
1503 
1504 	if (!ret)
1505 		ha->rx_coalesce_usecs = (uint8_t)usecs;
1506 
1507         return (err);
1508 }
1509 
1510 static void
1511 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1512 {
1513         struct sysctl_ctx_list	*ctx;
1514         struct sysctl_oid_list	*children;
1515 	struct sysctl_oid	*ctx_oid;
1516 
1517         ctx = device_get_sysctl_ctx(ha->pci_dev);
1518 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1519 
1520 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
1521 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
1522         children = SYSCTL_CHILDREN(ctx_oid);
1523 
1524 	SYSCTL_ADD_QUAD(ctx, children,
1525                 OID_AUTO, "sp_interrupts",
1526                 CTLFLAG_RD, &ha->sp_interrupts,
1527                 "No. of slowpath interrupts");
1528 
1529 	return;
1530 }
1531 
1532 static void
1533 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1534 {
1535         struct sysctl_ctx_list	*ctx;
1536         struct sysctl_oid_list	*children;
1537         struct sysctl_oid_list	*node_children;
1538 	struct sysctl_oid	*ctx_oid;
1539 	int			i, j;
1540 	uint8_t			name_str[16];
1541 
1542         ctx = device_get_sysctl_ctx(ha->pci_dev);
1543 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1544 
1545 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
1546 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
1547 	children = SYSCTL_CHILDREN(ctx_oid);
1548 
1549 	for (i = 0; i < ha->num_rss; i++) {
1550 		bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1551 		snprintf(name_str, sizeof(name_str), "%d", i);
1552 
1553 		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
1554 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
1555 		node_children = SYSCTL_CHILDREN(ctx_oid);
1556 
1557 		/* Tx Related */
1558 
1559 		SYSCTL_ADD_QUAD(ctx, node_children,
1560 			OID_AUTO, "tx_pkts_processed",
1561 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1562 			"No. of packets processed for transmission");
1563 
1564 		SYSCTL_ADD_QUAD(ctx, node_children,
1565 			OID_AUTO, "tx_pkts_freed",
1566 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1567 			"No. of freed packets");
1568 
1569 		SYSCTL_ADD_QUAD(ctx, node_children,
1570 			OID_AUTO, "tx_pkts_transmitted",
1571 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1572 			"No. of transmitted packets");
1573 
1574 		SYSCTL_ADD_QUAD(ctx, node_children,
1575 			OID_AUTO, "tx_pkts_completed",
1576 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1577 			"No. of transmit completions");
1578 
1579                 SYSCTL_ADD_QUAD(ctx, node_children,
1580                         OID_AUTO, "tx_non_tso_pkts",
1581                         CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
1582                         "No. of non LSO transmited packets");
1583 
1584 #ifdef QLNX_TRACE_PERF_DATA
1585 
1586                 SYSCTL_ADD_QUAD(ctx, node_children,
1587                         OID_AUTO, "tx_pkts_trans_ctx",
1588                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
1589                         "No. of transmitted packets in transmit context");
1590 
1591                 SYSCTL_ADD_QUAD(ctx, node_children,
1592                         OID_AUTO, "tx_pkts_compl_ctx",
1593                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
1594                         "No. of transmit completions in transmit context");
1595 
1596                 SYSCTL_ADD_QUAD(ctx, node_children,
1597                         OID_AUTO, "tx_pkts_trans_fp",
1598                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
1599                         "No. of transmitted packets in taskqueue");
1600 
1601                 SYSCTL_ADD_QUAD(ctx, node_children,
1602                         OID_AUTO, "tx_pkts_compl_fp",
1603                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
1604                         "No. of transmit completions in taskqueue");
1605 
1606                 SYSCTL_ADD_QUAD(ctx, node_children,
1607                         OID_AUTO, "tx_pkts_compl_intr",
1608                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
1609                         "No. of transmit completions in interrupt ctx");
1610 #endif
1611 
1612                 SYSCTL_ADD_QUAD(ctx, node_children,
1613                         OID_AUTO, "tx_tso_pkts",
1614                         CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
1615                         "No. of LSO transmited packets");
1616 
1617 		SYSCTL_ADD_QUAD(ctx, node_children,
1618 			OID_AUTO, "tx_lso_wnd_min_len",
1619 			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1620 			"tx_lso_wnd_min_len");
1621 
1622 		SYSCTL_ADD_QUAD(ctx, node_children,
1623 			OID_AUTO, "tx_defrag",
1624 			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1625 			"tx_defrag");
1626 
1627 		SYSCTL_ADD_QUAD(ctx, node_children,
1628 			OID_AUTO, "tx_nsegs_gt_elem_left",
1629 			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1630 			"tx_nsegs_gt_elem_left");
1631 
1632 		SYSCTL_ADD_UINT(ctx, node_children,
1633 			OID_AUTO, "tx_tso_max_nsegs",
1634 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1635 			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1636 
1637 		SYSCTL_ADD_UINT(ctx, node_children,
1638 			OID_AUTO, "tx_tso_min_nsegs",
1639 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1640 			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1641 
1642 		SYSCTL_ADD_UINT(ctx, node_children,
1643 			OID_AUTO, "tx_tso_max_pkt_len",
1644 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1645 			ha->fp_array[i].tx_tso_max_pkt_len,
1646 			"tx_tso_max_pkt_len");
1647 
1648 		SYSCTL_ADD_UINT(ctx, node_children,
1649 			OID_AUTO, "tx_tso_min_pkt_len",
1650 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1651 			ha->fp_array[i].tx_tso_min_pkt_len,
1652 			"tx_tso_min_pkt_len");
1653 
1654 		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
1655 			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1656 			snprintf(name_str, sizeof(name_str),
1657 				"tx_pkts_nseg_%02d", (j+1));
1658 
1659 			SYSCTL_ADD_QUAD(ctx, node_children,
1660 				OID_AUTO, name_str, CTLFLAG_RD,
1661 				&ha->fp_array[i].tx_pkts[j], name_str);
1662 		}
1663 
1664 #ifdef QLNX_TRACE_PERF_DATA
1665                 for (j = 0; j < 18; j++) {
1666                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1667                         snprintf(name_str, sizeof(name_str),
1668                                 "tx_pkts_hist_%02d", (j+1));
1669 
1670                         SYSCTL_ADD_QUAD(ctx, node_children,
1671                                 OID_AUTO, name_str, CTLFLAG_RD,
1672                                 &ha->fp_array[i].tx_pkts_hist[j], name_str);
1673                 }
1674                 for (j = 0; j < 5; j++) {
1675                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1676                         snprintf(name_str, sizeof(name_str),
1677                                 "tx_comInt_%02d", (j+1));
1678 
1679                         SYSCTL_ADD_QUAD(ctx, node_children,
1680                                 OID_AUTO, name_str, CTLFLAG_RD,
1681                                 &ha->fp_array[i].tx_comInt[j], name_str);
1682                 }
1683                 for (j = 0; j < 18; j++) {
1684                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1685                         snprintf(name_str, sizeof(name_str),
1686                                 "tx_pkts_q_%02d", (j+1));
1687 
1688                         SYSCTL_ADD_QUAD(ctx, node_children,
1689                                 OID_AUTO, name_str, CTLFLAG_RD,
1690                                 &ha->fp_array[i].tx_pkts_q[j], name_str);
1691                 }
1692 #endif
1693 
1694 		SYSCTL_ADD_QUAD(ctx, node_children,
1695 			OID_AUTO, "err_tx_nsegs_gt_elem_left",
1696 			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1697 			"err_tx_nsegs_gt_elem_left");
1698 
1699 		SYSCTL_ADD_QUAD(ctx, node_children,
1700 			OID_AUTO, "err_tx_dmamap_create",
1701 			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1702 			"err_tx_dmamap_create");
1703 
1704 		SYSCTL_ADD_QUAD(ctx, node_children,
1705 			OID_AUTO, "err_tx_defrag_dmamap_load",
1706 			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1707 			"err_tx_defrag_dmamap_load");
1708 
1709 		SYSCTL_ADD_QUAD(ctx, node_children,
1710 			OID_AUTO, "err_tx_non_tso_max_seg",
1711 			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1712 			"err_tx_non_tso_max_seg");
1713 
1714 		SYSCTL_ADD_QUAD(ctx, node_children,
1715 			OID_AUTO, "err_tx_dmamap_load",
1716 			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1717 			"err_tx_dmamap_load");
1718 
1719 		SYSCTL_ADD_QUAD(ctx, node_children,
1720 			OID_AUTO, "err_tx_defrag",
1721 			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1722 			"err_tx_defrag");
1723 
1724 		SYSCTL_ADD_QUAD(ctx, node_children,
1725 			OID_AUTO, "err_tx_free_pkt_null",
1726 			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1727 			"err_tx_free_pkt_null");
1728 
1729 		SYSCTL_ADD_QUAD(ctx, node_children,
1730 			OID_AUTO, "err_tx_cons_idx_conflict",
1731 			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1732 			"err_tx_cons_idx_conflict");
1733 
1734 		SYSCTL_ADD_QUAD(ctx, node_children,
1735 			OID_AUTO, "lro_cnt_64",
1736 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1737 			"lro_cnt_64");
1738 
1739 		SYSCTL_ADD_QUAD(ctx, node_children,
1740 			OID_AUTO, "lro_cnt_128",
1741 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1742 			"lro_cnt_128");
1743 
1744 		SYSCTL_ADD_QUAD(ctx, node_children,
1745 			OID_AUTO, "lro_cnt_256",
1746 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1747 			"lro_cnt_256");
1748 
1749 		SYSCTL_ADD_QUAD(ctx, node_children,
1750 			OID_AUTO, "lro_cnt_512",
1751 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1752 			"lro_cnt_512");
1753 
1754 		SYSCTL_ADD_QUAD(ctx, node_children,
1755 			OID_AUTO, "lro_cnt_1024",
1756 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1757 			"lro_cnt_1024");
1758 
1759 		/* Rx Related */
1760 
1761 		SYSCTL_ADD_QUAD(ctx, node_children,
1762 			OID_AUTO, "rx_pkts",
1763 			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1764 			"No. of received packets");
1765 
1766 		SYSCTL_ADD_QUAD(ctx, node_children,
1767 			OID_AUTO, "tpa_start",
1768 			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1769 			"No. of tpa_start packets");
1770 
1771 		SYSCTL_ADD_QUAD(ctx, node_children,
1772 			OID_AUTO, "tpa_cont",
1773 			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1774 			"No. of tpa_cont packets");
1775 
1776 		SYSCTL_ADD_QUAD(ctx, node_children,
1777 			OID_AUTO, "tpa_end",
1778 			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1779 			"No. of tpa_end packets");
1780 
1781 		SYSCTL_ADD_QUAD(ctx, node_children,
1782 			OID_AUTO, "err_m_getcl",
1783 			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1784 			"err_m_getcl");
1785 
1786 		SYSCTL_ADD_QUAD(ctx, node_children,
1787 			OID_AUTO, "err_m_getjcl",
1788 			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1789 			"err_m_getjcl");
1790 
1791 		SYSCTL_ADD_QUAD(ctx, node_children,
1792 			OID_AUTO, "err_rx_hw_errors",
1793 			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1794 			"err_rx_hw_errors");
1795 
1796 		SYSCTL_ADD_QUAD(ctx, node_children,
1797 			OID_AUTO, "err_rx_alloc_errors",
1798 			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1799 			"err_rx_alloc_errors");
1800 	}
1801 
1802 	return;
1803 }
1804 
1805 static void
1806 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
1807 {
1808         struct sysctl_ctx_list	*ctx;
1809         struct sysctl_oid_list	*children;
1810 	struct sysctl_oid	*ctx_oid;
1811 
1812         ctx = device_get_sysctl_ctx(ha->pci_dev);
1813 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1814 
1815 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
1816 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
1817         children = SYSCTL_CHILDREN(ctx_oid);
1818 
1819 	SYSCTL_ADD_QUAD(ctx, children,
1820                 OID_AUTO, "no_buff_discards",
1821                 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1822                 "No. of packets discarded due to lack of buffer");
1823 
1824 	SYSCTL_ADD_QUAD(ctx, children,
1825                 OID_AUTO, "packet_too_big_discard",
1826                 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1827                 "No. of packets discarded because packet was too big");
1828 
1829 	SYSCTL_ADD_QUAD(ctx, children,
1830                 OID_AUTO, "ttl0_discard",
1831                 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
1832                 "ttl0_discard");
1833 
1834 	SYSCTL_ADD_QUAD(ctx, children,
1835                 OID_AUTO, "rx_ucast_bytes",
1836                 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1837                 "rx_ucast_bytes");
1838 
1839 	SYSCTL_ADD_QUAD(ctx, children,
1840                 OID_AUTO, "rx_mcast_bytes",
1841                 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1842                 "rx_mcast_bytes");
1843 
1844 	SYSCTL_ADD_QUAD(ctx, children,
1845                 OID_AUTO, "rx_bcast_bytes",
1846                 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1847                 "rx_bcast_bytes");
1848 
1849 	SYSCTL_ADD_QUAD(ctx, children,
1850                 OID_AUTO, "rx_ucast_pkts",
1851                 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1852                 "rx_ucast_pkts");
1853 
1854 	SYSCTL_ADD_QUAD(ctx, children,
1855                 OID_AUTO, "rx_mcast_pkts",
1856                 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1857                 "rx_mcast_pkts");
1858 
1859 	SYSCTL_ADD_QUAD(ctx, children,
1860                 OID_AUTO, "rx_bcast_pkts",
1861                 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
1862                 "rx_bcast_pkts");
1863 
1864 	SYSCTL_ADD_QUAD(ctx, children,
1865                 OID_AUTO, "mftag_filter_discards",
1866                 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1867                 "mftag_filter_discards");
1868 
1869 	SYSCTL_ADD_QUAD(ctx, children,
1870                 OID_AUTO, "mac_filter_discards",
1871                 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1872                 "mac_filter_discards");
1873 
1874 	SYSCTL_ADD_QUAD(ctx, children,
1875                 OID_AUTO, "tx_ucast_bytes",
1876                 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1877                 "tx_ucast_bytes");
1878 
1879 	SYSCTL_ADD_QUAD(ctx, children,
1880                 OID_AUTO, "tx_mcast_bytes",
1881                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1882                 "tx_mcast_bytes");
1883 
1884 	SYSCTL_ADD_QUAD(ctx, children,
1885                 OID_AUTO, "tx_bcast_bytes",
1886                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1887                 "tx_bcast_bytes");
1888 
1889 	SYSCTL_ADD_QUAD(ctx, children,
1890                 OID_AUTO, "tx_ucast_pkts",
1891                 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1892                 "tx_ucast_pkts");
1893 
1894 	SYSCTL_ADD_QUAD(ctx, children,
1895                 OID_AUTO, "tx_mcast_pkts",
1896                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1897                 "tx_mcast_pkts");
1898 
1899 	SYSCTL_ADD_QUAD(ctx, children,
1900                 OID_AUTO, "tx_bcast_pkts",
1901                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1902                 "tx_bcast_pkts");
1903 
1904 	SYSCTL_ADD_QUAD(ctx, children,
1905                 OID_AUTO, "tx_err_drop_pkts",
1906                 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1907                 "tx_err_drop_pkts");
1908 
1909 	SYSCTL_ADD_QUAD(ctx, children,
1910                 OID_AUTO, "tpa_coalesced_pkts",
1911                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1912                 "tpa_coalesced_pkts");
1913 
1914 	SYSCTL_ADD_QUAD(ctx, children,
1915                 OID_AUTO, "tpa_coalesced_events",
1916                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1917                 "tpa_coalesced_events");
1918 
1919 	SYSCTL_ADD_QUAD(ctx, children,
1920                 OID_AUTO, "tpa_aborts_num",
1921                 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1922                 "tpa_aborts_num");
1923 
1924 	SYSCTL_ADD_QUAD(ctx, children,
1925                 OID_AUTO, "tpa_not_coalesced_pkts",
1926                 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1927                 "tpa_not_coalesced_pkts");
1928 
1929 	SYSCTL_ADD_QUAD(ctx, children,
1930                 OID_AUTO, "tpa_coalesced_bytes",
1931                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1932                 "tpa_coalesced_bytes");
1933 
1934 	SYSCTL_ADD_QUAD(ctx, children,
1935                 OID_AUTO, "rx_64_byte_packets",
1936                 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1937                 "rx_64_byte_packets");
1938 
1939 	SYSCTL_ADD_QUAD(ctx, children,
1940                 OID_AUTO, "rx_65_to_127_byte_packets",
1941                 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1942                 "rx_65_to_127_byte_packets");
1943 
1944 	SYSCTL_ADD_QUAD(ctx, children,
1945                 OID_AUTO, "rx_128_to_255_byte_packets",
1946                 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1947                 "rx_128_to_255_byte_packets");
1948 
1949 	SYSCTL_ADD_QUAD(ctx, children,
1950                 OID_AUTO, "rx_256_to_511_byte_packets",
1951                 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1952                 "rx_256_to_511_byte_packets");
1953 
1954 	SYSCTL_ADD_QUAD(ctx, children,
1955                 OID_AUTO, "rx_512_to_1023_byte_packets",
1956                 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1957                 "rx_512_to_1023_byte_packets");
1958 
1959 	SYSCTL_ADD_QUAD(ctx, children,
1960                 OID_AUTO, "rx_1024_to_1518_byte_packets",
1961                 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1962                 "rx_1024_to_1518_byte_packets");
1963 
1964 	SYSCTL_ADD_QUAD(ctx, children,
1965                 OID_AUTO, "rx_1519_to_1522_byte_packets",
1966                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1967                 "rx_1519_to_1522_byte_packets");
1968 
1969 	SYSCTL_ADD_QUAD(ctx, children,
1970                 OID_AUTO, "rx_1523_to_2047_byte_packets",
1971                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1972                 "rx_1523_to_2047_byte_packets");
1973 
1974 	SYSCTL_ADD_QUAD(ctx, children,
1975                 OID_AUTO, "rx_2048_to_4095_byte_packets",
1976                 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1977                 "rx_2048_to_4095_byte_packets");
1978 
1979 	SYSCTL_ADD_QUAD(ctx, children,
1980                 OID_AUTO, "rx_4096_to_9216_byte_packets",
1981                 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1982                 "rx_4096_to_9216_byte_packets");
1983 
1984 	SYSCTL_ADD_QUAD(ctx, children,
1985                 OID_AUTO, "rx_9217_to_16383_byte_packets",
1986                 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1987                 "rx_9217_to_16383_byte_packets");
1988 
1989 	SYSCTL_ADD_QUAD(ctx, children,
1990                 OID_AUTO, "rx_crc_errors",
1991                 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1992                 "rx_crc_errors");
1993 
1994 	SYSCTL_ADD_QUAD(ctx, children,
1995                 OID_AUTO, "rx_mac_crtl_frames",
1996                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1997                 "rx_mac_crtl_frames");
1998 
1999 	SYSCTL_ADD_QUAD(ctx, children,
2000                 OID_AUTO, "rx_pause_frames",
2001                 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
2002                 "rx_pause_frames");
2003 
2004 	SYSCTL_ADD_QUAD(ctx, children,
2005                 OID_AUTO, "rx_pfc_frames",
2006                 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
2007                 "rx_pfc_frames");
2008 
2009 	SYSCTL_ADD_QUAD(ctx, children,
2010                 OID_AUTO, "rx_align_errors",
2011                 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
2012                 "rx_align_errors");
2013 
2014 	SYSCTL_ADD_QUAD(ctx, children,
2015                 OID_AUTO, "rx_carrier_errors",
2016                 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
2017                 "rx_carrier_errors");
2018 
2019 	SYSCTL_ADD_QUAD(ctx, children,
2020                 OID_AUTO, "rx_oversize_packets",
2021                 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
2022                 "rx_oversize_packets");
2023 
2024 	SYSCTL_ADD_QUAD(ctx, children,
2025                 OID_AUTO, "rx_jabbers",
2026                 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
2027                 "rx_jabbers");
2028 
2029 	SYSCTL_ADD_QUAD(ctx, children,
2030                 OID_AUTO, "rx_undersize_packets",
2031                 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
2032                 "rx_undersize_packets");
2033 
2034 	SYSCTL_ADD_QUAD(ctx, children,
2035                 OID_AUTO, "rx_fragments",
2036                 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
2037                 "rx_fragments");
2038 
2039 	SYSCTL_ADD_QUAD(ctx, children,
2040                 OID_AUTO, "tx_64_byte_packets",
2041                 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
2042                 "tx_64_byte_packets");
2043 
2044 	SYSCTL_ADD_QUAD(ctx, children,
2045                 OID_AUTO, "tx_65_to_127_byte_packets",
2046                 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
2047                 "tx_65_to_127_byte_packets");
2048 
2049 	SYSCTL_ADD_QUAD(ctx, children,
2050                 OID_AUTO, "tx_128_to_255_byte_packets",
2051                 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
2052                 "tx_128_to_255_byte_packets");
2053 
2054 	SYSCTL_ADD_QUAD(ctx, children,
2055                 OID_AUTO, "tx_256_to_511_byte_packets",
2056                 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
2057                 "tx_256_to_511_byte_packets");
2058 
2059 	SYSCTL_ADD_QUAD(ctx, children,
2060                 OID_AUTO, "tx_512_to_1023_byte_packets",
2061                 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
2062                 "tx_512_to_1023_byte_packets");
2063 
2064 	SYSCTL_ADD_QUAD(ctx, children,
2065                 OID_AUTO, "tx_1024_to_1518_byte_packets",
2066                 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
2067                 "tx_1024_to_1518_byte_packets");
2068 
2069 	SYSCTL_ADD_QUAD(ctx, children,
2070                 OID_AUTO, "tx_1519_to_2047_byte_packets",
2071                 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
2072                 "tx_1519_to_2047_byte_packets");
2073 
2074 	SYSCTL_ADD_QUAD(ctx, children,
2075                 OID_AUTO, "tx_2048_to_4095_byte_packets",
2076                 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
2077                 "tx_2048_to_4095_byte_packets");
2078 
2079 	SYSCTL_ADD_QUAD(ctx, children,
2080                 OID_AUTO, "tx_4096_to_9216_byte_packets",
2081                 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
2082                 "tx_4096_to_9216_byte_packets");
2083 
2084 	SYSCTL_ADD_QUAD(ctx, children,
2085                 OID_AUTO, "tx_9217_to_16383_byte_packets",
2086                 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
2087                 "tx_9217_to_16383_byte_packets");
2088 
2089 	SYSCTL_ADD_QUAD(ctx, children,
2090                 OID_AUTO, "tx_pause_frames",
2091                 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
2092                 "tx_pause_frames");
2093 
2094 	SYSCTL_ADD_QUAD(ctx, children,
2095                 OID_AUTO, "tx_pfc_frames",
2096                 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
2097                 "tx_pfc_frames");
2098 
2099 	SYSCTL_ADD_QUAD(ctx, children,
2100                 OID_AUTO, "tx_lpi_entry_count",
2101                 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
2102                 "tx_lpi_entry_count");
2103 
2104 	SYSCTL_ADD_QUAD(ctx, children,
2105                 OID_AUTO, "tx_total_collisions",
2106                 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
2107                 "tx_total_collisions");
2108 
2109 	SYSCTL_ADD_QUAD(ctx, children,
2110                 OID_AUTO, "brb_truncates",
2111                 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
2112                 "brb_truncates");
2113 
2114 	SYSCTL_ADD_QUAD(ctx, children,
2115                 OID_AUTO, "brb_discards",
2116                 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
2117                 "brb_discards");
2118 
2119 	SYSCTL_ADD_QUAD(ctx, children,
2120                 OID_AUTO, "rx_mac_bytes",
2121                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
2122                 "rx_mac_bytes");
2123 
2124 	SYSCTL_ADD_QUAD(ctx, children,
2125                 OID_AUTO, "rx_mac_uc_packets",
2126                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
2127                 "rx_mac_uc_packets");
2128 
2129 	SYSCTL_ADD_QUAD(ctx, children,
2130                 OID_AUTO, "rx_mac_mc_packets",
2131                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
2132                 "rx_mac_mc_packets");
2133 
2134 	SYSCTL_ADD_QUAD(ctx, children,
2135                 OID_AUTO, "rx_mac_bc_packets",
2136                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
2137                 "rx_mac_bc_packets");
2138 
2139 	SYSCTL_ADD_QUAD(ctx, children,
2140                 OID_AUTO, "rx_mac_frames_ok",
2141                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
2142                 "rx_mac_frames_ok");
2143 
2144 	SYSCTL_ADD_QUAD(ctx, children,
2145                 OID_AUTO, "tx_mac_bytes",
2146                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
2147                 "tx_mac_bytes");
2148 
2149 	SYSCTL_ADD_QUAD(ctx, children,
2150                 OID_AUTO, "tx_mac_uc_packets",
2151                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
2152                 "tx_mac_uc_packets");
2153 
2154 	SYSCTL_ADD_QUAD(ctx, children,
2155                 OID_AUTO, "tx_mac_mc_packets",
2156                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
2157                 "tx_mac_mc_packets");
2158 
2159 	SYSCTL_ADD_QUAD(ctx, children,
2160                 OID_AUTO, "tx_mac_bc_packets",
2161                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
2162                 "tx_mac_bc_packets");
2163 
2164 	SYSCTL_ADD_QUAD(ctx, children,
2165                 OID_AUTO, "tx_mac_ctrl_frames",
2166                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
2167                 "tx_mac_ctrl_frames");
2168 	return;
2169 }
2170 
2171 static void
2172 qlnx_add_sysctls(qlnx_host_t *ha)
2173 {
2174         device_t		dev = ha->pci_dev;
2175 	struct sysctl_ctx_list	*ctx;
2176 	struct sysctl_oid_list	*children;
2177 
2178 	ctx = device_get_sysctl_ctx(dev);
2179 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2180 
2181 	qlnx_add_fp_stats_sysctls(ha);
2182 	qlnx_add_sp_stats_sysctls(ha);
2183 
2184 	if (qlnx_vf_device(ha) != 0)
2185 		qlnx_add_hw_stats_sysctls(ha);
2186 
2187 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
2188 		CTLFLAG_RD, qlnx_ver_str, 0,
2189 		"Driver Version");
2190 
2191 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
2192 		CTLFLAG_RD, ha->stormfw_ver, 0,
2193 		"STORM Firmware Version");
2194 
2195 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
2196 		CTLFLAG_RD, ha->mfw_ver, 0,
2197 		"Management Firmware Version");
2198 
2199         SYSCTL_ADD_UINT(ctx, children,
2200                 OID_AUTO, "personality", CTLFLAG_RD,
2201                 &ha->personality, ha->personality,
2202 		"\tpersonality = 0 => Ethernet Only\n"
2203 		"\tpersonality = 3 => Ethernet and RoCE\n"
2204 		"\tpersonality = 4 => Ethernet and iWARP\n"
2205 		"\tpersonality = 6 => Default in Shared Memory\n");
2206 
2207         ha->dbg_level = 0;
2208         SYSCTL_ADD_UINT(ctx, children,
2209                 OID_AUTO, "debug", CTLFLAG_RW,
2210                 &ha->dbg_level, ha->dbg_level, "Debug Level");
2211 
2212         ha->dp_level = 0x01;
2213         SYSCTL_ADD_UINT(ctx, children,
2214                 OID_AUTO, "dp_level", CTLFLAG_RW,
2215                 &ha->dp_level, ha->dp_level, "DP Level");
2216 
2217         ha->dbg_trace_lro_cnt = 0;
2218         SYSCTL_ADD_UINT(ctx, children,
2219                 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
2220                 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2221 		"Trace LRO Counts");
2222 
2223         ha->dbg_trace_tso_pkt_len = 0;
2224         SYSCTL_ADD_UINT(ctx, children,
2225                 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
2226                 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2227 		"Trace TSO packet lengths");
2228 
2229         ha->dp_module = 0;
2230         SYSCTL_ADD_UINT(ctx, children,
2231                 OID_AUTO, "dp_module", CTLFLAG_RW,
2232                 &ha->dp_module, ha->dp_module, "DP Module");
2233 
2234         ha->err_inject = 0;
2235 
2236         SYSCTL_ADD_UINT(ctx, children,
2237                 OID_AUTO, "err_inject", CTLFLAG_RW,
2238                 &ha->err_inject, ha->err_inject, "Error Inject");
2239 
2240 	ha->storm_stats_enable = 0;
2241 
2242 	SYSCTL_ADD_UINT(ctx, children,
2243 		OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
2244 		&ha->storm_stats_enable, ha->storm_stats_enable,
2245 		"Enable Storm Statistics Gathering");
2246 
2247 	ha->storm_stats_index = 0;
2248 
2249 	SYSCTL_ADD_UINT(ctx, children,
2250 		OID_AUTO, "storm_stats_index", CTLFLAG_RD,
2251 		&ha->storm_stats_index, ha->storm_stats_index,
2252 		"Enable Storm Statistics Gathering Current Index");
2253 
2254 	ha->grcdump_taken = 0;
2255 	SYSCTL_ADD_UINT(ctx, children,
2256 		OID_AUTO, "grcdump_taken", CTLFLAG_RD,
2257 		&ha->grcdump_taken, ha->grcdump_taken,
2258 		"grcdump_taken");
2259 
2260 	ha->idle_chk_taken = 0;
2261 	SYSCTL_ADD_UINT(ctx, children,
2262 		OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
2263 		&ha->idle_chk_taken, ha->idle_chk_taken,
2264 		"idle_chk_taken");
2265 
2266 	SYSCTL_ADD_UINT(ctx, children,
2267 		OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
2268 		&ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2269 		"rx_coalesce_usecs");
2270 
2271 	SYSCTL_ADD_UINT(ctx, children,
2272 		OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
2273 		&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2274 		"tx_coalesce_usecs");
2275 
2276 	SYSCTL_ADD_PROC(ctx, children,
2277 	    OID_AUTO, "trigger_dump",
2278 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2279 	    (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2280 
2281 	SYSCTL_ADD_PROC(ctx, children,
2282 	    OID_AUTO, "set_rx_coalesce_usecs",
2283 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2284 	    (void *)ha, 0, qlnx_set_rx_coalesce, "I",
2285 	    "rx interrupt coalesce period microseconds");
2286 
2287 	SYSCTL_ADD_PROC(ctx, children,
2288 	    OID_AUTO, "set_tx_coalesce_usecs",
2289 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2290 	    (void *)ha, 0, qlnx_set_tx_coalesce, "I",
2291 	    "tx interrupt coalesce period microseconds");
2292 
2293 	ha->rx_pkt_threshold = 128;
2294         SYSCTL_ADD_UINT(ctx, children,
2295                 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2296                 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2297 		"No. of Rx Pkts to process at a time");
2298 
2299 	ha->rx_jumbo_buf_eq_mtu = 0;
2300         SYSCTL_ADD_UINT(ctx, children,
2301                 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2302                 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2303 		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2304 		"otherwise Rx Jumbo buffers are set to >= MTU size\n");
2305 
2306 	SYSCTL_ADD_QUAD(ctx, children,
2307                 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2308 		&ha->err_illegal_intr, "err_illegal_intr");
2309 
2310 	SYSCTL_ADD_QUAD(ctx, children,
2311                 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2312 		&ha->err_fp_null, "err_fp_null");
2313 
2314 	SYSCTL_ADD_QUAD(ctx, children,
2315                 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2316 		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2317 	return;
2318 }
2319 
2320 /*****************************************************************************
2321  * Operating System Network Interface Functions
2322  *****************************************************************************/
2323 
2324 static void
2325 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2326 {
2327 	uint16_t	device_id;
2328         struct ifnet	*ifp;
2329 
2330         ifp = ha->ifp = if_alloc(IFT_ETHER);
2331 
2332         if (ifp == NULL)
2333                 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2334 
2335         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2336 
2337 	device_id = pci_get_device(ha->pci_dev);
2338 
2339 #if __FreeBSD_version >= 1000000
2340 
2341         if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2342 		ifp->if_baudrate = IF_Gbps(40);
2343         else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2344 			(device_id == QLOGIC_PCI_DEVICE_ID_8070))
2345 		ifp->if_baudrate = IF_Gbps(25);
2346         else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2347 		ifp->if_baudrate = IF_Gbps(50);
2348         else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2349 		ifp->if_baudrate = IF_Gbps(100);
2350 
2351         ifp->if_capabilities = IFCAP_LINKSTATE;
2352 #else
2353         ifp->if_mtu = ETHERMTU;
2354 	ifp->if_baudrate = (1 * 1000 * 1000 *1000);
2355 
2356 #endif /* #if __FreeBSD_version >= 1000000 */
2357 
2358         ifp->if_init = qlnx_init;
2359         ifp->if_softc = ha;
2360         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2361         ifp->if_ioctl = qlnx_ioctl;
2362         ifp->if_transmit = qlnx_transmit;
2363         ifp->if_qflush = qlnx_qflush;
2364 
2365         IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
2366         ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
2367         IFQ_SET_READY(&ifp->if_snd);
2368 
2369 #if __FreeBSD_version >= 1100036
2370 	if_setgetcounterfn(ifp, qlnx_get_counter);
2371 #endif
2372 
2373         ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2374 
2375         memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2376 
2377 	if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2378 		!ha->primary_mac[2] && !ha->primary_mac[3] &&
2379 		!ha->primary_mac[4] && !ha->primary_mac[5]) {
2380 		uint32_t rnd;
2381 
2382 		rnd = arc4random();
2383 
2384 		ha->primary_mac[0] = 0x00;
2385 		ha->primary_mac[1] = 0x0e;
2386 		ha->primary_mac[2] = 0x1e;
2387 		ha->primary_mac[3] = rnd & 0xFF;
2388 		ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2389 		ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2390 	}
2391 
2392 	ether_ifattach(ifp, ha->primary_mac);
2393 	bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2394 
2395 	ifp->if_capabilities = IFCAP_HWCSUM;
2396 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2397 
2398 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
2399 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2400 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2401 	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2402 	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2403 	ifp->if_capabilities |= IFCAP_TSO4;
2404 	ifp->if_capabilities |= IFCAP_TSO6;
2405 	ifp->if_capabilities |= IFCAP_LRO;
2406 
2407 	ifp->if_hw_tsomax =  QLNX_MAX_TSO_FRAME_SIZE -
2408 				(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2409 	ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */;
2410 	ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE;
2411 
2412         ifp->if_capenable = ifp->if_capabilities;
2413 
2414 	ifp->if_hwassist = CSUM_IP;
2415 	ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
2416 	ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
2417 	ifp->if_hwassist |= CSUM_TSO;
2418 
2419 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2420 
2421         ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2422 		qlnx_media_status);
2423 
2424         if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2425 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2426 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2427 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2428         } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2429 			(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2430 		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2431 		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2432         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2433 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2434 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2435         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2436 		ifmedia_add(&ha->media,
2437 			(IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2438 		ifmedia_add(&ha->media,
2439 			(IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2440 		ifmedia_add(&ha->media,
2441 			(IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2442 	}
2443 
2444         ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2445         ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2446 
2447         ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2448 
2449         QL_DPRINT2(ha, "exit\n");
2450 
2451         return;
2452 }
2453 
2454 static void
2455 qlnx_init_locked(qlnx_host_t *ha)
2456 {
2457 	struct ifnet	*ifp = ha->ifp;
2458 
2459 	QL_DPRINT1(ha, "Driver Initialization start \n");
2460 
2461 	qlnx_stop(ha);
2462 
2463 	if (qlnx_load(ha) == 0) {
2464 		ifp->if_drv_flags |= IFF_DRV_RUNNING;
2465 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2466 
2467 #ifdef QLNX_ENABLE_IWARP
2468 		if (qlnx_vf_device(ha) != 0) {
2469 			qlnx_rdma_dev_open(ha);
2470 		}
2471 #endif /* #ifdef QLNX_ENABLE_IWARP */
2472 	}
2473 
2474 	return;
2475 }
2476 
2477 static void
2478 qlnx_init(void *arg)
2479 {
2480 	qlnx_host_t	*ha;
2481 
2482 	ha = (qlnx_host_t *)arg;
2483 
2484 	QL_DPRINT2(ha, "enter\n");
2485 
2486 	QLNX_LOCK(ha);
2487 	qlnx_init_locked(ha);
2488 	QLNX_UNLOCK(ha);
2489 
2490 	QL_DPRINT2(ha, "exit\n");
2491 
2492 	return;
2493 }
2494 
2495 static int
2496 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2497 {
2498 	struct ecore_filter_mcast	*mcast;
2499 	struct ecore_dev		*cdev;
2500 	int				rc;
2501 
2502 	cdev = &ha->cdev;
2503 
2504 	mcast = &ha->ecore_mcast;
2505 	bzero(mcast, sizeof(struct ecore_filter_mcast));
2506 
2507 	if (add_mac)
2508 		mcast->opcode = ECORE_FILTER_ADD;
2509 	else
2510 		mcast->opcode = ECORE_FILTER_REMOVE;
2511 
2512 	mcast->num_mc_addrs = 1;
2513 	memcpy(mcast->mac, mac_addr, ETH_ALEN);
2514 
2515 	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2516 
2517 	return (rc);
2518 }
2519 
2520 static int
2521 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2522 {
2523         int	i;
2524 
2525         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2526                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2527                         return 0; /* its been already added */
2528         }
2529 
2530         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2531                 if ((ha->mcast[i].addr[0] == 0) &&
2532                         (ha->mcast[i].addr[1] == 0) &&
2533                         (ha->mcast[i].addr[2] == 0) &&
2534                         (ha->mcast[i].addr[3] == 0) &&
2535                         (ha->mcast[i].addr[4] == 0) &&
2536                         (ha->mcast[i].addr[5] == 0)) {
2537                         if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2538                                 return (-1);
2539 
2540                         bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2541                         ha->nmcast++;
2542 
2543                         return 0;
2544                 }
2545         }
2546         return 0;
2547 }
2548 
2549 static int
2550 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2551 {
2552         int	i;
2553 
2554         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2555                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2556                         if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2557                                 return (-1);
2558 
2559                         ha->mcast[i].addr[0] = 0;
2560                         ha->mcast[i].addr[1] = 0;
2561                         ha->mcast[i].addr[2] = 0;
2562                         ha->mcast[i].addr[3] = 0;
2563                         ha->mcast[i].addr[4] = 0;
2564                         ha->mcast[i].addr[5] = 0;
2565 
2566                         ha->nmcast--;
2567 
2568                         return 0;
2569                 }
2570         }
2571         return 0;
2572 }
2573 
2574 /*
2575  * Name: qls_hw_set_multi
2576  * Function: Sets the Multicast Addresses provided the host O.S into the
2577  *      hardware (for the given interface)
2578  */
2579 static void
2580 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2581 	uint32_t add_mac)
2582 {
2583         int	i;
2584 
2585         for (i = 0; i < mcnt; i++) {
2586                 if (add_mac) {
2587                         if (qlnx_hw_add_mcast(ha, mta))
2588                                 break;
2589                 } else {
2590                         if (qlnx_hw_del_mcast(ha, mta))
2591                                 break;
2592                 }
2593 
2594                 mta += ETHER_HDR_LEN;
2595         }
2596         return;
2597 }
2598 
2599 static u_int
2600 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2601 {
2602 	uint8_t *mta = arg;
2603 
2604 	if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2605 		return (0);
2606 
2607 	bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2608 
2609 	return (1);
2610 }
2611 
2612 static int
2613 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2614 {
2615 	uint8_t		mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2616 	struct ifnet	*ifp = ha->ifp;
2617 	u_int		mcnt;
2618 
2619 	if (qlnx_vf_device(ha) == 0)
2620 		return (0);
2621 
2622 	mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2623 
2624 	QLNX_LOCK(ha);
2625 	qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2626 	QLNX_UNLOCK(ha);
2627 
2628 	return (0);
2629 }
2630 
2631 static int
2632 qlnx_set_promisc(qlnx_host_t *ha)
2633 {
2634 	int	rc = 0;
2635 	uint8_t	filter;
2636 
2637 	if (qlnx_vf_device(ha) == 0)
2638 		return (0);
2639 
2640 	filter = ha->filter;
2641 	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2642 	filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2643 
2644 	rc = qlnx_set_rx_accept_filter(ha, filter);
2645 	return (rc);
2646 }
2647 
2648 static int
2649 qlnx_set_allmulti(qlnx_host_t *ha)
2650 {
2651 	int	rc = 0;
2652 	uint8_t	filter;
2653 
2654 	if (qlnx_vf_device(ha) == 0)
2655 		return (0);
2656 
2657 	filter = ha->filter;
2658 	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2659 	rc = qlnx_set_rx_accept_filter(ha, filter);
2660 
2661 	return (rc);
2662 }
2663 
2664 static int
2665 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2666 {
2667 	int		ret = 0, mask;
2668 	struct ifreq	*ifr = (struct ifreq *)data;
2669 	struct ifaddr	*ifa = (struct ifaddr *)data;
2670 	qlnx_host_t	*ha;
2671 
2672 	ha = (qlnx_host_t *)ifp->if_softc;
2673 
2674 	switch (cmd) {
2675 	case SIOCSIFADDR:
2676 		QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2677 
2678 		if (ifa->ifa_addr->sa_family == AF_INET) {
2679 			ifp->if_flags |= IFF_UP;
2680 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2681 				QLNX_LOCK(ha);
2682 				qlnx_init_locked(ha);
2683 				QLNX_UNLOCK(ha);
2684 			}
2685 			QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2686 				   cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2687 
2688 			arp_ifinit(ifp, ifa);
2689 		} else {
2690 			ether_ioctl(ifp, cmd, data);
2691 		}
2692 		break;
2693 
2694 	case SIOCSIFMTU:
2695 		QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2696 
2697 		if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2698 			ret = EINVAL;
2699 		} else {
2700 			QLNX_LOCK(ha);
2701 			ifp->if_mtu = ifr->ifr_mtu;
2702 			ha->max_frame_size =
2703 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2704 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2705 				qlnx_init_locked(ha);
2706 			}
2707 
2708 			QLNX_UNLOCK(ha);
2709 		}
2710 
2711 		break;
2712 
2713 	case SIOCSIFFLAGS:
2714 		QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2715 
2716 		QLNX_LOCK(ha);
2717 
2718 		if (ifp->if_flags & IFF_UP) {
2719 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2720 				if ((ifp->if_flags ^ ha->if_flags) &
2721 					IFF_PROMISC) {
2722 					ret = qlnx_set_promisc(ha);
2723 				} else if ((ifp->if_flags ^ ha->if_flags) &
2724 					IFF_ALLMULTI) {
2725 					ret = qlnx_set_allmulti(ha);
2726 				}
2727 			} else {
2728 				ha->max_frame_size = ifp->if_mtu +
2729 					ETHER_HDR_LEN + ETHER_CRC_LEN;
2730 				qlnx_init_locked(ha);
2731 			}
2732 		} else {
2733 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2734 				qlnx_stop(ha);
2735 			ha->if_flags = ifp->if_flags;
2736 		}
2737 
2738 		QLNX_UNLOCK(ha);
2739 		break;
2740 
2741 	case SIOCADDMULTI:
2742 		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2743 
2744 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2745 			if (qlnx_set_multi(ha, 1))
2746 				ret = EINVAL;
2747 		}
2748 		break;
2749 
2750 	case SIOCDELMULTI:
2751 		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2752 
2753 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2754 			if (qlnx_set_multi(ha, 0))
2755 				ret = EINVAL;
2756 		}
2757 		break;
2758 
2759 	case SIOCSIFMEDIA:
2760 	case SIOCGIFMEDIA:
2761 		QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2762 
2763 		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2764 		break;
2765 
2766 	case SIOCSIFCAP:
2767 
2768 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2769 
2770 		QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2771 
2772 		if (mask & IFCAP_HWCSUM)
2773 			ifp->if_capenable ^= IFCAP_HWCSUM;
2774 		if (mask & IFCAP_TSO4)
2775 			ifp->if_capenable ^= IFCAP_TSO4;
2776 		if (mask & IFCAP_TSO6)
2777 			ifp->if_capenable ^= IFCAP_TSO6;
2778 		if (mask & IFCAP_VLAN_HWTAGGING)
2779 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2780 		if (mask & IFCAP_VLAN_HWTSO)
2781 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2782 		if (mask & IFCAP_LRO)
2783 			ifp->if_capenable ^= IFCAP_LRO;
2784 
2785 		QLNX_LOCK(ha);
2786 
2787 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2788 			qlnx_init_locked(ha);
2789 
2790 		QLNX_UNLOCK(ha);
2791 
2792 		VLAN_CAPABILITIES(ifp);
2793 		break;
2794 
2795 #if (__FreeBSD_version >= 1100101)
2796 
2797 	case SIOCGI2C:
2798 	{
2799 		struct ifi2creq i2c;
2800 		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2801 		struct ecore_ptt *p_ptt;
2802 
2803 		ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2804 
2805 		if (ret)
2806 			break;
2807 
2808 		if ((i2c.len > sizeof (i2c.data)) ||
2809 			(i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2810 			ret = EINVAL;
2811 			break;
2812 		}
2813 
2814 		p_ptt = ecore_ptt_acquire(p_hwfn);
2815 
2816 		if (!p_ptt) {
2817 			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2818 			ret = -1;
2819 			break;
2820 		}
2821 
2822 		ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2823 			(ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2824 			i2c.len, &i2c.data[0]);
2825 
2826 		ecore_ptt_release(p_hwfn, p_ptt);
2827 
2828 		if (ret) {
2829 			ret = -1;
2830 			break;
2831 		}
2832 
2833 		ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2834 
2835 		QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2836 			 len = %d addr = 0x%02x offset = 0x%04x \
2837 			 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2838 			 0x%02x 0x%02x 0x%02x\n",
2839 			ret, i2c.len, i2c.dev_addr, i2c.offset,
2840 			i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2841 			i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2842 		break;
2843 	}
2844 #endif /* #if (__FreeBSD_version >= 1100101) */
2845 
2846 	default:
2847 		QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2848 		ret = ether_ioctl(ifp, cmd, data);
2849 		break;
2850 	}
2851 
2852 	return (ret);
2853 }
2854 
2855 static int
2856 qlnx_media_change(struct ifnet *ifp)
2857 {
2858 	qlnx_host_t	*ha;
2859 	struct ifmedia	*ifm;
2860 	int		ret = 0;
2861 
2862 	ha = (qlnx_host_t *)ifp->if_softc;
2863 
2864 	QL_DPRINT2(ha, "enter\n");
2865 
2866 	ifm = &ha->media;
2867 
2868 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2869 		ret = EINVAL;
2870 
2871 	QL_DPRINT2(ha, "exit\n");
2872 
2873 	return (ret);
2874 }
2875 
2876 static void
2877 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2878 {
2879 	qlnx_host_t		*ha;
2880 
2881 	ha = (qlnx_host_t *)ifp->if_softc;
2882 
2883 	QL_DPRINT2(ha, "enter\n");
2884 
2885 	ifmr->ifm_status = IFM_AVALID;
2886 	ifmr->ifm_active = IFM_ETHER;
2887 
2888 	if (ha->link_up) {
2889 		ifmr->ifm_status |= IFM_ACTIVE;
2890 		ifmr->ifm_active |=
2891 			(IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2892 
2893 		if (ha->if_link.link_partner_caps &
2894 			(QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2895 			ifmr->ifm_active |=
2896 				(IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2897 	}
2898 
2899 	QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2900 
2901 	return;
2902 }
2903 
2904 static void
2905 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2906 	struct qlnx_tx_queue *txq)
2907 {
2908 	u16			idx;
2909 	struct mbuf		*mp;
2910 	bus_dmamap_t		map;
2911 	int			i;
2912 //	struct eth_tx_bd	*tx_data_bd;
2913 	struct eth_tx_1st_bd	*first_bd;
2914 	int			nbds = 0;
2915 
2916 	idx = txq->sw_tx_cons;
2917 	mp = txq->sw_tx_ring[idx].mp;
2918 	map = txq->sw_tx_ring[idx].map;
2919 
2920 	if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2921 		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2922 
2923 		QL_DPRINT1(ha, "(mp == NULL) "
2924 			" tx_idx = 0x%x"
2925 			" ecore_prod_idx = 0x%x"
2926 			" ecore_cons_idx = 0x%x"
2927 			" hw_bd_cons = 0x%x"
2928 			" txq_db_last = 0x%x"
2929 			" elem_left = 0x%x\n",
2930 			fp->rss_id,
2931 			ecore_chain_get_prod_idx(&txq->tx_pbl),
2932 			ecore_chain_get_cons_idx(&txq->tx_pbl),
2933 			le16toh(*txq->hw_cons_ptr),
2934 			txq->tx_db.raw,
2935 			ecore_chain_get_elem_left(&txq->tx_pbl));
2936 
2937 		fp->err_tx_free_pkt_null++;
2938 
2939 		//DEBUG
2940 		qlnx_trigger_dump(ha);
2941 
2942 		return;
2943 	} else {
2944 		QLNX_INC_OPACKETS((ha->ifp));
2945 		QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2946 
2947 		bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2948 		bus_dmamap_unload(ha->tx_tag, map);
2949 
2950 		fp->tx_pkts_freed++;
2951 		fp->tx_pkts_completed++;
2952 
2953 		m_freem(mp);
2954 	}
2955 
2956 	first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2957 	nbds = first_bd->data.nbds;
2958 
2959 //	BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2960 
2961 	for (i = 1; i < nbds; i++) {
2962 		/* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
2963 //		BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2964 	}
2965 	txq->sw_tx_ring[idx].flags = 0;
2966 	txq->sw_tx_ring[idx].mp = NULL;
2967 	txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2968 
2969 	return;
2970 }
2971 
2972 static void
2973 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2974 	struct qlnx_tx_queue *txq)
2975 {
2976 	u16 hw_bd_cons;
2977 	u16 ecore_cons_idx;
2978 	uint16_t diff;
2979 	uint16_t idx, idx2;
2980 
2981 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2982 
2983 	while (hw_bd_cons !=
2984 		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2985 		if (hw_bd_cons < ecore_cons_idx) {
2986 			diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2987 		} else {
2988 			diff = hw_bd_cons - ecore_cons_idx;
2989 		}
2990 		if ((diff > TX_RING_SIZE) ||
2991 			QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2992 			QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2993 
2994 			QL_DPRINT1(ha, "(diff = 0x%x) "
2995 				" tx_idx = 0x%x"
2996 				" ecore_prod_idx = 0x%x"
2997 				" ecore_cons_idx = 0x%x"
2998 				" hw_bd_cons = 0x%x"
2999 				" txq_db_last = 0x%x"
3000 				" elem_left = 0x%x\n",
3001 				diff,
3002 				fp->rss_id,
3003 				ecore_chain_get_prod_idx(&txq->tx_pbl),
3004 				ecore_chain_get_cons_idx(&txq->tx_pbl),
3005 				le16toh(*txq->hw_cons_ptr),
3006 				txq->tx_db.raw,
3007 				ecore_chain_get_elem_left(&txq->tx_pbl));
3008 
3009 			fp->err_tx_cons_idx_conflict++;
3010 
3011 			//DEBUG
3012 			qlnx_trigger_dump(ha);
3013 		}
3014 
3015 		idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3016 		idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
3017 		prefetch(txq->sw_tx_ring[idx].mp);
3018 		prefetch(txq->sw_tx_ring[idx2].mp);
3019 
3020 		qlnx_free_tx_pkt(ha, fp, txq);
3021 
3022 		txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3023 	}
3024 	return;
3025 }
3026 
3027 static int
3028 qlnx_transmit_locked(struct ifnet *ifp,struct qlnx_fastpath  *fp, struct mbuf  *mp)
3029 {
3030         int                     ret = 0;
3031         struct qlnx_tx_queue    *txq;
3032         qlnx_host_t *           ha;
3033         uint16_t elem_left;
3034 
3035         txq = fp->txq[0];
3036         ha = (qlnx_host_t *)fp->edev;
3037 
3038         if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) {
3039                 if(mp != NULL)
3040                         ret = drbr_enqueue(ifp, fp->tx_br, mp);
3041                 return (ret);
3042         }
3043 
3044         if(mp != NULL)
3045                 ret  = drbr_enqueue(ifp, fp->tx_br, mp);
3046 
3047         mp = drbr_peek(ifp, fp->tx_br);
3048 
3049         while (mp != NULL) {
3050                 if (qlnx_send(ha, fp, &mp)) {
3051                         if (mp != NULL) {
3052                                 drbr_putback(ifp, fp->tx_br, mp);
3053                         } else {
3054                                 fp->tx_pkts_processed++;
3055                                 drbr_advance(ifp, fp->tx_br);
3056                         }
3057                         goto qlnx_transmit_locked_exit;
3058 
3059                 } else {
3060                         drbr_advance(ifp, fp->tx_br);
3061                         fp->tx_pkts_transmitted++;
3062                         fp->tx_pkts_processed++;
3063                 }
3064 
3065                 mp = drbr_peek(ifp, fp->tx_br);
3066         }
3067 
3068 qlnx_transmit_locked_exit:
3069         if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3070                 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3071                                         < QLNX_TX_ELEM_MAX_THRESH))
3072                 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3073 
3074         QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3075         return ret;
3076 }
3077 
3078 static int
3079 qlnx_transmit(struct ifnet *ifp, struct mbuf  *mp)
3080 {
3081         qlnx_host_t		*ha = (qlnx_host_t *)ifp->if_softc;
3082         struct qlnx_fastpath	*fp;
3083         int			rss_id = 0, ret = 0;
3084 
3085 #ifdef QLNX_TRACEPERF_DATA
3086         uint64_t tx_pkts = 0, tx_compl = 0;
3087 #endif
3088 
3089         QL_DPRINT2(ha, "enter\n");
3090 
3091 #if __FreeBSD_version >= 1100000
3092         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3093 #else
3094         if (mp->m_flags & M_FLOWID)
3095 #endif
3096                 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3097 					ha->num_rss;
3098 
3099         fp = &ha->fp_array[rss_id];
3100 
3101         if (fp->tx_br == NULL) {
3102                 ret = EINVAL;
3103                 goto qlnx_transmit_exit;
3104         }
3105 
3106         if (mtx_trylock(&fp->tx_mtx)) {
3107 #ifdef QLNX_TRACEPERF_DATA
3108                         tx_pkts = fp->tx_pkts_transmitted;
3109                         tx_compl = fp->tx_pkts_completed;
3110 #endif
3111 
3112                         ret = qlnx_transmit_locked(ifp, fp, mp);
3113 
3114 #ifdef QLNX_TRACEPERF_DATA
3115                         fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3116                         fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3117 #endif
3118                         mtx_unlock(&fp->tx_mtx);
3119         } else {
3120                 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3121                         ret = drbr_enqueue(ifp, fp->tx_br, mp);
3122                         taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3123                 }
3124         }
3125 
3126 qlnx_transmit_exit:
3127 
3128         QL_DPRINT2(ha, "exit ret = %d\n", ret);
3129         return ret;
3130 }
3131 
3132 static void
3133 qlnx_qflush(struct ifnet *ifp)
3134 {
3135 	int			rss_id;
3136 	struct qlnx_fastpath	*fp;
3137 	struct mbuf		*mp;
3138 	qlnx_host_t		*ha;
3139 
3140 	ha = (qlnx_host_t *)ifp->if_softc;
3141 
3142 	QL_DPRINT2(ha, "enter\n");
3143 
3144 	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3145 		fp = &ha->fp_array[rss_id];
3146 
3147 		if (fp == NULL)
3148 			continue;
3149 
3150 		if (fp->tx_br) {
3151 			mtx_lock(&fp->tx_mtx);
3152 
3153 			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3154 				fp->tx_pkts_freed++;
3155 				m_freem(mp);
3156 			}
3157 			mtx_unlock(&fp->tx_mtx);
3158 		}
3159 	}
3160 	QL_DPRINT2(ha, "exit\n");
3161 
3162 	return;
3163 }
3164 
3165 static void
3166 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3167 {
3168 	uint32_t		offset;
3169 
3170 	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3171 
3172 	bus_write_4(ha->pci_dbells, offset, value);
3173 	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_READ);
3174 	bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
3175 
3176 	return;
3177 }
3178 
3179 static uint32_t
3180 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3181 {
3182         struct ether_vlan_header	*eh = NULL;
3183         struct ip			*ip = NULL;
3184         struct ip6_hdr			*ip6 = NULL;
3185         struct tcphdr			*th = NULL;
3186         uint32_t			ehdrlen = 0, ip_hlen = 0, offset = 0;
3187         uint16_t			etype = 0;
3188         uint8_t				buf[sizeof(struct ip6_hdr)];
3189 
3190         eh = mtod(mp, struct ether_vlan_header *);
3191 
3192         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3193                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3194                 etype = ntohs(eh->evl_proto);
3195         } else {
3196                 ehdrlen = ETHER_HDR_LEN;
3197                 etype = ntohs(eh->evl_encap_proto);
3198         }
3199 
3200         switch (etype) {
3201                 case ETHERTYPE_IP:
3202                         ip = (struct ip *)(mp->m_data + ehdrlen);
3203 
3204                         ip_hlen = sizeof (struct ip);
3205 
3206                         if (mp->m_len < (ehdrlen + ip_hlen)) {
3207                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3208                                 ip = (struct ip *)buf;
3209                         }
3210 
3211                         th = (struct tcphdr *)(ip + 1);
3212 			offset = ip_hlen + ehdrlen + (th->th_off << 2);
3213                 break;
3214 
3215                 case ETHERTYPE_IPV6:
3216                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3217 
3218                         ip_hlen = sizeof(struct ip6_hdr);
3219 
3220                         if (mp->m_len < (ehdrlen + ip_hlen)) {
3221                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3222                                         buf);
3223                                 ip6 = (struct ip6_hdr *)buf;
3224                         }
3225                         th = (struct tcphdr *)(ip6 + 1);
3226 			offset = ip_hlen + ehdrlen + (th->th_off << 2);
3227                 break;
3228 
3229                 default:
3230                 break;
3231         }
3232 
3233         return (offset);
3234 }
3235 
3236 static __inline int
3237 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3238 	uint32_t offset)
3239 {
3240 	int			i;
3241 	uint32_t		sum, nbds_in_hdr = 1;
3242         uint32_t		window;
3243         bus_dma_segment_t	*s_seg;
3244 
3245         /* If the header spans multiple segments, skip those segments */
3246 
3247         if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3248                 return (0);
3249 
3250         i = 0;
3251 
3252         while ((i < nsegs) && (offset >= segs->ds_len)) {
3253                 offset = offset - segs->ds_len;
3254                 segs++;
3255                 i++;
3256                 nbds_in_hdr++;
3257         }
3258 
3259         window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3260 
3261         nsegs = nsegs - i;
3262 
3263         while (nsegs >= window) {
3264                 sum = 0;
3265                 s_seg = segs;
3266 
3267                 for (i = 0; i < window; i++){
3268                         sum += s_seg->ds_len;
3269                         s_seg++;
3270                 }
3271 
3272                 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3273                         fp->tx_lso_wnd_min_len++;
3274                         return (-1);
3275                 }
3276 
3277                 nsegs = nsegs - 1;
3278                 segs++;
3279         }
3280 
3281 	return (0);
3282 }
3283 
3284 static int
3285 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3286 {
3287 	bus_dma_segment_t	*segs;
3288 	bus_dmamap_t		map = 0;
3289 	uint32_t		nsegs = 0;
3290 	int			ret = -1;
3291 	struct mbuf		*m_head = *m_headp;
3292 	uint16_t		idx = 0;
3293 	uint16_t		elem_left;
3294 
3295 	uint8_t			nbd = 0;
3296 	struct qlnx_tx_queue    *txq;
3297 
3298 	struct eth_tx_1st_bd    *first_bd;
3299 	struct eth_tx_2nd_bd    *second_bd;
3300 	struct eth_tx_3rd_bd    *third_bd;
3301 	struct eth_tx_bd        *tx_data_bd;
3302 
3303 	int			seg_idx = 0;
3304 	uint32_t		nbds_in_hdr = 0;
3305 	uint32_t		offset = 0;
3306 
3307 #ifdef QLNX_TRACE_PERF_DATA
3308         uint16_t                bd_used;
3309 #endif
3310 
3311 	QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3312 
3313 	if (!ha->link_up)
3314 		return (-1);
3315 
3316 	first_bd	= NULL;
3317 	second_bd	= NULL;
3318 	third_bd	= NULL;
3319 	tx_data_bd	= NULL;
3320 
3321 	txq = fp->txq[0];
3322 
3323         if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3324 		QLNX_TX_ELEM_MIN_THRESH) {
3325                 fp->tx_nsegs_gt_elem_left++;
3326                 fp->err_tx_nsegs_gt_elem_left++;
3327 
3328                 return (ENOBUFS);
3329         }
3330 
3331 	idx = txq->sw_tx_prod;
3332 
3333 	map = txq->sw_tx_ring[idx].map;
3334 	segs = txq->segs;
3335 
3336 	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3337 			BUS_DMA_NOWAIT);
3338 
3339 	if (ha->dbg_trace_tso_pkt_len) {
3340 		if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3341 			if (!fp->tx_tso_min_pkt_len) {
3342 				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3343 				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3344 			} else {
3345 				if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3346 					fp->tx_tso_min_pkt_len =
3347 						m_head->m_pkthdr.len;
3348 				if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3349 					fp->tx_tso_max_pkt_len =
3350 						m_head->m_pkthdr.len;
3351 			}
3352 		}
3353 	}
3354 
3355 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3356 		offset = qlnx_tcp_offset(ha, m_head);
3357 
3358 	if ((ret == EFBIG) ||
3359 		((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3360 			(!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3361 		((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3362 			qlnx_tso_check(fp, segs, nsegs, offset))))) {
3363 		struct mbuf *m;
3364 
3365 		QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3366 
3367 		fp->tx_defrag++;
3368 
3369 		m = m_defrag(m_head, M_NOWAIT);
3370 		if (m == NULL) {
3371 			fp->err_tx_defrag++;
3372 			fp->tx_pkts_freed++;
3373 			m_freem(m_head);
3374 			*m_headp = NULL;
3375 			QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3376 			return (ENOBUFS);
3377 		}
3378 
3379 		m_head = m;
3380 		*m_headp = m_head;
3381 
3382 		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3383 				segs, &nsegs, BUS_DMA_NOWAIT))) {
3384 			fp->err_tx_defrag_dmamap_load++;
3385 
3386 			QL_DPRINT1(ha,
3387 				"bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3388 				ret, m_head->m_pkthdr.len);
3389 
3390 			fp->tx_pkts_freed++;
3391 			m_freem(m_head);
3392 			*m_headp = NULL;
3393 
3394 			return (ret);
3395 		}
3396 
3397 		if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3398 			!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3399 			fp->err_tx_non_tso_max_seg++;
3400 
3401 			QL_DPRINT1(ha,
3402 				"(%d) nsegs too many for non-TSO [%d, %d]\n",
3403 				ret, nsegs, m_head->m_pkthdr.len);
3404 
3405 			fp->tx_pkts_freed++;
3406 			m_freem(m_head);
3407 			*m_headp = NULL;
3408 
3409 			return (ret);
3410 		}
3411 		if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3412 			offset = qlnx_tcp_offset(ha, m_head);
3413 
3414 	} else if (ret) {
3415 		fp->err_tx_dmamap_load++;
3416 
3417 		QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3418 			   ret, m_head->m_pkthdr.len);
3419 		fp->tx_pkts_freed++;
3420 		m_freem(m_head);
3421 		*m_headp = NULL;
3422 		return (ret);
3423 	}
3424 
3425 	QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3426 
3427 	if (ha->dbg_trace_tso_pkt_len) {
3428 		if (nsegs < QLNX_FP_MAX_SEGS)
3429 			fp->tx_pkts[(nsegs - 1)]++;
3430 		else
3431 			fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3432 	}
3433 
3434 #ifdef QLNX_TRACE_PERF_DATA
3435         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3436                 if(m_head->m_pkthdr.len <= 2048)
3437                         fp->tx_pkts_hist[0]++;
3438                 else if((m_head->m_pkthdr.len > 2048) &&
3439 				(m_head->m_pkthdr.len <= 4096))
3440                         fp->tx_pkts_hist[1]++;
3441                 else if((m_head->m_pkthdr.len > 4096) &&
3442 				(m_head->m_pkthdr.len <= 8192))
3443                         fp->tx_pkts_hist[2]++;
3444                 else if((m_head->m_pkthdr.len > 8192) &&
3445 				(m_head->m_pkthdr.len <= 12288 ))
3446                         fp->tx_pkts_hist[3]++;
3447                 else if((m_head->m_pkthdr.len > 11288) &&
3448 				(m_head->m_pkthdr.len <= 16394))
3449                         fp->tx_pkts_hist[4]++;
3450                 else if((m_head->m_pkthdr.len > 16384) &&
3451 				(m_head->m_pkthdr.len <= 20480))
3452                         fp->tx_pkts_hist[5]++;
3453                 else if((m_head->m_pkthdr.len > 20480) &&
3454 				(m_head->m_pkthdr.len <= 24576))
3455                         fp->tx_pkts_hist[6]++;
3456                 else if((m_head->m_pkthdr.len > 24576) &&
3457 				(m_head->m_pkthdr.len <= 28672))
3458                         fp->tx_pkts_hist[7]++;
3459                 else if((m_head->m_pkthdr.len > 28762) &&
3460 				(m_head->m_pkthdr.len <= 32768))
3461                         fp->tx_pkts_hist[8]++;
3462                 else if((m_head->m_pkthdr.len > 32768) &&
3463 				(m_head->m_pkthdr.len <= 36864))
3464                         fp->tx_pkts_hist[9]++;
3465                 else if((m_head->m_pkthdr.len > 36864) &&
3466 				(m_head->m_pkthdr.len <= 40960))
3467                         fp->tx_pkts_hist[10]++;
3468                 else if((m_head->m_pkthdr.len > 40960) &&
3469 				(m_head->m_pkthdr.len <= 45056))
3470                         fp->tx_pkts_hist[11]++;
3471                 else if((m_head->m_pkthdr.len > 45056) &&
3472 				(m_head->m_pkthdr.len <= 49152))
3473                         fp->tx_pkts_hist[12]++;
3474                 else if((m_head->m_pkthdr.len > 49512) &&
3475 				m_head->m_pkthdr.len <= 53248))
3476                         fp->tx_pkts_hist[13]++;
3477                 else if((m_head->m_pkthdr.len > 53248) &&
3478 				(m_head->m_pkthdr.len <= 57344))
3479                         fp->tx_pkts_hist[14]++;
3480                 else if((m_head->m_pkthdr.len > 53248) &&
3481 				(m_head->m_pkthdr.len <= 57344))
3482                         fp->tx_pkts_hist[15]++;
3483                 else if((m_head->m_pkthdr.len > 57344) &&
3484 				(m_head->m_pkthdr.len <= 61440))
3485                         fp->tx_pkts_hist[16]++;
3486                 else
3487                         fp->tx_pkts_hist[17]++;
3488         }
3489 
3490         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3491                 elem_left =  ecore_chain_get_elem_left(&txq->tx_pbl);
3492                 bd_used = TX_RING_SIZE - elem_left;
3493 
3494                 if(bd_used <= 100)
3495                         fp->tx_pkts_q[0]++;
3496                 else if((bd_used > 100) && (bd_used <= 500))
3497                         fp->tx_pkts_q[1]++;
3498                 else if((bd_used > 500) && (bd_used <= 1000))
3499                         fp->tx_pkts_q[2]++;
3500                 else if((bd_used > 1000) && (bd_used <= 2000))
3501                         fp->tx_pkts_q[3]++;
3502                 else if((bd_used > 3000) && (bd_used <= 4000))
3503                         fp->tx_pkts_q[4]++;
3504                 else if((bd_used > 4000) && (bd_used <= 5000))
3505                         fp->tx_pkts_q[5]++;
3506                 else if((bd_used > 6000) && (bd_used <= 7000))
3507                         fp->tx_pkts_q[6]++;
3508                 else if((bd_used > 7000) && (bd_used <= 8000))
3509                         fp->tx_pkts_q[7]++;
3510                 else if((bd_used > 8000) && (bd_used <= 9000))
3511                         fp->tx_pkts_q[8]++;
3512                 else if((bd_used > 9000) && (bd_used <= 10000))
3513                         fp->tx_pkts_q[9]++;
3514                 else if((bd_used > 10000) && (bd_used <= 11000))
3515                         fp->tx_pkts_q[10]++;
3516                 else if((bd_used > 11000) && (bd_used <= 12000))
3517                         fp->tx_pkts_q[11]++;
3518                 else if((bd_used > 12000) && (bd_used <= 13000))
3519                         fp->tx_pkts_q[12]++;
3520                 else if((bd_used > 13000) && (bd_used <= 14000))
3521                         fp->tx_pkts_q[13]++;
3522                 else if((bd_used > 14000) && (bd_used <= 15000))
3523                         fp->tx_pkts_q[14]++;
3524                else if((bd_used > 15000) && (bd_used <= 16000))
3525                         fp->tx_pkts_q[15]++;
3526                 else
3527                         fp->tx_pkts_q[16]++;
3528         }
3529 
3530 #endif /* end of QLNX_TRACE_PERF_DATA */
3531 
3532 	if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3533 		(int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3534 		QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3535 			" in chain[%d] trying to free packets\n",
3536 			nsegs, elem_left, fp->rss_id);
3537 
3538 		fp->tx_nsegs_gt_elem_left++;
3539 
3540 		(void)qlnx_tx_int(ha, fp, txq);
3541 
3542 		if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3543 			ecore_chain_get_elem_left(&txq->tx_pbl))) {
3544 			QL_DPRINT1(ha,
3545 				"(%d, 0x%x) insuffient BDs in chain[%d]\n",
3546 				nsegs, elem_left, fp->rss_id);
3547 
3548 			fp->err_tx_nsegs_gt_elem_left++;
3549 			fp->tx_ring_full = 1;
3550 			if (ha->storm_stats_enable)
3551 				ha->storm_stats_gather = 1;
3552 			return (ENOBUFS);
3553 		}
3554 	}
3555 
3556 	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3557 
3558 	txq->sw_tx_ring[idx].mp = m_head;
3559 
3560 	first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3561 
3562 	memset(first_bd, 0, sizeof(*first_bd));
3563 
3564 	first_bd->data.bd_flags.bitfields =
3565 		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3566 
3567 	BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3568 
3569 	nbd++;
3570 
3571 	if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3572 		first_bd->data.bd_flags.bitfields |=
3573 			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3574 	}
3575 
3576 	if (m_head->m_pkthdr.csum_flags &
3577 		(CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3578 		first_bd->data.bd_flags.bitfields |=
3579 			(1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3580 	}
3581 
3582         if (m_head->m_flags & M_VLANTAG) {
3583                 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3584 		first_bd->data.bd_flags.bitfields |=
3585 			(1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3586         }
3587 
3588 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3589                 first_bd->data.bd_flags.bitfields |=
3590 			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3591 		first_bd->data.bd_flags.bitfields |=
3592 			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3593 
3594 		nbds_in_hdr = 1;
3595 
3596 		if (offset == segs->ds_len) {
3597 			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3598 			segs++;
3599 			seg_idx++;
3600 
3601 			second_bd = (struct eth_tx_2nd_bd *)
3602 					ecore_chain_produce(&txq->tx_pbl);
3603 			memset(second_bd, 0, sizeof(*second_bd));
3604 			nbd++;
3605 
3606 			if (seg_idx < nsegs) {
3607 				BD_SET_UNMAP_ADDR_LEN(second_bd, \
3608 					(segs->ds_addr), (segs->ds_len));
3609 				segs++;
3610 				seg_idx++;
3611 			}
3612 
3613 			third_bd = (struct eth_tx_3rd_bd *)
3614 					ecore_chain_produce(&txq->tx_pbl);
3615 			memset(third_bd, 0, sizeof(*third_bd));
3616 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3617 			third_bd->data.bitfields |=
3618 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3619 			nbd++;
3620 
3621 			if (seg_idx < nsegs) {
3622 				BD_SET_UNMAP_ADDR_LEN(third_bd, \
3623 					(segs->ds_addr), (segs->ds_len));
3624 				segs++;
3625 				seg_idx++;
3626 			}
3627 
3628 			for (; seg_idx < nsegs; seg_idx++) {
3629 				tx_data_bd = (struct eth_tx_bd *)
3630 					ecore_chain_produce(&txq->tx_pbl);
3631 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3632 				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3633 					segs->ds_addr,\
3634 					segs->ds_len);
3635 				segs++;
3636 				nbd++;
3637 			}
3638 
3639 		} else if (offset < segs->ds_len) {
3640 			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3641 
3642 			second_bd = (struct eth_tx_2nd_bd *)
3643 					ecore_chain_produce(&txq->tx_pbl);
3644 			memset(second_bd, 0, sizeof(*second_bd));
3645 			BD_SET_UNMAP_ADDR_LEN(second_bd, \
3646 				(segs->ds_addr + offset),\
3647 				(segs->ds_len - offset));
3648 			nbd++;
3649 			segs++;
3650 
3651 			third_bd = (struct eth_tx_3rd_bd *)
3652 					ecore_chain_produce(&txq->tx_pbl);
3653 			memset(third_bd, 0, sizeof(*third_bd));
3654 
3655 			BD_SET_UNMAP_ADDR_LEN(third_bd, \
3656 					segs->ds_addr,\
3657 					segs->ds_len);
3658 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3659 			third_bd->data.bitfields |=
3660 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3661 			segs++;
3662 			nbd++;
3663 
3664 			for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3665 				tx_data_bd = (struct eth_tx_bd *)
3666 					ecore_chain_produce(&txq->tx_pbl);
3667 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3668 				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3669 					segs->ds_addr,\
3670 					segs->ds_len);
3671 				segs++;
3672 				nbd++;
3673 			}
3674 
3675 		} else {
3676 			offset = offset - segs->ds_len;
3677 			segs++;
3678 
3679 			for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3680 				if (offset)
3681 					nbds_in_hdr++;
3682 
3683 				tx_data_bd = (struct eth_tx_bd *)
3684 					ecore_chain_produce(&txq->tx_pbl);
3685 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3686 
3687 				if (second_bd == NULL) {
3688 					second_bd = (struct eth_tx_2nd_bd *)
3689 								tx_data_bd;
3690 				} else if (third_bd == NULL) {
3691 					third_bd = (struct eth_tx_3rd_bd *)
3692 								tx_data_bd;
3693 				}
3694 
3695 				if (offset && (offset < segs->ds_len)) {
3696 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3697 						segs->ds_addr, offset);
3698 
3699 					tx_data_bd = (struct eth_tx_bd *)
3700 					ecore_chain_produce(&txq->tx_pbl);
3701 
3702 					memset(tx_data_bd, 0,
3703 						sizeof(*tx_data_bd));
3704 
3705 					if (second_bd == NULL) {
3706 						second_bd =
3707 					(struct eth_tx_2nd_bd *)tx_data_bd;
3708 					} else if (third_bd == NULL) {
3709 						third_bd =
3710 					(struct eth_tx_3rd_bd *)tx_data_bd;
3711 					}
3712 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3713 						(segs->ds_addr + offset), \
3714 						(segs->ds_len - offset));
3715 					nbd++;
3716 					offset = 0;
3717 				} else {
3718 					if (offset)
3719 						offset = offset - segs->ds_len;
3720 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3721 						segs->ds_addr, segs->ds_len);
3722 				}
3723 				segs++;
3724 				nbd++;
3725 			}
3726 
3727 			if (third_bd == NULL) {
3728 				third_bd = (struct eth_tx_3rd_bd *)
3729 					ecore_chain_produce(&txq->tx_pbl);
3730 				memset(third_bd, 0, sizeof(*third_bd));
3731 			}
3732 
3733 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3734 			third_bd->data.bitfields |=
3735 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3736 		}
3737 		fp->tx_tso_pkts++;
3738 	} else {
3739 		segs++;
3740 		for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3741 			tx_data_bd = (struct eth_tx_bd *)
3742 					ecore_chain_produce(&txq->tx_pbl);
3743 			memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3744 			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3745 				segs->ds_len);
3746 			segs++;
3747 			nbd++;
3748 		}
3749 		first_bd->data.bitfields =
3750 			(m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3751 				 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3752 		first_bd->data.bitfields =
3753 			htole16(first_bd->data.bitfields);
3754 		fp->tx_non_tso_pkts++;
3755 	}
3756 
3757 	first_bd->data.nbds = nbd;
3758 
3759 	if (ha->dbg_trace_tso_pkt_len) {
3760 		if (fp->tx_tso_max_nsegs < nsegs)
3761 			fp->tx_tso_max_nsegs = nsegs;
3762 
3763 		if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3764 			fp->tx_tso_min_nsegs = nsegs;
3765 	}
3766 
3767 	txq->sw_tx_ring[idx].nsegs = nsegs;
3768 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3769 
3770 	txq->tx_db.data.bd_prod =
3771 		htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3772 
3773 	qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3774 
3775 	QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3776 	return (0);
3777 }
3778 
3779 static void
3780 qlnx_stop(qlnx_host_t *ha)
3781 {
3782 	struct ifnet	*ifp = ha->ifp;
3783 	int		i;
3784 
3785 	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3786 
3787 	/*
3788 	 * We simply lock and unlock each fp->tx_mtx to
3789 	 * propagate the if_drv_flags
3790 	 * state to each tx thread
3791 	 */
3792         QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3793 
3794 	if (ha->state == QLNX_STATE_OPEN) {
3795         	for (i = 0; i < ha->num_rss; i++) {
3796 			struct qlnx_fastpath *fp = &ha->fp_array[i];
3797 
3798 			mtx_lock(&fp->tx_mtx);
3799 			mtx_unlock(&fp->tx_mtx);
3800 
3801 			if (fp->fp_taskqueue != NULL)
3802 				taskqueue_enqueue(fp->fp_taskqueue,
3803 					&fp->fp_task);
3804 		}
3805 	}
3806 #ifdef QLNX_ENABLE_IWARP
3807 	if (qlnx_vf_device(ha) != 0) {
3808 		qlnx_rdma_dev_close(ha);
3809 	}
3810 #endif /* #ifdef QLNX_ENABLE_IWARP */
3811 
3812 	qlnx_unload(ha);
3813 
3814 	return;
3815 }
3816 
3817 static int
3818 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3819 {
3820         return(TX_RING_SIZE - 1);
3821 }
3822 
3823 uint8_t *
3824 qlnx_get_mac_addr(qlnx_host_t *ha)
3825 {
3826 	struct ecore_hwfn	*p_hwfn;
3827 	unsigned char mac[ETHER_ADDR_LEN];
3828 	uint8_t			p_is_forced;
3829 
3830 	p_hwfn = &ha->cdev.hwfns[0];
3831 
3832 	if (qlnx_vf_device(ha) != 0)
3833 		return (p_hwfn->hw_info.hw_mac_addr);
3834 
3835 	ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3836 	if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3837 		true) {
3838 		device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3839 			" mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3840 			p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3841         	memcpy(ha->primary_mac, mac, ETH_ALEN);
3842 	}
3843 
3844 	return (ha->primary_mac);
3845 }
3846 
3847 static uint32_t
3848 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3849 {
3850 	uint32_t	ifm_type = 0;
3851 
3852 	switch (if_link->media_type) {
3853 	case MEDIA_MODULE_FIBER:
3854 	case MEDIA_UNSPECIFIED:
3855 		if (if_link->speed == (100 * 1000))
3856 			ifm_type = QLNX_IFM_100G_SR4;
3857 		else if (if_link->speed == (40 * 1000))
3858 			ifm_type = IFM_40G_SR4;
3859 		else if (if_link->speed == (25 * 1000))
3860 			ifm_type = QLNX_IFM_25G_SR;
3861 		else if (if_link->speed == (10 * 1000))
3862 			ifm_type = (IFM_10G_LR | IFM_10G_SR);
3863 		else if (if_link->speed == (1 * 1000))
3864 			ifm_type = (IFM_1000_SX | IFM_1000_LX);
3865 
3866 		break;
3867 
3868 	case MEDIA_DA_TWINAX:
3869 		if (if_link->speed == (100 * 1000))
3870 			ifm_type = QLNX_IFM_100G_CR4;
3871 		else if (if_link->speed == (40 * 1000))
3872 			ifm_type = IFM_40G_CR4;
3873 		else if (if_link->speed == (25 * 1000))
3874 			ifm_type = QLNX_IFM_25G_CR;
3875 		else if (if_link->speed == (10 * 1000))
3876 			ifm_type = IFM_10G_TWINAX;
3877 
3878 		break;
3879 
3880 	default :
3881 		ifm_type = IFM_UNKNOWN;
3882 		break;
3883 	}
3884 	return (ifm_type);
3885 }
3886 
3887 /*****************************************************************************
3888  * Interrupt Service Functions
3889  *****************************************************************************/
3890 
3891 static int
3892 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3893 	struct mbuf *mp_head, uint16_t len)
3894 {
3895 	struct mbuf		*mp, *mpf, *mpl;
3896 	struct sw_rx_data	*sw_rx_data;
3897 	struct qlnx_rx_queue	*rxq;
3898 	uint16_t 		len_in_buffer;
3899 
3900 	rxq = fp->rxq;
3901 	mpf = mpl = mp = NULL;
3902 
3903 	while (len) {
3904         	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3905 
3906                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3907                 mp = sw_rx_data->data;
3908 
3909 		if (mp == NULL) {
3910                 	QL_DPRINT1(ha, "mp = NULL\n");
3911 			fp->err_rx_mp_null++;
3912         		rxq->sw_rx_cons  =
3913 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3914 
3915 			if (mpf != NULL)
3916 				m_freem(mpf);
3917 
3918 			return (-1);
3919 		}
3920 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3921 			BUS_DMASYNC_POSTREAD);
3922 
3923                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3924                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3925 				" incoming packet and reusing its buffer\n");
3926 
3927                         qlnx_reuse_rx_data(rxq);
3928                         fp->err_rx_alloc_errors++;
3929 
3930 			if (mpf != NULL)
3931 				m_freem(mpf);
3932 
3933 			return (-1);
3934 		}
3935                 ecore_chain_consume(&rxq->rx_bd_ring);
3936 
3937 		if (len > rxq->rx_buf_size)
3938 			len_in_buffer = rxq->rx_buf_size;
3939 		else
3940 			len_in_buffer = len;
3941 
3942 		len = len - len_in_buffer;
3943 
3944 		mp->m_flags &= ~M_PKTHDR;
3945 		mp->m_next = NULL;
3946 		mp->m_len = len_in_buffer;
3947 
3948 		if (mpf == NULL)
3949 			mpf = mpl = mp;
3950 		else {
3951 			mpl->m_next = mp;
3952 			mpl = mp;
3953 		}
3954 	}
3955 
3956 	if (mpf != NULL)
3957 		mp_head->m_next = mpf;
3958 
3959 	return (0);
3960 }
3961 
3962 static void
3963 qlnx_tpa_start(qlnx_host_t *ha,
3964 	struct qlnx_fastpath *fp,
3965 	struct qlnx_rx_queue *rxq,
3966 	struct eth_fast_path_rx_tpa_start_cqe *cqe)
3967 {
3968 	uint32_t		agg_index;
3969         struct ifnet		*ifp = ha->ifp;
3970 	struct mbuf		*mp;
3971 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
3972 	struct sw_rx_data	*sw_rx_data;
3973 	dma_addr_t		addr;
3974 	bus_dmamap_t		map;
3975 	struct eth_rx_bd	*rx_bd;
3976 	int			i;
3977 #if __FreeBSD_version >= 1100000
3978 	uint8_t			hash_type;
3979 #endif /* #if __FreeBSD_version >= 1100000 */
3980 
3981 	agg_index = cqe->tpa_agg_index;
3982 
3983         QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3984                 \t type = 0x%x\n \
3985                 \t bitfields = 0x%x\n \
3986                 \t seg_len = 0x%x\n \
3987                 \t pars_flags = 0x%x\n \
3988                 \t vlan_tag = 0x%x\n \
3989                 \t rss_hash = 0x%x\n \
3990                 \t len_on_first_bd = 0x%x\n \
3991                 \t placement_offset = 0x%x\n \
3992                 \t tpa_agg_index = 0x%x\n \
3993                 \t header_len = 0x%x\n \
3994                 \t ext_bd_len_list[0] = 0x%x\n \
3995                 \t ext_bd_len_list[1] = 0x%x\n \
3996                 \t ext_bd_len_list[2] = 0x%x\n \
3997                 \t ext_bd_len_list[3] = 0x%x\n \
3998                 \t ext_bd_len_list[4] = 0x%x\n",
3999                 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
4000                 cqe->pars_flags.flags, cqe->vlan_tag,
4001                 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
4002                 cqe->tpa_agg_index, cqe->header_len,
4003                 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
4004                 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
4005                 cqe->ext_bd_len_list[4]);
4006 
4007 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4008 		fp->err_rx_tpa_invalid_agg_num++;
4009 		return;
4010 	}
4011 
4012 	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4013 	bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
4014 	mp = sw_rx_data->data;
4015 
4016 	QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
4017 
4018 	if (mp == NULL) {
4019                	QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
4020 		fp->err_rx_mp_null++;
4021        		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4022 
4023 		return;
4024 	}
4025 
4026 	if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
4027 		QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
4028 			" flags = %x, dropping incoming packet\n", fp->rss_id,
4029 			rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
4030 
4031 		fp->err_rx_hw_errors++;
4032 
4033 		qlnx_reuse_rx_data(rxq);
4034 
4035 		QLNX_INC_IERRORS(ifp);
4036 
4037 		return;
4038 	}
4039 
4040 	if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4041 		QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4042 			" dropping incoming packet and reusing its buffer\n",
4043 			fp->rss_id);
4044 
4045 		fp->err_rx_alloc_errors++;
4046 		QLNX_INC_IQDROPS(ifp);
4047 
4048 		/*
4049 		 * Load the tpa mbuf into the rx ring and save the
4050 		 * posted mbuf
4051 		 */
4052 
4053 		map = sw_rx_data->map;
4054 		addr = sw_rx_data->dma_addr;
4055 
4056 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4057 
4058 		sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4059 		sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4060 		sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4061 
4062 		rxq->tpa_info[agg_index].rx_buf.data = mp;
4063 		rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4064 		rxq->tpa_info[agg_index].rx_buf.map = map;
4065 
4066 		rx_bd = (struct eth_rx_bd *)
4067 				ecore_chain_produce(&rxq->rx_bd_ring);
4068 
4069 		rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4070 		rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4071 
4072 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4073 			BUS_DMASYNC_PREREAD);
4074 
4075 		rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4076 		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4077 
4078 		ecore_chain_consume(&rxq->rx_bd_ring);
4079 
4080 		/* Now reuse any buffers posted in ext_bd_len_list */
4081 		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4082 			if (cqe->ext_bd_len_list[i] == 0)
4083 				break;
4084 
4085 			qlnx_reuse_rx_data(rxq);
4086 		}
4087 
4088 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4089 		return;
4090 	}
4091 
4092 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4093 		QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4094 			" dropping incoming packet and reusing its buffer\n",
4095 			fp->rss_id);
4096 
4097 		QLNX_INC_IQDROPS(ifp);
4098 
4099 		/* if we already have mbuf head in aggregation free it */
4100 		if (rxq->tpa_info[agg_index].mpf) {
4101 			m_freem(rxq->tpa_info[agg_index].mpf);
4102 			rxq->tpa_info[agg_index].mpl = NULL;
4103 		}
4104 		rxq->tpa_info[agg_index].mpf = mp;
4105 		rxq->tpa_info[agg_index].mpl = NULL;
4106 
4107 		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4108 		ecore_chain_consume(&rxq->rx_bd_ring);
4109 
4110 		/* Now reuse any buffers posted in ext_bd_len_list */
4111 		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4112 			if (cqe->ext_bd_len_list[i] == 0)
4113 				break;
4114 
4115 			qlnx_reuse_rx_data(rxq);
4116 		}
4117 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4118 
4119 		return;
4120 	}
4121 
4122 	/*
4123 	 * first process the ext_bd_len_list
4124 	 * if this fails then we simply drop the packet
4125 	 */
4126 	ecore_chain_consume(&rxq->rx_bd_ring);
4127 	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4128 
4129 	for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4130 		QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4131 
4132 		if (cqe->ext_bd_len_list[i] == 0)
4133 			break;
4134 
4135 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4136 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4137 			BUS_DMASYNC_POSTREAD);
4138 
4139 		mpc = sw_rx_data->data;
4140 
4141 		if (mpc == NULL) {
4142 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4143 			fp->err_rx_mp_null++;
4144 			if (mpf != NULL)
4145 				m_freem(mpf);
4146 			mpf = mpl = NULL;
4147 			rxq->tpa_info[agg_index].agg_state =
4148 						QLNX_AGG_STATE_ERROR;
4149 			ecore_chain_consume(&rxq->rx_bd_ring);
4150 			rxq->sw_rx_cons =
4151 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4152 			continue;
4153 		}
4154 
4155 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4156 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4157 				" dropping incoming packet and reusing its"
4158 				" buffer\n", fp->rss_id);
4159 
4160 			qlnx_reuse_rx_data(rxq);
4161 
4162 			if (mpf != NULL)
4163 				m_freem(mpf);
4164 			mpf = mpl = NULL;
4165 
4166 			rxq->tpa_info[agg_index].agg_state =
4167 						QLNX_AGG_STATE_ERROR;
4168 
4169 			ecore_chain_consume(&rxq->rx_bd_ring);
4170 			rxq->sw_rx_cons =
4171 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4172 
4173 			continue;
4174 		}
4175 
4176 		mpc->m_flags &= ~M_PKTHDR;
4177 		mpc->m_next = NULL;
4178 		mpc->m_len = cqe->ext_bd_len_list[i];
4179 
4180 		if (mpf == NULL) {
4181 			mpf = mpl = mpc;
4182 		} else {
4183 			mpl->m_len = ha->rx_buf_size;
4184 			mpl->m_next = mpc;
4185 			mpl = mpc;
4186 		}
4187 
4188 		ecore_chain_consume(&rxq->rx_bd_ring);
4189 		rxq->sw_rx_cons =
4190 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4191 	}
4192 
4193 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4194 		QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4195 			" incoming packet and reusing its buffer\n",
4196 			fp->rss_id);
4197 
4198 		QLNX_INC_IQDROPS(ifp);
4199 
4200 		rxq->tpa_info[agg_index].mpf = mp;
4201 		rxq->tpa_info[agg_index].mpl = NULL;
4202 
4203 		return;
4204 	}
4205 
4206         rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4207 
4208         if (mpf != NULL) {
4209                 mp->m_len = ha->rx_buf_size;
4210                 mp->m_next = mpf;
4211                 rxq->tpa_info[agg_index].mpf = mp;
4212                 rxq->tpa_info[agg_index].mpl = mpl;
4213         } else {
4214                 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4215                 rxq->tpa_info[agg_index].mpf = mp;
4216                 rxq->tpa_info[agg_index].mpl = mp;
4217                 mp->m_next = NULL;
4218         }
4219 
4220 	mp->m_flags |= M_PKTHDR;
4221 
4222 	/* assign packet to this interface interface */
4223 	mp->m_pkthdr.rcvif = ifp;
4224 
4225 	/* assume no hardware checksum has complated */
4226 	mp->m_pkthdr.csum_flags = 0;
4227 
4228 	//mp->m_pkthdr.flowid = fp->rss_id;
4229 	mp->m_pkthdr.flowid = cqe->rss_hash;
4230 
4231 #if __FreeBSD_version >= 1100000
4232 
4233 	hash_type = cqe->bitfields &
4234 			(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4235 			ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4236 
4237 	switch (hash_type) {
4238 	case RSS_HASH_TYPE_IPV4:
4239 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4240 		break;
4241 
4242 	case RSS_HASH_TYPE_TCP_IPV4:
4243 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4244 		break;
4245 
4246 	case RSS_HASH_TYPE_IPV6:
4247 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4248 		break;
4249 
4250 	case RSS_HASH_TYPE_TCP_IPV6:
4251 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4252 		break;
4253 
4254 	default:
4255 		M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4256 		break;
4257 	}
4258 
4259 #else
4260 	mp->m_flags |= M_FLOWID;
4261 #endif
4262 
4263 	mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4264 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4265 
4266 	mp->m_pkthdr.csum_data = 0xFFFF;
4267 
4268 	if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4269 		mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4270 		mp->m_flags |= M_VLANTAG;
4271 	}
4272 
4273 	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4274 
4275         QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4276 		fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4277                 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4278 
4279 	return;
4280 }
4281 
4282 static void
4283 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4284 	struct qlnx_rx_queue *rxq,
4285 	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4286 {
4287 	struct sw_rx_data	*sw_rx_data;
4288 	int			i;
4289 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4290 	struct mbuf		*mp;
4291 	uint32_t		agg_index;
4292 
4293         QL_DPRINT7(ha, "[%d]: enter\n \
4294                 \t type = 0x%x\n \
4295                 \t tpa_agg_index = 0x%x\n \
4296                 \t len_list[0] = 0x%x\n \
4297                 \t len_list[1] = 0x%x\n \
4298                 \t len_list[2] = 0x%x\n \
4299                 \t len_list[3] = 0x%x\n \
4300                 \t len_list[4] = 0x%x\n \
4301                 \t len_list[5] = 0x%x\n",
4302                 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4303                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4304                 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4305 
4306 	agg_index = cqe->tpa_agg_index;
4307 
4308 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4309 		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4310 		fp->err_rx_tpa_invalid_agg_num++;
4311 		return;
4312 	}
4313 
4314 	for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4315 		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4316 
4317 		if (cqe->len_list[i] == 0)
4318 			break;
4319 
4320 		if (rxq->tpa_info[agg_index].agg_state !=
4321 			QLNX_AGG_STATE_START) {
4322 			qlnx_reuse_rx_data(rxq);
4323 			continue;
4324 		}
4325 
4326 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4327 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4328 			BUS_DMASYNC_POSTREAD);
4329 
4330 		mpc = sw_rx_data->data;
4331 
4332 		if (mpc == NULL) {
4333 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4334 
4335 			fp->err_rx_mp_null++;
4336 			if (mpf != NULL)
4337 				m_freem(mpf);
4338 			mpf = mpl = NULL;
4339 			rxq->tpa_info[agg_index].agg_state =
4340 						QLNX_AGG_STATE_ERROR;
4341 			ecore_chain_consume(&rxq->rx_bd_ring);
4342 			rxq->sw_rx_cons =
4343 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4344 			continue;
4345 		}
4346 
4347 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4348 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4349 				" dropping incoming packet and reusing its"
4350 				" buffer\n", fp->rss_id);
4351 
4352 			qlnx_reuse_rx_data(rxq);
4353 
4354 			if (mpf != NULL)
4355 				m_freem(mpf);
4356 			mpf = mpl = NULL;
4357 
4358 			rxq->tpa_info[agg_index].agg_state =
4359 						QLNX_AGG_STATE_ERROR;
4360 
4361 			ecore_chain_consume(&rxq->rx_bd_ring);
4362 			rxq->sw_rx_cons =
4363 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4364 
4365 			continue;
4366 		}
4367 
4368 		mpc->m_flags &= ~M_PKTHDR;
4369 		mpc->m_next = NULL;
4370 		mpc->m_len = cqe->len_list[i];
4371 
4372 		if (mpf == NULL) {
4373 			mpf = mpl = mpc;
4374 		} else {
4375 			mpl->m_len = ha->rx_buf_size;
4376 			mpl->m_next = mpc;
4377 			mpl = mpc;
4378 		}
4379 
4380 		ecore_chain_consume(&rxq->rx_bd_ring);
4381 		rxq->sw_rx_cons =
4382 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4383 	}
4384 
4385         QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4386                   fp->rss_id, mpf, mpl);
4387 
4388 	if (mpf != NULL) {
4389 		mp = rxq->tpa_info[agg_index].mpl;
4390 		mp->m_len = ha->rx_buf_size;
4391 		mp->m_next = mpf;
4392 		rxq->tpa_info[agg_index].mpl = mpl;
4393 	}
4394 
4395 	return;
4396 }
4397 
4398 static int
4399 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4400 	struct qlnx_rx_queue *rxq,
4401 	struct eth_fast_path_rx_tpa_end_cqe *cqe)
4402 {
4403 	struct sw_rx_data	*sw_rx_data;
4404 	int			i;
4405 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4406 	struct mbuf		*mp;
4407 	uint32_t		agg_index;
4408 	uint32_t		len = 0;
4409         struct ifnet		*ifp = ha->ifp;
4410 
4411         QL_DPRINT7(ha, "[%d]: enter\n \
4412                 \t type = 0x%x\n \
4413                 \t tpa_agg_index = 0x%x\n \
4414                 \t total_packet_len = 0x%x\n \
4415                 \t num_of_bds = 0x%x\n \
4416                 \t end_reason = 0x%x\n \
4417                 \t num_of_coalesced_segs = 0x%x\n \
4418                 \t ts_delta = 0x%x\n \
4419                 \t len_list[0] = 0x%x\n \
4420                 \t len_list[1] = 0x%x\n \
4421                 \t len_list[2] = 0x%x\n \
4422                 \t len_list[3] = 0x%x\n",
4423                  fp->rss_id, cqe->type, cqe->tpa_agg_index,
4424                 cqe->total_packet_len, cqe->num_of_bds,
4425                 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4426                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4427                 cqe->len_list[3]);
4428 
4429 	agg_index = cqe->tpa_agg_index;
4430 
4431 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4432 		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4433 
4434 		fp->err_rx_tpa_invalid_agg_num++;
4435 		return (0);
4436 	}
4437 
4438 	for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4439 		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4440 
4441 		if (cqe->len_list[i] == 0)
4442 			break;
4443 
4444 		if (rxq->tpa_info[agg_index].agg_state !=
4445 			QLNX_AGG_STATE_START) {
4446 			QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4447 
4448 			qlnx_reuse_rx_data(rxq);
4449 			continue;
4450 		}
4451 
4452 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4453 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4454 			BUS_DMASYNC_POSTREAD);
4455 
4456 		mpc = sw_rx_data->data;
4457 
4458 		if (mpc == NULL) {
4459 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4460 
4461 			fp->err_rx_mp_null++;
4462 			if (mpf != NULL)
4463 				m_freem(mpf);
4464 			mpf = mpl = NULL;
4465 			rxq->tpa_info[agg_index].agg_state =
4466 						QLNX_AGG_STATE_ERROR;
4467 			ecore_chain_consume(&rxq->rx_bd_ring);
4468 			rxq->sw_rx_cons =
4469 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4470 			continue;
4471 		}
4472 
4473 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4474 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4475 				" dropping incoming packet and reusing its"
4476 				" buffer\n", fp->rss_id);
4477 
4478 			qlnx_reuse_rx_data(rxq);
4479 
4480 			if (mpf != NULL)
4481 				m_freem(mpf);
4482 			mpf = mpl = NULL;
4483 
4484 			rxq->tpa_info[agg_index].agg_state =
4485 						QLNX_AGG_STATE_ERROR;
4486 
4487 			ecore_chain_consume(&rxq->rx_bd_ring);
4488 			rxq->sw_rx_cons =
4489 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4490 
4491 			continue;
4492 		}
4493 
4494 		mpc->m_flags &= ~M_PKTHDR;
4495 		mpc->m_next = NULL;
4496 		mpc->m_len = cqe->len_list[i];
4497 
4498 		if (mpf == NULL) {
4499 			mpf = mpl = mpc;
4500 		} else {
4501 			mpl->m_len = ha->rx_buf_size;
4502 			mpl->m_next = mpc;
4503 			mpl = mpc;
4504 		}
4505 
4506 		ecore_chain_consume(&rxq->rx_bd_ring);
4507 		rxq->sw_rx_cons =
4508 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4509 	}
4510 
4511 	QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4512 
4513 	if (mpf != NULL) {
4514 		QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4515 
4516 		mp = rxq->tpa_info[agg_index].mpl;
4517 		mp->m_len = ha->rx_buf_size;
4518 		mp->m_next = mpf;
4519 	}
4520 
4521 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4522 		QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4523 
4524 		if (rxq->tpa_info[agg_index].mpf != NULL)
4525 			m_freem(rxq->tpa_info[agg_index].mpf);
4526 		rxq->tpa_info[agg_index].mpf = NULL;
4527 		rxq->tpa_info[agg_index].mpl = NULL;
4528 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4529 		return (0);
4530 	}
4531 
4532 	mp = rxq->tpa_info[agg_index].mpf;
4533 	m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4534 	mp->m_pkthdr.len = cqe->total_packet_len;
4535 
4536 	if (mp->m_next  == NULL)
4537 		mp->m_len = mp->m_pkthdr.len;
4538 	else {
4539 		/* compute the total packet length */
4540 		mpf = mp;
4541 		while (mpf != NULL) {
4542 			len += mpf->m_len;
4543 			mpf = mpf->m_next;
4544 		}
4545 
4546 		if (cqe->total_packet_len > len) {
4547 			mpl = rxq->tpa_info[agg_index].mpl;
4548 			mpl->m_len += (cqe->total_packet_len - len);
4549 		}
4550 	}
4551 
4552 	QLNX_INC_IPACKETS(ifp);
4553 	QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4554 
4555         QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \
4556 		m_len = 0x%x m_pkthdr_len = 0x%x\n",
4557                 fp->rss_id, mp->m_pkthdr.csum_data,
4558                 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4559 
4560 	(*ifp->if_input)(ifp, mp);
4561 
4562 	rxq->tpa_info[agg_index].mpf = NULL;
4563 	rxq->tpa_info[agg_index].mpl = NULL;
4564 	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4565 
4566 	return (cqe->num_of_coalesced_segs);
4567 }
4568 
4569 static int
4570 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4571 	int lro_enable)
4572 {
4573         uint16_t		hw_comp_cons, sw_comp_cons;
4574         int			rx_pkt = 0;
4575         struct qlnx_rx_queue	*rxq = fp->rxq;
4576         struct ifnet		*ifp = ha->ifp;
4577 	struct ecore_dev	*cdev = &ha->cdev;
4578 	struct ecore_hwfn       *p_hwfn;
4579 
4580 #ifdef QLNX_SOFT_LRO
4581 	struct lro_ctrl		*lro;
4582 
4583 	lro = &rxq->lro;
4584 #endif /* #ifdef QLNX_SOFT_LRO */
4585 
4586         hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4587         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4588 
4589 	p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4590 
4591         /* Memory barrier to prevent the CPU from doing speculative reads of CQE
4592          * / BD in the while-loop before reading hw_comp_cons. If the CQE is
4593          * read before it is written by FW, then FW writes CQE and SB, and then
4594          * the CPU reads the hw_comp_cons, it will use an old CQE.
4595          */
4596 
4597         /* Loop to complete all indicated BDs */
4598         while (sw_comp_cons != hw_comp_cons) {
4599                 union eth_rx_cqe		*cqe;
4600                 struct eth_fast_path_rx_reg_cqe	*fp_cqe;
4601                 struct sw_rx_data		*sw_rx_data;
4602 		register struct mbuf		*mp;
4603                 enum eth_rx_cqe_type		cqe_type;
4604                 uint16_t			len, pad, len_on_first_bd;
4605                 uint8_t				*data;
4606 #if __FreeBSD_version >= 1100000
4607 		uint8_t				hash_type;
4608 #endif /* #if __FreeBSD_version >= 1100000 */
4609 
4610                 /* Get the CQE from the completion ring */
4611                 cqe = (union eth_rx_cqe *)
4612                         ecore_chain_consume(&rxq->rx_comp_ring);
4613                 cqe_type = cqe->fast_path_regular.type;
4614 
4615                 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4616                         QL_DPRINT3(ha, "Got a slowath CQE\n");
4617 
4618                         ecore_eth_cqe_completion(p_hwfn,
4619                                         (struct eth_slow_path_rx_cqe *)cqe);
4620                         goto next_cqe;
4621                 }
4622 
4623 		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4624 			switch (cqe_type) {
4625 			case ETH_RX_CQE_TYPE_TPA_START:
4626 				qlnx_tpa_start(ha, fp, rxq,
4627 					&cqe->fast_path_tpa_start);
4628 				fp->tpa_start++;
4629 				break;
4630 
4631 			case ETH_RX_CQE_TYPE_TPA_CONT:
4632 				qlnx_tpa_cont(ha, fp, rxq,
4633 					&cqe->fast_path_tpa_cont);
4634 				fp->tpa_cont++;
4635 				break;
4636 
4637 			case ETH_RX_CQE_TYPE_TPA_END:
4638 				rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4639 						&cqe->fast_path_tpa_end);
4640 				fp->tpa_end++;
4641 				break;
4642 
4643 			default:
4644 				break;
4645 			}
4646 
4647                         goto next_cqe;
4648 		}
4649 
4650                 /* Get the data from the SW ring */
4651                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4652                 mp = sw_rx_data->data;
4653 
4654 		if (mp == NULL) {
4655                 	QL_DPRINT1(ha, "mp = NULL\n");
4656 			fp->err_rx_mp_null++;
4657         		rxq->sw_rx_cons  =
4658 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4659 			goto next_cqe;
4660 		}
4661 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4662 			BUS_DMASYNC_POSTREAD);
4663 
4664                 /* non GRO */
4665                 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4666                 len =  le16toh(fp_cqe->pkt_len);
4667                 pad = fp_cqe->placement_offset;
4668 #if 0
4669 		QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4670 			" len %u, parsing flags = %d pad  = %d\n",
4671 			cqe_type, fp_cqe->bitfields,
4672 			le16toh(fp_cqe->vlan_tag),
4673 			len, le16toh(fp_cqe->pars_flags.flags), pad);
4674 #endif
4675 		data = mtod(mp, uint8_t *);
4676 		data = data + pad;
4677 
4678 		if (0)
4679 			qlnx_dump_buf8(ha, __func__, data, len);
4680 
4681                 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4682                  * is always with a fixed size. If allocation fails, we take the
4683                  * consumed BD and return it to the ring in the PROD position.
4684                  * The packet that was received on that BD will be dropped (and
4685                  * not passed to the upper stack).
4686                  */
4687 		/* If this is an error packet then drop it */
4688 		if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4689 			CQE_FLAGS_ERR) {
4690 			QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4691 				" dropping incoming packet\n", sw_comp_cons,
4692 			le16toh(cqe->fast_path_regular.pars_flags.flags));
4693 			fp->err_rx_hw_errors++;
4694 
4695                         qlnx_reuse_rx_data(rxq);
4696 
4697 			QLNX_INC_IERRORS(ifp);
4698 
4699 			goto next_cqe;
4700 		}
4701 
4702                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4703                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4704 				" incoming packet and reusing its buffer\n");
4705                         qlnx_reuse_rx_data(rxq);
4706 
4707                         fp->err_rx_alloc_errors++;
4708 
4709 			QLNX_INC_IQDROPS(ifp);
4710 
4711                         goto next_cqe;
4712                 }
4713 
4714                 ecore_chain_consume(&rxq->rx_bd_ring);
4715 
4716 		len_on_first_bd = fp_cqe->len_on_first_bd;
4717 		m_adj(mp, pad);
4718 		mp->m_pkthdr.len = len;
4719 
4720 		if ((len > 60 ) && (len > len_on_first_bd)) {
4721 			mp->m_len = len_on_first_bd;
4722 
4723 			if (qlnx_rx_jumbo_chain(ha, fp, mp,
4724 				(len - len_on_first_bd)) != 0) {
4725 				m_freem(mp);
4726 
4727 				QLNX_INC_IQDROPS(ifp);
4728 
4729                         	goto next_cqe;
4730 			}
4731 
4732 		} else if (len_on_first_bd < len) {
4733 			fp->err_rx_jumbo_chain_pkts++;
4734 		} else {
4735 			mp->m_len = len;
4736 		}
4737 
4738 		mp->m_flags |= M_PKTHDR;
4739 
4740 		/* assign packet to this interface interface */
4741 		mp->m_pkthdr.rcvif = ifp;
4742 
4743 		/* assume no hardware checksum has complated */
4744 		mp->m_pkthdr.csum_flags = 0;
4745 
4746 		mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4747 
4748 #if __FreeBSD_version >= 1100000
4749 
4750 		hash_type = fp_cqe->bitfields &
4751 				(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4752 				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4753 
4754 		switch (hash_type) {
4755 		case RSS_HASH_TYPE_IPV4:
4756 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4757 			break;
4758 
4759 		case RSS_HASH_TYPE_TCP_IPV4:
4760 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4761 			break;
4762 
4763 		case RSS_HASH_TYPE_IPV6:
4764 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4765 			break;
4766 
4767 		case RSS_HASH_TYPE_TCP_IPV6:
4768 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4769 			break;
4770 
4771 		default:
4772 			M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4773 			break;
4774 		}
4775 
4776 #else
4777 		mp->m_flags |= M_FLOWID;
4778 #endif
4779 
4780 		if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4781 			mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4782 		}
4783 
4784 		if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4785 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4786 		}
4787 
4788 		if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4789 			mp->m_pkthdr.csum_data = 0xFFFF;
4790 			mp->m_pkthdr.csum_flags |=
4791 				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4792 		}
4793 
4794 		if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4795 			mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4796 			mp->m_flags |= M_VLANTAG;
4797 		}
4798 
4799 		QLNX_INC_IPACKETS(ifp);
4800 		QLNX_INC_IBYTES(ifp, len);
4801 
4802 #ifdef QLNX_SOFT_LRO
4803 
4804 		if (lro_enable) {
4805 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4806 
4807 			tcp_lro_queue_mbuf(lro, mp);
4808 
4809 #else
4810 
4811 			if (tcp_lro_rx(lro, mp, 0))
4812 				(*ifp->if_input)(ifp, mp);
4813 
4814 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4815 
4816 		} else {
4817 			(*ifp->if_input)(ifp, mp);
4818 		}
4819 #else
4820 
4821 		(*ifp->if_input)(ifp, mp);
4822 
4823 #endif /* #ifdef QLNX_SOFT_LRO */
4824 
4825                 rx_pkt++;
4826 
4827         	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4828 
4829 next_cqe:	/* don't consume bd rx buffer */
4830                 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4831                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4832 
4833 		/* CR TPA - revisit how to handle budget in TPA perhaps
4834 		   increase on "end" */
4835                 if (rx_pkt == budget)
4836                         break;
4837         } /* repeat while sw_comp_cons != hw_comp_cons... */
4838 
4839         /* Update producers */
4840         qlnx_update_rx_prod(p_hwfn, rxq);
4841 
4842         return rx_pkt;
4843 }
4844 
4845 /*
4846  * fast path interrupt
4847  */
4848 
4849 static void
4850 qlnx_fp_isr(void *arg)
4851 {
4852         qlnx_ivec_t		*ivec = arg;
4853         qlnx_host_t		*ha;
4854         struct qlnx_fastpath	*fp = NULL;
4855         int			idx;
4856 
4857         ha = ivec->ha;
4858 
4859         if (ha->state != QLNX_STATE_OPEN) {
4860                 return;
4861         }
4862 
4863         idx = ivec->rss_idx;
4864 
4865         if ((idx = ivec->rss_idx) >= ha->num_rss) {
4866                 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4867                 ha->err_illegal_intr++;
4868                 return;
4869         }
4870         fp = &ha->fp_array[idx];
4871 
4872         if (fp == NULL) {
4873                 ha->err_fp_null++;
4874         } else {
4875 		int			rx_int = 0;
4876 #ifdef QLNX_SOFT_LRO
4877 		int			total_rx_count = 0;
4878 #endif
4879 		int 			lro_enable, tc;
4880 		struct qlnx_tx_queue	*txq;
4881 		uint16_t		elem_left;
4882 
4883 		lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
4884 
4885                 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4886 
4887                 do {
4888                         for (tc = 0; tc < ha->num_tc; tc++) {
4889 				txq = fp->txq[tc];
4890 
4891 				if((int)(elem_left =
4892 					ecore_chain_get_elem_left(&txq->tx_pbl)) <
4893 						QLNX_TX_ELEM_THRESH)  {
4894                                 	if (mtx_trylock(&fp->tx_mtx)) {
4895 #ifdef QLNX_TRACE_PERF_DATA
4896 						tx_compl = fp->tx_pkts_completed;
4897 #endif
4898 
4899 						qlnx_tx_int(ha, fp, fp->txq[tc]);
4900 #ifdef QLNX_TRACE_PERF_DATA
4901 						fp->tx_pkts_compl_intr +=
4902 							(fp->tx_pkts_completed - tx_compl);
4903 						if ((fp->tx_pkts_completed - tx_compl) <= 32)
4904 							fp->tx_comInt[0]++;
4905 						else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4906 							((fp->tx_pkts_completed - tx_compl) <= 64))
4907 							fp->tx_comInt[1]++;
4908 						else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4909 							((fp->tx_pkts_completed - tx_compl) <= 128))
4910 							fp->tx_comInt[2]++;
4911 						else if(((fp->tx_pkts_completed - tx_compl) > 128))
4912 							fp->tx_comInt[3]++;
4913 #endif
4914 						mtx_unlock(&fp->tx_mtx);
4915 					}
4916 				}
4917                         }
4918 
4919                         rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4920                                         lro_enable);
4921 
4922                         if (rx_int) {
4923                                 fp->rx_pkts += rx_int;
4924 #ifdef QLNX_SOFT_LRO
4925                                 total_rx_count += rx_int;
4926 #endif
4927                         }
4928 
4929                 } while (rx_int);
4930 
4931 #ifdef QLNX_SOFT_LRO
4932                 {
4933                         struct lro_ctrl *lro;
4934 
4935                         lro = &fp->rxq->lro;
4936 
4937                         if (lro_enable && total_rx_count) {
4938 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4939 
4940 #ifdef QLNX_TRACE_LRO_CNT
4941                                 if (lro->lro_mbuf_count & ~1023)
4942                                         fp->lro_cnt_1024++;
4943                                 else if (lro->lro_mbuf_count & ~511)
4944                                         fp->lro_cnt_512++;
4945                                 else if (lro->lro_mbuf_count & ~255)
4946                                         fp->lro_cnt_256++;
4947                                 else if (lro->lro_mbuf_count & ~127)
4948                                         fp->lro_cnt_128++;
4949                                 else if (lro->lro_mbuf_count & ~63)
4950                                         fp->lro_cnt_64++;
4951 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4952 
4953                                 tcp_lro_flush_all(lro);
4954 
4955 #else
4956                                 struct lro_entry *queued;
4957 
4958                                 while ((!SLIST_EMPTY(&lro->lro_active))) {
4959                                         queued = SLIST_FIRST(&lro->lro_active);
4960                                         SLIST_REMOVE_HEAD(&lro->lro_active, \
4961                                                 next);
4962                                         tcp_lro_flush(lro, queued);
4963                                 }
4964 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4965                         }
4966                 }
4967 #endif /* #ifdef QLNX_SOFT_LRO */
4968 
4969                 ecore_sb_update_sb_idx(fp->sb_info);
4970                 rmb();
4971                 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4972         }
4973 
4974         return;
4975 }
4976 
4977 /*
4978  * slow path interrupt processing function
4979  * can be invoked in polled mode or in interrupt mode via taskqueue.
4980  */
4981 void
4982 qlnx_sp_isr(void *arg)
4983 {
4984 	struct ecore_hwfn	*p_hwfn;
4985 	qlnx_host_t		*ha;
4986 
4987 	p_hwfn = arg;
4988 
4989 	ha = (qlnx_host_t *)p_hwfn->p_dev;
4990 
4991 	ha->sp_interrupts++;
4992 
4993 	QL_DPRINT2(ha, "enter\n");
4994 
4995 	ecore_int_sp_dpc(p_hwfn);
4996 
4997 	QL_DPRINT2(ha, "exit\n");
4998 
4999 	return;
5000 }
5001 
5002 /*****************************************************************************
5003  * Support Functions for DMA'able Memory
5004  *****************************************************************************/
5005 
5006 static void
5007 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
5008 {
5009         *((bus_addr_t *)arg) = 0;
5010 
5011         if (error) {
5012                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
5013                 return;
5014         }
5015 
5016         *((bus_addr_t *)arg) = segs[0].ds_addr;
5017 
5018         return;
5019 }
5020 
5021 static int
5022 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5023 {
5024         int             ret = 0;
5025         bus_addr_t      b_addr;
5026 
5027         ret = bus_dma_tag_create(
5028                         ha->parent_tag,/* parent */
5029                         dma_buf->alignment,
5030                         ((bus_size_t)(1ULL << 32)),/* boundary */
5031                         BUS_SPACE_MAXADDR,      /* lowaddr */
5032                         BUS_SPACE_MAXADDR,      /* highaddr */
5033                         NULL, NULL,             /* filter, filterarg */
5034                         dma_buf->size,          /* maxsize */
5035                         1,                      /* nsegments */
5036                         dma_buf->size,          /* maxsegsize */
5037                         0,                      /* flags */
5038                         NULL, NULL,             /* lockfunc, lockarg */
5039                         &dma_buf->dma_tag);
5040 
5041         if (ret) {
5042                 QL_DPRINT1(ha, "could not create dma tag\n");
5043                 goto qlnx_alloc_dmabuf_exit;
5044         }
5045         ret = bus_dmamem_alloc(dma_buf->dma_tag,
5046                         (void **)&dma_buf->dma_b,
5047                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
5048                         &dma_buf->dma_map);
5049         if (ret) {
5050                 bus_dma_tag_destroy(dma_buf->dma_tag);
5051                 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
5052                 goto qlnx_alloc_dmabuf_exit;
5053         }
5054 
5055         ret = bus_dmamap_load(dma_buf->dma_tag,
5056                         dma_buf->dma_map,
5057                         dma_buf->dma_b,
5058                         dma_buf->size,
5059                         qlnx_dmamap_callback,
5060                         &b_addr, BUS_DMA_NOWAIT);
5061 
5062         if (ret || !b_addr) {
5063                 bus_dma_tag_destroy(dma_buf->dma_tag);
5064                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
5065                         dma_buf->dma_map);
5066                 ret = -1;
5067                 goto qlnx_alloc_dmabuf_exit;
5068         }
5069 
5070         dma_buf->dma_addr = b_addr;
5071 
5072 qlnx_alloc_dmabuf_exit:
5073 
5074         return ret;
5075 }
5076 
5077 static void
5078 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5079 {
5080 	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
5081         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
5082         bus_dma_tag_destroy(dma_buf->dma_tag);
5083 	return;
5084 }
5085 
5086 void *
5087 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5088 {
5089 	qlnx_dma_t	dma_buf;
5090 	qlnx_dma_t	*dma_p;
5091 	qlnx_host_t	*ha __unused;
5092 
5093 	ha = (qlnx_host_t *)ecore_dev;
5094 
5095 	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5096 
5097 	memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5098 
5099 	dma_buf.size = size + PAGE_SIZE;
5100 	dma_buf.alignment = 8;
5101 
5102 	if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5103 		return (NULL);
5104 	bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5105 
5106 	*phys = dma_buf.dma_addr;
5107 
5108 	dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5109 
5110 	memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5111 
5112 	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5113 		(void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5114 		dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5115 
5116 	return (dma_buf.dma_b);
5117 }
5118 
5119 void
5120 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5121 	uint32_t size)
5122 {
5123 	qlnx_dma_t dma_buf, *dma_p;
5124 	qlnx_host_t	*ha;
5125 
5126 	ha = (qlnx_host_t *)ecore_dev;
5127 
5128 	if (v_addr == NULL)
5129 		return;
5130 
5131 	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5132 
5133 	dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5134 
5135 	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5136 		(void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5137 		dma_p->dma_b, (void *)dma_p->dma_addr, size);
5138 
5139 	dma_buf = *dma_p;
5140 
5141 	if (!ha->qlnxr_debug)
5142 	qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5143 	return;
5144 }
5145 
5146 static int
5147 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5148 {
5149         int             ret;
5150         device_t        dev;
5151 
5152         dev = ha->pci_dev;
5153 
5154         /*
5155          * Allocate parent DMA Tag
5156          */
5157         ret = bus_dma_tag_create(
5158                         bus_get_dma_tag(dev),   /* parent */
5159                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
5160                         BUS_SPACE_MAXADDR,      /* lowaddr */
5161                         BUS_SPACE_MAXADDR,      /* highaddr */
5162                         NULL, NULL,             /* filter, filterarg */
5163                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5164                         0,                      /* nsegments */
5165                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5166                         0,                      /* flags */
5167                         NULL, NULL,             /* lockfunc, lockarg */
5168                         &ha->parent_tag);
5169 
5170         if (ret) {
5171                 QL_DPRINT1(ha, "could not create parent dma tag\n");
5172                 return (-1);
5173         }
5174 
5175         ha->flags.parent_tag = 1;
5176 
5177         return (0);
5178 }
5179 
5180 static void
5181 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5182 {
5183         if (ha->parent_tag != NULL) {
5184                 bus_dma_tag_destroy(ha->parent_tag);
5185 		ha->parent_tag = NULL;
5186         }
5187 	return;
5188 }
5189 
5190 static int
5191 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5192 {
5193         if (bus_dma_tag_create(NULL,    /* parent */
5194                 1, 0,    /* alignment, bounds */
5195                 BUS_SPACE_MAXADDR,       /* lowaddr */
5196                 BUS_SPACE_MAXADDR,       /* highaddr */
5197                 NULL, NULL,      /* filter, filterarg */
5198                 QLNX_MAX_TSO_FRAME_SIZE,     /* maxsize */
5199                 QLNX_MAX_SEGMENTS,        /* nsegments */
5200                 QLNX_MAX_TX_MBUF_SIZE,	  /* maxsegsize */
5201                 0,        /* flags */
5202                 NULL,    /* lockfunc */
5203                 NULL,    /* lockfuncarg */
5204                 &ha->tx_tag)) {
5205                 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5206                 return (-1);
5207         }
5208 
5209 	return (0);
5210 }
5211 
5212 static void
5213 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5214 {
5215         if (ha->tx_tag != NULL) {
5216                 bus_dma_tag_destroy(ha->tx_tag);
5217 		ha->tx_tag = NULL;
5218         }
5219 	return;
5220 }
5221 
5222 static int
5223 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5224 {
5225         if (bus_dma_tag_create(NULL,    /* parent */
5226                         1, 0,    /* alignment, bounds */
5227                         BUS_SPACE_MAXADDR,       /* lowaddr */
5228                         BUS_SPACE_MAXADDR,       /* highaddr */
5229                         NULL, NULL,      /* filter, filterarg */
5230                         MJUM9BYTES,     /* maxsize */
5231                         1,        /* nsegments */
5232                         MJUM9BYTES,        /* maxsegsize */
5233                         0,        /* flags */
5234                         NULL,    /* lockfunc */
5235                         NULL,    /* lockfuncarg */
5236                         &ha->rx_tag)) {
5237                 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5238 
5239                 return (-1);
5240         }
5241 	return (0);
5242 }
5243 
5244 static void
5245 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5246 {
5247         if (ha->rx_tag != NULL) {
5248                 bus_dma_tag_destroy(ha->rx_tag);
5249 		ha->rx_tag = NULL;
5250         }
5251 	return;
5252 }
5253 
5254 /*********************************
5255  * Exported functions
5256  *********************************/
5257 uint32_t
5258 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5259 {
5260 	uint32_t bar_size;
5261 
5262 	bar_id = bar_id * 2;
5263 
5264 	bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5265 				SYS_RES_MEMORY,
5266 				PCIR_BAR(bar_id));
5267 
5268 	return (bar_size);
5269 }
5270 
5271 uint32_t
5272 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5273 {
5274 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5275 				pci_reg, 1);
5276 	return 0;
5277 }
5278 
5279 uint32_t
5280 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5281 	uint16_t *reg_value)
5282 {
5283 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5284 				pci_reg, 2);
5285 	return 0;
5286 }
5287 
5288 uint32_t
5289 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5290 	uint32_t *reg_value)
5291 {
5292 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5293 				pci_reg, 4);
5294 	return 0;
5295 }
5296 
5297 void
5298 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5299 {
5300 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5301 		pci_reg, reg_value, 1);
5302 	return;
5303 }
5304 
5305 void
5306 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5307 	uint16_t reg_value)
5308 {
5309 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5310 		pci_reg, reg_value, 2);
5311 	return;
5312 }
5313 
5314 void
5315 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5316 	uint32_t reg_value)
5317 {
5318 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5319 		pci_reg, reg_value, 4);
5320 	return;
5321 }
5322 
5323 int
5324 qlnx_pci_find_capability(void *ecore_dev, int cap)
5325 {
5326 	int		reg;
5327 	qlnx_host_t	*ha;
5328 
5329 	ha = ecore_dev;
5330 
5331 	if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5332 		return reg;
5333 	else {
5334 		QL_DPRINT1(ha, "failed\n");
5335 		return 0;
5336 	}
5337 }
5338 
5339 int
5340 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5341 {
5342 	int		reg;
5343 	qlnx_host_t	*ha;
5344 
5345 	ha = ecore_dev;
5346 
5347 	if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5348 		return reg;
5349 	else {
5350 		QL_DPRINT1(ha, "failed\n");
5351 		return 0;
5352 	}
5353 }
5354 
5355 uint32_t
5356 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5357 {
5358 	uint32_t		data32;
5359 	struct ecore_hwfn	*p_hwfn;
5360 
5361 	p_hwfn = hwfn;
5362 
5363 	data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5364 			(bus_size_t)(p_hwfn->reg_offset + reg_addr));
5365 
5366 	return (data32);
5367 }
5368 
5369 void
5370 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5371 {
5372 	struct ecore_hwfn	*p_hwfn = hwfn;
5373 
5374 	bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5375 		(bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5376 
5377 	return;
5378 }
5379 
5380 void
5381 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5382 {
5383 	struct ecore_hwfn	*p_hwfn = hwfn;
5384 
5385 	bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5386 		(bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5387 	return;
5388 }
5389 
5390 void
5391 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5392 {
5393 	struct ecore_dev	*cdev;
5394 	struct ecore_hwfn	*p_hwfn;
5395 	uint32_t	offset;
5396 
5397 	p_hwfn = hwfn;
5398 
5399 	cdev = p_hwfn->p_dev;
5400 
5401 	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5402 	bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5403 
5404 	return;
5405 }
5406 
5407 void
5408 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5409 {
5410 	struct ecore_hwfn	*p_hwfn = hwfn;
5411 
5412 	bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5413 		(bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5414 
5415 	return;
5416 }
5417 
5418 uint32_t
5419 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5420 {
5421 	uint32_t		data32;
5422 	bus_size_t		offset;
5423 	struct ecore_dev	*cdev;
5424 
5425 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5426 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5427 
5428 	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5429 
5430 	return (data32);
5431 }
5432 
5433 void
5434 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5435 {
5436 	bus_size_t		offset;
5437 	struct ecore_dev	*cdev;
5438 
5439 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5440 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5441 
5442 	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5443 
5444 	return;
5445 }
5446 
5447 void
5448 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5449 {
5450 	bus_size_t		offset;
5451 	struct ecore_dev	*cdev;
5452 
5453 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5454 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5455 
5456 	bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5457 	return;
5458 }
5459 
5460 void *
5461 qlnx_zalloc(uint32_t size)
5462 {
5463 	caddr_t	va;
5464 
5465 	va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT);
5466 	bzero(va, size);
5467 	return ((void *)va);
5468 }
5469 
5470 void
5471 qlnx_barrier(void *p_hwfn)
5472 {
5473 	qlnx_host_t	*ha;
5474 
5475 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5476 	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_WRITE);
5477 }
5478 
5479 void
5480 qlnx_link_update(void *p_hwfn)
5481 {
5482 	qlnx_host_t	*ha;
5483 	int		prev_link_state;
5484 
5485 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5486 
5487 	qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5488 
5489 	prev_link_state = ha->link_up;
5490 	ha->link_up = ha->if_link.link_up;
5491 
5492         if (prev_link_state !=  ha->link_up) {
5493                 if (ha->link_up) {
5494                         if_link_state_change(ha->ifp, LINK_STATE_UP);
5495                 } else {
5496                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5497                 }
5498         }
5499 #ifndef QLNX_VF
5500 #ifdef CONFIG_ECORE_SRIOV
5501 
5502 	if (qlnx_vf_device(ha) != 0) {
5503 		if (ha->sriov_initialized)
5504 			qlnx_inform_vf_link_state(p_hwfn, ha);
5505 	}
5506 
5507 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5508 #endif /* #ifdef QLNX_VF */
5509 
5510         return;
5511 }
5512 
5513 static void
5514 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5515 	struct ecore_vf_acquire_sw_info *p_sw_info)
5516 {
5517 	p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5518 					(QLNX_VERSION_MINOR << 16) |
5519 					 QLNX_VERSION_BUILD;
5520 	p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5521 
5522 	return;
5523 }
5524 
5525 void
5526 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5527 	void *p_sw_info)
5528 {
5529 	__qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5530 
5531 	return;
5532 }
5533 
5534 void
5535 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5536 	struct qlnx_link_output *if_link)
5537 {
5538 	struct ecore_mcp_link_params    link_params;
5539 	struct ecore_mcp_link_state     link_state;
5540 	uint8_t				p_change;
5541 	struct ecore_ptt *p_ptt = NULL;
5542 
5543 	memset(if_link, 0, sizeof(*if_link));
5544 	memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5545 	memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5546 
5547 	ha = (qlnx_host_t *)hwfn->p_dev;
5548 
5549 	/* Prepare source inputs */
5550 	/* we only deal with physical functions */
5551 	if (qlnx_vf_device(ha) != 0) {
5552         	p_ptt = ecore_ptt_acquire(hwfn);
5553 
5554 	        if (p_ptt == NULL) {
5555 			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5556 			return;
5557 		}
5558 
5559 		ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5560 		ecore_ptt_release(hwfn, p_ptt);
5561 
5562 		memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5563 			sizeof(link_params));
5564 		memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5565 			sizeof(link_state));
5566 	} else {
5567 		ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5568 		ecore_vf_read_bulletin(hwfn, &p_change);
5569 		ecore_vf_get_link_params(hwfn, &link_params);
5570 		ecore_vf_get_link_state(hwfn, &link_state);
5571 	}
5572 
5573 	/* Set the link parameters to pass to protocol driver */
5574 	if (link_state.link_up) {
5575 		if_link->link_up = true;
5576 		if_link->speed = link_state.speed;
5577 	}
5578 
5579 	if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5580 
5581 	if (link_params.speed.autoneg)
5582 		if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5583 
5584 	if (link_params.pause.autoneg ||
5585 		(link_params.pause.forced_rx && link_params.pause.forced_tx))
5586 		if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5587 
5588 	if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5589 		link_params.pause.forced_tx)
5590 		if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5591 
5592 	if (link_params.speed.advertised_speeds &
5593 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5594 		if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5595                                            QLNX_LINK_CAP_1000baseT_Full;
5596 
5597 	if (link_params.speed.advertised_speeds &
5598 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5599 		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5600 
5601 	if (link_params.speed.advertised_speeds &
5602 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5603 		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5604 
5605 	if (link_params.speed.advertised_speeds &
5606 		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5607 		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5608 
5609 	if (link_params.speed.advertised_speeds &
5610 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5611 		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5612 
5613 	if (link_params.speed.advertised_speeds &
5614 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5615 		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5616 
5617 	if_link->advertised_caps = if_link->supported_caps;
5618 
5619 	if_link->autoneg = link_params.speed.autoneg;
5620 	if_link->duplex = QLNX_LINK_DUPLEX;
5621 
5622 	/* Link partner capabilities */
5623 
5624 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5625 		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5626 
5627 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5628 		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5629 
5630 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5631 		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5632 
5633 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5634 		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5635 
5636 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5637 		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5638 
5639 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5640 		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5641 
5642 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5643 		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5644 
5645 	if (link_state.an_complete)
5646 		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5647 
5648 	if (link_state.partner_adv_pause)
5649 		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5650 
5651 	if ((link_state.partner_adv_pause ==
5652 		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5653 		(link_state.partner_adv_pause ==
5654 			ECORE_LINK_PARTNER_BOTH_PAUSE))
5655 		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5656 
5657 	return;
5658 }
5659 
5660 void
5661 qlnx_schedule_recovery(void *p_hwfn)
5662 {
5663 	qlnx_host_t	*ha;
5664 
5665 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5666 
5667 	if (qlnx_vf_device(ha) != 0) {
5668 		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5669 	}
5670 
5671 	return;
5672 }
5673 
5674 static int
5675 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5676 {
5677         int	rc, i;
5678 
5679         for (i = 0; i < cdev->num_hwfns; i++) {
5680                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5681                 p_hwfn->pf_params = *func_params;
5682 
5683 #ifdef QLNX_ENABLE_IWARP
5684 		if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5685 			p_hwfn->using_ll2 = true;
5686 		}
5687 #endif /* #ifdef QLNX_ENABLE_IWARP */
5688         }
5689 
5690         rc = ecore_resc_alloc(cdev);
5691         if (rc)
5692                 goto qlnx_nic_setup_exit;
5693 
5694         ecore_resc_setup(cdev);
5695 
5696 qlnx_nic_setup_exit:
5697 
5698         return rc;
5699 }
5700 
5701 static int
5702 qlnx_nic_start(struct ecore_dev *cdev)
5703 {
5704         int				rc;
5705 	struct ecore_hw_init_params	params;
5706 
5707 	bzero(&params, sizeof (struct ecore_hw_init_params));
5708 
5709 	params.p_tunn = NULL;
5710 	params.b_hw_start = true;
5711 	params.int_mode = cdev->int_mode;
5712 	params.allow_npar_tx_switch = true;
5713 	params.bin_fw_data = NULL;
5714 
5715         rc = ecore_hw_init(cdev, &params);
5716         if (rc) {
5717                 ecore_resc_free(cdev);
5718                 return rc;
5719         }
5720 
5721         return 0;
5722 }
5723 
5724 static int
5725 qlnx_slowpath_start(qlnx_host_t *ha)
5726 {
5727 	struct ecore_dev	*cdev;
5728 	struct ecore_pf_params	pf_params;
5729 	int			rc;
5730 
5731 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5732 	pf_params.eth_pf_params.num_cons  =
5733 		(ha->num_rss) * (ha->num_tc + 1);
5734 
5735 #ifdef QLNX_ENABLE_IWARP
5736 	if (qlnx_vf_device(ha) != 0) {
5737 		if(ha->personality == ECORE_PCI_ETH_IWARP) {
5738 			device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5739 			pf_params.rdma_pf_params.num_qps = 1024;
5740 			pf_params.rdma_pf_params.num_srqs = 1024;
5741 			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5742 			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5743 		} else if(ha->personality == ECORE_PCI_ETH_ROCE) {
5744 			device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5745 			pf_params.rdma_pf_params.num_qps = 8192;
5746 			pf_params.rdma_pf_params.num_srqs = 8192;
5747 			//pf_params.rdma_pf_params.min_dpis = 0;
5748 			pf_params.rdma_pf_params.min_dpis = 8;
5749 			pf_params.rdma_pf_params.roce_edpm_mode = 0;
5750 			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5751 			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5752 		}
5753 	}
5754 #endif /* #ifdef QLNX_ENABLE_IWARP */
5755 
5756 	cdev = &ha->cdev;
5757 
5758 	rc = qlnx_nic_setup(cdev, &pf_params);
5759         if (rc)
5760                 goto qlnx_slowpath_start_exit;
5761 
5762         cdev->int_mode = ECORE_INT_MODE_MSIX;
5763         cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5764 
5765 #ifdef QLNX_MAX_COALESCE
5766 	cdev->rx_coalesce_usecs = 255;
5767 	cdev->tx_coalesce_usecs = 255;
5768 #endif
5769 
5770 	rc = qlnx_nic_start(cdev);
5771 
5772 	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5773 	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5774 
5775 #ifdef QLNX_USER_LLDP
5776 	(void)qlnx_set_lldp_tlvx(ha, NULL);
5777 #endif /* #ifdef QLNX_USER_LLDP */
5778 
5779 qlnx_slowpath_start_exit:
5780 
5781 	return (rc);
5782 }
5783 
5784 static int
5785 qlnx_slowpath_stop(qlnx_host_t *ha)
5786 {
5787 	struct ecore_dev	*cdev;
5788 	device_t		dev = ha->pci_dev;
5789 	int			i;
5790 
5791 	cdev = &ha->cdev;
5792 
5793 	ecore_hw_stop(cdev);
5794 
5795  	for (i = 0; i < ha->cdev.num_hwfns; i++) {
5796         	if (ha->sp_handle[i])
5797                 	(void)bus_teardown_intr(dev, ha->sp_irq[i],
5798 				ha->sp_handle[i]);
5799 
5800 		ha->sp_handle[i] = NULL;
5801 
5802         	if (ha->sp_irq[i])
5803 			(void) bus_release_resource(dev, SYS_RES_IRQ,
5804 				ha->sp_irq_rid[i], ha->sp_irq[i]);
5805 		ha->sp_irq[i] = NULL;
5806 	}
5807 
5808         ecore_resc_free(cdev);
5809 
5810         return 0;
5811 }
5812 
5813 static void
5814 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5815 	char ver_str[VER_SIZE])
5816 {
5817         int	i;
5818 
5819         memcpy(cdev->name, name, NAME_SIZE);
5820 
5821         for_each_hwfn(cdev, i) {
5822                 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5823         }
5824 
5825         cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5826 
5827 	return ;
5828 }
5829 
5830 void
5831 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5832 {
5833 	enum ecore_mcp_protocol_type	type;
5834 	union ecore_mcp_protocol_stats	*stats;
5835 	struct ecore_eth_stats		eth_stats;
5836 	qlnx_host_t			*ha;
5837 
5838 	ha = cdev;
5839 	stats = proto_stats;
5840 	type = proto_type;
5841 
5842         switch (type) {
5843         case ECORE_MCP_LAN_STATS:
5844                 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5845                 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5846                 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5847                 stats->lan_stats.fcs_err = -1;
5848                 break;
5849 
5850 	default:
5851 		ha->err_get_proto_invalid_type++;
5852 
5853 		QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5854 		break;
5855 	}
5856 	return;
5857 }
5858 
5859 static int
5860 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5861 {
5862 	struct ecore_hwfn	*p_hwfn;
5863 	struct ecore_ptt	*p_ptt;
5864 
5865 	p_hwfn = &ha->cdev.hwfns[0];
5866 	p_ptt = ecore_ptt_acquire(p_hwfn);
5867 
5868 	if (p_ptt ==  NULL) {
5869                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5870                 return (-1);
5871 	}
5872 	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5873 
5874 	ecore_ptt_release(p_hwfn, p_ptt);
5875 
5876 	return (0);
5877 }
5878 
5879 static int
5880 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5881 {
5882 	struct ecore_hwfn	*p_hwfn;
5883 	struct ecore_ptt	*p_ptt;
5884 
5885 	p_hwfn = &ha->cdev.hwfns[0];
5886 	p_ptt = ecore_ptt_acquire(p_hwfn);
5887 
5888 	if (p_ptt ==  NULL) {
5889                 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5890                 return (-1);
5891 	}
5892 	ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5893 
5894 	ecore_ptt_release(p_hwfn, p_ptt);
5895 
5896 	return (0);
5897 }
5898 
5899 static int
5900 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5901 {
5902 	bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5903 	bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5904 	bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5905 
5906         return 0;
5907 }
5908 
5909 static void
5910 qlnx_init_fp(qlnx_host_t *ha)
5911 {
5912 	int rss_id, txq_array_index, tc;
5913 
5914 	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5915 		struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5916 
5917 		fp->rss_id = rss_id;
5918 		fp->edev = ha;
5919 		fp->sb_info = &ha->sb_array[rss_id];
5920 		fp->rxq = &ha->rxq_array[rss_id];
5921 		fp->rxq->rxq_id = rss_id;
5922 
5923 		for (tc = 0; tc < ha->num_tc; tc++) {
5924                         txq_array_index = tc * ha->num_rss + rss_id;
5925                         fp->txq[tc] = &ha->txq_array[txq_array_index];
5926                         fp->txq[tc]->index = txq_array_index;
5927 		}
5928 
5929 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5930 			rss_id);
5931 
5932 		fp->tx_ring_full = 0;
5933 
5934 		/* reset all the statistics counters */
5935 
5936 		fp->tx_pkts_processed = 0;
5937 		fp->tx_pkts_freed = 0;
5938 		fp->tx_pkts_transmitted = 0;
5939 		fp->tx_pkts_completed = 0;
5940 
5941 #ifdef QLNX_TRACE_PERF_DATA
5942 		fp->tx_pkts_trans_ctx = 0;
5943 		fp->tx_pkts_compl_ctx = 0;
5944 		fp->tx_pkts_trans_fp = 0;
5945 		fp->tx_pkts_compl_fp = 0;
5946 		fp->tx_pkts_compl_intr = 0;
5947 #endif
5948 		fp->tx_lso_wnd_min_len = 0;
5949 		fp->tx_defrag = 0;
5950 		fp->tx_nsegs_gt_elem_left = 0;
5951 		fp->tx_tso_max_nsegs = 0;
5952 		fp->tx_tso_min_nsegs = 0;
5953 		fp->err_tx_nsegs_gt_elem_left = 0;
5954 		fp->err_tx_dmamap_create = 0;
5955 		fp->err_tx_defrag_dmamap_load = 0;
5956 		fp->err_tx_non_tso_max_seg = 0;
5957 		fp->err_tx_dmamap_load = 0;
5958 		fp->err_tx_defrag = 0;
5959 		fp->err_tx_free_pkt_null = 0;
5960 		fp->err_tx_cons_idx_conflict = 0;
5961 
5962 		fp->rx_pkts = 0;
5963 		fp->err_m_getcl = 0;
5964 		fp->err_m_getjcl = 0;
5965         }
5966 	return;
5967 }
5968 
5969 void
5970 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5971 {
5972 	struct ecore_dev	*cdev;
5973 
5974 	cdev = &ha->cdev;
5975 
5976         if (sb_info->sb_virt) {
5977                 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5978 			(sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5979 		sb_info->sb_virt = NULL;
5980 	}
5981 }
5982 
5983 static int
5984 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5985 	void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5986 {
5987         struct ecore_hwfn	*p_hwfn;
5988         int			hwfn_index, rc;
5989         u16			rel_sb_id;
5990 
5991         hwfn_index = sb_id % cdev->num_hwfns;
5992         p_hwfn = &cdev->hwfns[hwfn_index];
5993         rel_sb_id = sb_id / cdev->num_hwfns;
5994 
5995         QL_DPRINT2(((qlnx_host_t *)cdev),
5996                 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5997                 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5998                 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5999                 sb_virt_addr, (void *)sb_phy_addr);
6000 
6001         rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
6002                              sb_virt_addr, sb_phy_addr, rel_sb_id);
6003 
6004         return rc;
6005 }
6006 
6007 /* This function allocates fast-path status block memory */
6008 int
6009 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
6010 {
6011         struct status_block_e4	*sb_virt;
6012         bus_addr_t		sb_phys;
6013         int			rc;
6014 	uint32_t		size;
6015 	struct ecore_dev	*cdev;
6016 
6017 	cdev = &ha->cdev;
6018 
6019 	size = sizeof(*sb_virt);
6020 	sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
6021 
6022         if (!sb_virt) {
6023                 QL_DPRINT1(ha, "Status block allocation failed\n");
6024                 return -ENOMEM;
6025         }
6026 
6027         rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
6028         if (rc) {
6029                 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
6030         }
6031 
6032 	return rc;
6033 }
6034 
6035 static void
6036 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6037 {
6038         int			i;
6039 	struct sw_rx_data	*rx_buf;
6040 
6041         for (i = 0; i < rxq->num_rx_buffers; i++) {
6042                 rx_buf = &rxq->sw_rx_ring[i];
6043 
6044 		if (rx_buf->data != NULL) {
6045 			if (rx_buf->map != NULL) {
6046 				bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6047 				bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6048 				rx_buf->map = NULL;
6049 			}
6050 			m_freem(rx_buf->data);
6051 			rx_buf->data = NULL;
6052 		}
6053         }
6054 	return;
6055 }
6056 
6057 static void
6058 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6059 {
6060 	struct ecore_dev	*cdev;
6061 	int			i;
6062 
6063 	cdev = &ha->cdev;
6064 
6065 	qlnx_free_rx_buffers(ha, rxq);
6066 
6067 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6068 		qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
6069 		if (rxq->tpa_info[i].mpf != NULL)
6070 			m_freem(rxq->tpa_info[i].mpf);
6071 	}
6072 
6073 	bzero((void *)&rxq->sw_rx_ring[0],
6074 		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
6075 
6076         /* Free the real RQ ring used by FW */
6077 	if (rxq->rx_bd_ring.p_virt_addr) {
6078                 ecore_chain_free(cdev, &rxq->rx_bd_ring);
6079                 rxq->rx_bd_ring.p_virt_addr = NULL;
6080         }
6081 
6082         /* Free the real completion ring used by FW */
6083         if (rxq->rx_comp_ring.p_virt_addr &&
6084                         rxq->rx_comp_ring.pbl_sp.p_virt_table) {
6085                 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6086                 rxq->rx_comp_ring.p_virt_addr = NULL;
6087                 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6088         }
6089 
6090 #ifdef QLNX_SOFT_LRO
6091 	{
6092 		struct lro_ctrl *lro;
6093 
6094 		lro = &rxq->lro;
6095 		tcp_lro_free(lro);
6096 	}
6097 #endif /* #ifdef QLNX_SOFT_LRO */
6098 
6099 	return;
6100 }
6101 
6102 static int
6103 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6104 {
6105         register struct mbuf	*mp;
6106         uint16_t		rx_buf_size;
6107         struct sw_rx_data	*sw_rx_data;
6108         struct eth_rx_bd	*rx_bd;
6109         dma_addr_t		dma_addr;
6110 	bus_dmamap_t		map;
6111 	bus_dma_segment_t       segs[1];
6112 	int			nsegs;
6113 	int			ret;
6114 
6115         rx_buf_size = rxq->rx_buf_size;
6116 
6117 	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6118 
6119         if (mp == NULL) {
6120                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6121                 return -ENOMEM;
6122         }
6123 
6124 	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6125 
6126 	map = (bus_dmamap_t)0;
6127 
6128 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6129 			BUS_DMA_NOWAIT);
6130 	dma_addr = segs[0].ds_addr;
6131 
6132 	if (ret || !dma_addr || (nsegs != 1)) {
6133 		m_freem(mp);
6134 		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6135                            ret, (long long unsigned int)dma_addr, nsegs);
6136 		return -ENOMEM;
6137 	}
6138 
6139         sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6140         sw_rx_data->data = mp;
6141         sw_rx_data->dma_addr = dma_addr;
6142         sw_rx_data->map = map;
6143 
6144         /* Advance PROD and get BD pointer */
6145         rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6146         rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6147         rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6148 	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6149 
6150         rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6151 
6152         return 0;
6153 }
6154 
6155 static int
6156 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6157 	struct qlnx_agg_info *tpa)
6158 {
6159 	struct mbuf		*mp;
6160         dma_addr_t		dma_addr;
6161 	bus_dmamap_t		map;
6162 	bus_dma_segment_t       segs[1];
6163 	int			nsegs;
6164 	int			ret;
6165         struct sw_rx_data	*rx_buf;
6166 
6167 	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6168 
6169         if (mp == NULL) {
6170                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6171                 return -ENOMEM;
6172         }
6173 
6174 	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6175 
6176 	map = (bus_dmamap_t)0;
6177 
6178 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6179 			BUS_DMA_NOWAIT);
6180 	dma_addr = segs[0].ds_addr;
6181 
6182 	if (ret || !dma_addr || (nsegs != 1)) {
6183 		m_freem(mp);
6184 		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6185 			ret, (long long unsigned int)dma_addr, nsegs);
6186 		return -ENOMEM;
6187 	}
6188 
6189         rx_buf = &tpa->rx_buf;
6190 
6191 	memset(rx_buf, 0, sizeof (struct sw_rx_data));
6192 
6193         rx_buf->data = mp;
6194         rx_buf->dma_addr = dma_addr;
6195         rx_buf->map = map;
6196 
6197 	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6198 
6199 	return (0);
6200 }
6201 
6202 static void
6203 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6204 {
6205         struct sw_rx_data	*rx_buf;
6206 
6207 	rx_buf = &tpa->rx_buf;
6208 
6209 	if (rx_buf->data != NULL) {
6210 		if (rx_buf->map != NULL) {
6211 			bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6212 			bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6213 			rx_buf->map = NULL;
6214 		}
6215 		m_freem(rx_buf->data);
6216 		rx_buf->data = NULL;
6217 	}
6218 	return;
6219 }
6220 
6221 /* This function allocates all memory needed per Rx queue */
6222 static int
6223 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6224 {
6225         int			i, rc, num_allocated;
6226 	struct ecore_dev	 *cdev;
6227 
6228 	cdev = &ha->cdev;
6229 
6230         rxq->num_rx_buffers = RX_RING_SIZE;
6231 
6232 	rxq->rx_buf_size = ha->rx_buf_size;
6233 
6234         /* Allocate the parallel driver ring for Rx buffers */
6235 	bzero((void *)&rxq->sw_rx_ring[0],
6236 		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
6237 
6238         /* Allocate FW Rx ring  */
6239 
6240         rc = ecore_chain_alloc(cdev,
6241 			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6242 			ECORE_CHAIN_MODE_NEXT_PTR,
6243 			ECORE_CHAIN_CNT_TYPE_U16,
6244 			RX_RING_SIZE,
6245 			sizeof(struct eth_rx_bd),
6246 			&rxq->rx_bd_ring, NULL);
6247 
6248         if (rc)
6249                 goto err;
6250 
6251         /* Allocate FW completion ring */
6252         rc = ecore_chain_alloc(cdev,
6253                         ECORE_CHAIN_USE_TO_CONSUME,
6254                         ECORE_CHAIN_MODE_PBL,
6255 			ECORE_CHAIN_CNT_TYPE_U16,
6256                         RX_RING_SIZE,
6257                         sizeof(union eth_rx_cqe),
6258                         &rxq->rx_comp_ring, NULL);
6259 
6260         if (rc)
6261                 goto err;
6262 
6263         /* Allocate buffers for the Rx ring */
6264 
6265 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6266 		rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6267 			&rxq->tpa_info[i]);
6268                 if (rc)
6269                         break;
6270 	}
6271 
6272         for (i = 0; i < rxq->num_rx_buffers; i++) {
6273                 rc = qlnx_alloc_rx_buffer(ha, rxq);
6274                 if (rc)
6275                         break;
6276         }
6277         num_allocated = i;
6278         if (!num_allocated) {
6279 		QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6280                 goto err;
6281         } else if (num_allocated < rxq->num_rx_buffers) {
6282 		QL_DPRINT1(ha, "Allocated less buffers than"
6283 			" desired (%d allocated)\n", num_allocated);
6284         }
6285 
6286 #ifdef QLNX_SOFT_LRO
6287 
6288 	{
6289 		struct lro_ctrl *lro;
6290 
6291 		lro = &rxq->lro;
6292 
6293 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
6294 		if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
6295 			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6296 				   rxq->rxq_id);
6297 			goto err;
6298 		}
6299 #else
6300 		if (tcp_lro_init(lro)) {
6301 			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6302 				   rxq->rxq_id);
6303 			goto err;
6304 		}
6305 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
6306 
6307 		lro->ifp = ha->ifp;
6308 	}
6309 #endif /* #ifdef QLNX_SOFT_LRO */
6310         return 0;
6311 
6312 err:
6313         qlnx_free_mem_rxq(ha, rxq);
6314         return -ENOMEM;
6315 }
6316 
6317 static void
6318 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6319 	struct qlnx_tx_queue *txq)
6320 {
6321 	struct ecore_dev	*cdev;
6322 
6323 	cdev = &ha->cdev;
6324 
6325 	bzero((void *)&txq->sw_tx_ring[0],
6326 		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6327 
6328         /* Free the real RQ ring used by FW */
6329         if (txq->tx_pbl.p_virt_addr) {
6330                 ecore_chain_free(cdev, &txq->tx_pbl);
6331                 txq->tx_pbl.p_virt_addr = NULL;
6332         }
6333 	return;
6334 }
6335 
6336 /* This function allocates all memory needed per Tx queue */
6337 static int
6338 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6339 	struct qlnx_tx_queue *txq)
6340 {
6341         int			ret = ECORE_SUCCESS;
6342         union eth_tx_bd_types	*p_virt;
6343 	struct ecore_dev	*cdev;
6344 
6345 	cdev = &ha->cdev;
6346 
6347 	bzero((void *)&txq->sw_tx_ring[0],
6348 		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6349 
6350         /* Allocate the real Tx ring to be used by FW */
6351         ret = ecore_chain_alloc(cdev,
6352                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6353                         ECORE_CHAIN_MODE_PBL,
6354 			ECORE_CHAIN_CNT_TYPE_U16,
6355                         TX_RING_SIZE,
6356                         sizeof(*p_virt),
6357                         &txq->tx_pbl, NULL);
6358 
6359         if (ret != ECORE_SUCCESS) {
6360                 goto err;
6361         }
6362 
6363 	txq->num_tx_buffers = TX_RING_SIZE;
6364 
6365         return 0;
6366 
6367 err:
6368         qlnx_free_mem_txq(ha, fp, txq);
6369         return -ENOMEM;
6370 }
6371 
6372 static void
6373 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6374 {
6375 	struct mbuf	*mp;
6376 	struct ifnet	*ifp = ha->ifp;
6377 
6378 	if (mtx_initialized(&fp->tx_mtx)) {
6379 		if (fp->tx_br != NULL) {
6380 			mtx_lock(&fp->tx_mtx);
6381 
6382 			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6383 				fp->tx_pkts_freed++;
6384 				m_freem(mp);
6385 			}
6386 
6387 			mtx_unlock(&fp->tx_mtx);
6388 
6389 			buf_ring_free(fp->tx_br, M_DEVBUF);
6390 			fp->tx_br = NULL;
6391 		}
6392 		mtx_destroy(&fp->tx_mtx);
6393 	}
6394 	return;
6395 }
6396 
6397 static void
6398 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6399 {
6400         int	tc;
6401 
6402         qlnx_free_mem_sb(ha, fp->sb_info);
6403 
6404         qlnx_free_mem_rxq(ha, fp->rxq);
6405 
6406         for (tc = 0; tc < ha->num_tc; tc++)
6407                 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6408 
6409 	return;
6410 }
6411 
6412 static int
6413 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6414 {
6415 	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6416 		"qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6417 
6418 	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6419 
6420         fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6421                                    M_NOWAIT, &fp->tx_mtx);
6422         if (fp->tx_br == NULL) {
6423 		QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6424 			ha->dev_unit, fp->rss_id);
6425 		return -ENOMEM;
6426         }
6427 	return 0;
6428 }
6429 
6430 static int
6431 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6432 {
6433         int	rc, tc;
6434 
6435         rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6436         if (rc)
6437                 goto err;
6438 
6439 	if (ha->rx_jumbo_buf_eq_mtu) {
6440 		if (ha->max_frame_size <= MCLBYTES)
6441 			ha->rx_buf_size = MCLBYTES;
6442 		else if (ha->max_frame_size <= MJUMPAGESIZE)
6443 			ha->rx_buf_size = MJUMPAGESIZE;
6444 		else if (ha->max_frame_size <= MJUM9BYTES)
6445 			ha->rx_buf_size = MJUM9BYTES;
6446 		else if (ha->max_frame_size <= MJUM16BYTES)
6447 			ha->rx_buf_size = MJUM16BYTES;
6448 	} else {
6449 		if (ha->max_frame_size <= MCLBYTES)
6450 			ha->rx_buf_size = MCLBYTES;
6451 		else
6452 			ha->rx_buf_size = MJUMPAGESIZE;
6453 	}
6454 
6455         rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6456         if (rc)
6457                 goto err;
6458 
6459         for (tc = 0; tc < ha->num_tc; tc++) {
6460                 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6461                 if (rc)
6462                         goto err;
6463         }
6464 
6465         return 0;
6466 
6467 err:
6468         qlnx_free_mem_fp(ha, fp);
6469         return -ENOMEM;
6470 }
6471 
6472 static void
6473 qlnx_free_mem_load(qlnx_host_t *ha)
6474 {
6475         int			i;
6476 
6477         for (i = 0; i < ha->num_rss; i++) {
6478                 struct qlnx_fastpath *fp = &ha->fp_array[i];
6479 
6480                 qlnx_free_mem_fp(ha, fp);
6481         }
6482 	return;
6483 }
6484 
6485 static int
6486 qlnx_alloc_mem_load(qlnx_host_t *ha)
6487 {
6488         int	rc = 0, rss_id;
6489 
6490         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6491                 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6492 
6493                 rc = qlnx_alloc_mem_fp(ha, fp);
6494                 if (rc)
6495                         break;
6496         }
6497 	return (rc);
6498 }
6499 
6500 static int
6501 qlnx_start_vport(struct ecore_dev *cdev,
6502                 u8 vport_id,
6503                 u16 mtu,
6504                 u8 drop_ttl0_flg,
6505                 u8 inner_vlan_removal_en_flg,
6506 		u8 tx_switching,
6507 		u8 hw_lro_enable)
6508 {
6509         int					rc, i;
6510 	struct ecore_sp_vport_start_params	vport_start_params = { 0 };
6511 	qlnx_host_t				*ha __unused;
6512 
6513 	ha = (qlnx_host_t *)cdev;
6514 
6515 	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6516 	vport_start_params.tx_switching = 0;
6517 	vport_start_params.handle_ptp_pkts = 0;
6518 	vport_start_params.only_untagged = 0;
6519 	vport_start_params.drop_ttl0 = drop_ttl0_flg;
6520 
6521 	vport_start_params.tpa_mode =
6522 		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6523 	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6524 
6525 	vport_start_params.vport_id = vport_id;
6526 	vport_start_params.mtu = mtu;
6527 
6528 	QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6529 
6530         for_each_hwfn(cdev, i) {
6531                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6532 
6533 		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6534 		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6535 
6536                 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6537 
6538                 if (rc) {
6539 			QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6540 				" with MTU %d\n" , vport_id, mtu);
6541                         return -ENOMEM;
6542                 }
6543 
6544                 ecore_hw_start_fastpath(p_hwfn);
6545 
6546 		QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6547 			vport_id, mtu);
6548         }
6549         return 0;
6550 }
6551 
6552 static int
6553 qlnx_update_vport(struct ecore_dev *cdev,
6554 	struct qlnx_update_vport_params *params)
6555 {
6556         struct ecore_sp_vport_update_params	sp_params;
6557         int					rc, i, j, fp_index;
6558 	struct ecore_hwfn			*p_hwfn;
6559         struct ecore_rss_params			*rss;
6560 	qlnx_host_t				*ha = (qlnx_host_t *)cdev;
6561         struct qlnx_fastpath			*fp;
6562 
6563         memset(&sp_params, 0, sizeof(sp_params));
6564         /* Translate protocol params into sp params */
6565         sp_params.vport_id = params->vport_id;
6566 
6567         sp_params.update_vport_active_rx_flg =
6568 		params->update_vport_active_rx_flg;
6569         sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6570 
6571         sp_params.update_vport_active_tx_flg =
6572 		params->update_vport_active_tx_flg;
6573         sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6574 
6575         sp_params.update_inner_vlan_removal_flg =
6576                 params->update_inner_vlan_removal_flg;
6577         sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6578 
6579 	sp_params.sge_tpa_params = params->sge_tpa_params;
6580 
6581         /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
6582          * We need to re-fix the rss values per engine for CMT.
6583          */
6584 	if (params->rss_params->update_rss_config)
6585         sp_params.rss_params = params->rss_params;
6586 	else
6587 		sp_params.rss_params =  NULL;
6588 
6589         for_each_hwfn(cdev, i) {
6590 		p_hwfn = &cdev->hwfns[i];
6591 
6592 		if ((cdev->num_hwfns > 1) &&
6593 			params->rss_params->update_rss_config &&
6594 			params->rss_params->rss_enable) {
6595 			rss = params->rss_params;
6596 
6597 			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6598 				fp_index = ((cdev->num_hwfns * j) + i) %
6599 						ha->num_rss;
6600 
6601                 		fp = &ha->fp_array[fp_index];
6602                         	rss->rss_ind_table[j] = fp->rxq->handle;
6603 			}
6604 
6605 			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6606 				QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6607 					rss->rss_ind_table[j],
6608 					rss->rss_ind_table[j+1],
6609 					rss->rss_ind_table[j+2],
6610 					rss->rss_ind_table[j+3],
6611 					rss->rss_ind_table[j+4],
6612 					rss->rss_ind_table[j+5],
6613 					rss->rss_ind_table[j+6],
6614 					rss->rss_ind_table[j+7]);
6615 					j += 8;
6616 			}
6617 		}
6618 
6619                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6620 
6621 		QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6622 
6623                 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6624                                            ECORE_SPQ_MODE_EBLOCK, NULL);
6625                 if (rc) {
6626 			QL_DPRINT1(ha, "Failed to update VPORT\n");
6627                         return rc;
6628                 }
6629 
6630                 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6631 			rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6632 			params->vport_id, params->vport_active_tx_flg,
6633 			params->vport_active_rx_flg,
6634 			params->update_vport_active_tx_flg,
6635 			params->update_vport_active_rx_flg);
6636         }
6637 
6638         return 0;
6639 }
6640 
6641 static void
6642 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6643 {
6644         struct eth_rx_bd	*rx_bd_cons =
6645 					ecore_chain_consume(&rxq->rx_bd_ring);
6646         struct eth_rx_bd	*rx_bd_prod =
6647 					ecore_chain_produce(&rxq->rx_bd_ring);
6648         struct sw_rx_data	*sw_rx_data_cons =
6649 					&rxq->sw_rx_ring[rxq->sw_rx_cons];
6650         struct sw_rx_data	*sw_rx_data_prod =
6651 					&rxq->sw_rx_ring[rxq->sw_rx_prod];
6652 
6653         sw_rx_data_prod->data = sw_rx_data_cons->data;
6654         memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6655 
6656         rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6657         rxq->sw_rx_prod  = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6658 
6659 	return;
6660 }
6661 
6662 static void
6663 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6664 {
6665 
6666         uint16_t	 	bd_prod;
6667         uint16_t		cqe_prod;
6668 	union {
6669 		struct eth_rx_prod_data rx_prod_data;
6670 		uint32_t		data32;
6671 	} rx_prods;
6672 
6673         bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6674         cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6675 
6676         /* Update producers */
6677         rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6678         rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6679 
6680         /* Make sure that the BD and SGE data is updated before updating the
6681          * producers since FW might read the BD/SGE right after the producer
6682          * is updated.
6683          */
6684 	wmb();
6685 
6686         internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6687 		sizeof(rx_prods), &rx_prods.data32);
6688 
6689         /* mmiowb is needed to synchronize doorbell writes from more than one
6690          * processor. It guarantees that the write arrives to the device before
6691          * the napi lock is released and another qlnx_poll is called (possibly
6692          * on another CPU). Without this barrier, the next doorbell can bypass
6693          * this doorbell. This is applicable to IA64/Altix systems.
6694          */
6695         wmb();
6696 
6697 	return;
6698 }
6699 
6700 static uint32_t qlnx_hash_key[] = {
6701                 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6702                 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6703                 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6704                 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6705                 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6706                 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6707                 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6708                 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6709                 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6710                 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6711 
6712 static int
6713 qlnx_start_queues(qlnx_host_t *ha)
6714 {
6715         int				rc, tc, i, vport_id = 0,
6716 					drop_ttl0_flg = 1, vlan_removal_en = 1,
6717 					tx_switching = 0, hw_lro_enable = 0;
6718         struct ecore_dev		*cdev = &ha->cdev;
6719         struct ecore_rss_params		*rss_params = &ha->rss_params;
6720         struct qlnx_update_vport_params	vport_update_params;
6721         struct ifnet			*ifp;
6722         struct ecore_hwfn		*p_hwfn;
6723 	struct ecore_sge_tpa_params	tpa_params;
6724 	struct ecore_queue_start_common_params qparams;
6725         struct qlnx_fastpath		*fp;
6726 
6727 	ifp = ha->ifp;
6728 
6729 	QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6730 
6731         if (!ha->num_rss) {
6732 		QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6733 			" are no Rx queues\n");
6734                 return -EINVAL;
6735         }
6736 
6737 #ifndef QLNX_SOFT_LRO
6738         hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
6739 #endif /* #ifndef QLNX_SOFT_LRO */
6740 
6741         rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6742 			vlan_removal_en, tx_switching, hw_lro_enable);
6743 
6744         if (rc) {
6745                 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6746                 return rc;
6747         }
6748 
6749 	QL_DPRINT2(ha, "Start vport ramrod passed, "
6750 		"vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6751 		vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
6752 
6753         for_each_rss(i) {
6754 		struct ecore_rxq_start_ret_params rx_ret_params;
6755 		struct ecore_txq_start_ret_params tx_ret_params;
6756 
6757                 fp = &ha->fp_array[i];
6758         	p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6759 
6760 		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6761 		bzero(&rx_ret_params,
6762 			sizeof (struct ecore_rxq_start_ret_params));
6763 
6764 		qparams.queue_id = i ;
6765 		qparams.vport_id = vport_id;
6766 		qparams.stats_id = vport_id;
6767 		qparams.p_sb = fp->sb_info;
6768 		qparams.sb_idx = RX_PI;
6769 
6770 
6771 		rc = ecore_eth_rx_queue_start(p_hwfn,
6772 			p_hwfn->hw_info.opaque_fid,
6773 			&qparams,
6774 			fp->rxq->rx_buf_size,	/* bd_max_bytes */
6775 			/* bd_chain_phys_addr */
6776 			fp->rxq->rx_bd_ring.p_phys_addr,
6777 			/* cqe_pbl_addr */
6778 			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6779 			/* cqe_pbl_size */
6780 			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6781 			&rx_ret_params);
6782 
6783                 if (rc) {
6784                 	QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6785                         return rc;
6786                 }
6787 
6788 		fp->rxq->hw_rxq_prod_addr	= rx_ret_params.p_prod;
6789 		fp->rxq->handle			= rx_ret_params.p_handle;
6790                 fp->rxq->hw_cons_ptr		=
6791 				&fp->sb_info->sb_virt->pi_array[RX_PI];
6792 
6793                 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6794 
6795                 for (tc = 0; tc < ha->num_tc; tc++) {
6796                         struct qlnx_tx_queue *txq = fp->txq[tc];
6797 
6798 			bzero(&qparams,
6799 				sizeof(struct ecore_queue_start_common_params));
6800 			bzero(&tx_ret_params,
6801 				sizeof (struct ecore_txq_start_ret_params));
6802 
6803 			qparams.queue_id = txq->index / cdev->num_hwfns ;
6804 			qparams.vport_id = vport_id;
6805 			qparams.stats_id = vport_id;
6806 			qparams.p_sb = fp->sb_info;
6807 			qparams.sb_idx = TX_PI(tc);
6808 
6809 			rc = ecore_eth_tx_queue_start(p_hwfn,
6810 				p_hwfn->hw_info.opaque_fid,
6811 				&qparams, tc,
6812 				/* bd_chain_phys_addr */
6813 				ecore_chain_get_pbl_phys(&txq->tx_pbl),
6814 				ecore_chain_get_page_cnt(&txq->tx_pbl),
6815 				&tx_ret_params);
6816 
6817                         if (rc) {
6818                 		QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6819 					   txq->index, rc);
6820                                 return rc;
6821                         }
6822 
6823 			txq->doorbell_addr = tx_ret_params.p_doorbell;
6824 			txq->handle = tx_ret_params.p_handle;
6825 
6826                         txq->hw_cons_ptr =
6827                                 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6828                         SET_FIELD(txq->tx_db.data.params,
6829                                   ETH_DB_DATA_DEST, DB_DEST_XCM);
6830                         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6831                                   DB_AGG_CMD_SET);
6832                         SET_FIELD(txq->tx_db.data.params,
6833                                   ETH_DB_DATA_AGG_VAL_SEL,
6834                                   DQ_XCM_ETH_TX_BD_PROD_CMD);
6835 
6836                         txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6837                 }
6838         }
6839 
6840         /* Fill struct with RSS params */
6841         if (ha->num_rss > 1) {
6842                 rss_params->update_rss_config = 1;
6843                 rss_params->rss_enable = 1;
6844                 rss_params->update_rss_capabilities = 1;
6845                 rss_params->update_rss_ind_table = 1;
6846                 rss_params->update_rss_key = 1;
6847                 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6848                                        ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6849                 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6850 
6851                 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6852                 	fp = &ha->fp_array[(i % ha->num_rss)];
6853                         rss_params->rss_ind_table[i] = fp->rxq->handle;
6854 		}
6855 
6856                 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6857 			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6858 
6859         } else {
6860                 memset(rss_params, 0, sizeof(*rss_params));
6861         }
6862 
6863         /* Prepare and send the vport enable */
6864         memset(&vport_update_params, 0, sizeof(vport_update_params));
6865         vport_update_params.vport_id = vport_id;
6866         vport_update_params.update_vport_active_tx_flg = 1;
6867         vport_update_params.vport_active_tx_flg = 1;
6868         vport_update_params.update_vport_active_rx_flg = 1;
6869         vport_update_params.vport_active_rx_flg = 1;
6870         vport_update_params.rss_params = rss_params;
6871         vport_update_params.update_inner_vlan_removal_flg = 1;
6872         vport_update_params.inner_vlan_removal_flg = 1;
6873 
6874 	if (hw_lro_enable) {
6875 		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6876 
6877 		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6878 
6879 		tpa_params.update_tpa_en_flg = 1;
6880 		tpa_params.tpa_ipv4_en_flg = 1;
6881 		tpa_params.tpa_ipv6_en_flg = 1;
6882 
6883 		tpa_params.update_tpa_param_flg = 1;
6884 		tpa_params.tpa_pkt_split_flg = 0;
6885 		tpa_params.tpa_hdr_data_split_flg = 0;
6886 		tpa_params.tpa_gro_consistent_flg = 0;
6887 		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6888 		tpa_params.tpa_max_size = (uint16_t)(-1);
6889 		tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6890 		tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6891 
6892 		vport_update_params.sge_tpa_params = &tpa_params;
6893 	}
6894 
6895         rc = qlnx_update_vport(cdev, &vport_update_params);
6896         if (rc) {
6897 		QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6898                 return rc;
6899         }
6900 
6901         return 0;
6902 }
6903 
6904 static int
6905 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6906 	struct qlnx_tx_queue *txq)
6907 {
6908 	uint16_t	hw_bd_cons;
6909 	uint16_t	ecore_cons_idx;
6910 
6911 	QL_DPRINT2(ha, "enter\n");
6912 
6913 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6914 
6915 	while (hw_bd_cons !=
6916 		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6917 		mtx_lock(&fp->tx_mtx);
6918 
6919 		(void)qlnx_tx_int(ha, fp, txq);
6920 
6921 		mtx_unlock(&fp->tx_mtx);
6922 
6923 		qlnx_mdelay(__func__, 2);
6924 
6925 		hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6926 	}
6927 
6928 	QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6929 
6930         return 0;
6931 }
6932 
6933 static int
6934 qlnx_stop_queues(qlnx_host_t *ha)
6935 {
6936         struct qlnx_update_vport_params	vport_update_params;
6937         struct ecore_dev		*cdev;
6938         struct qlnx_fastpath		*fp;
6939         int				rc, tc, i;
6940 
6941         cdev = &ha->cdev;
6942 
6943         /* Disable the vport */
6944 
6945         memset(&vport_update_params, 0, sizeof(vport_update_params));
6946 
6947         vport_update_params.vport_id = 0;
6948         vport_update_params.update_vport_active_tx_flg = 1;
6949         vport_update_params.vport_active_tx_flg = 0;
6950         vport_update_params.update_vport_active_rx_flg = 1;
6951         vport_update_params.vport_active_rx_flg = 0;
6952         vport_update_params.rss_params = &ha->rss_params;
6953         vport_update_params.rss_params->update_rss_config = 0;
6954         vport_update_params.rss_params->rss_enable = 0;
6955         vport_update_params.update_inner_vlan_removal_flg = 0;
6956         vport_update_params.inner_vlan_removal_flg = 0;
6957 
6958 	QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6959 
6960         rc = qlnx_update_vport(cdev, &vport_update_params);
6961         if (rc) {
6962 		QL_DPRINT1(ha, "Failed to update vport\n");
6963                 return rc;
6964         }
6965 
6966         /* Flush Tx queues. If needed, request drain from MCP */
6967         for_each_rss(i) {
6968                 fp = &ha->fp_array[i];
6969 
6970                 for (tc = 0; tc < ha->num_tc; tc++) {
6971                         struct qlnx_tx_queue *txq = fp->txq[tc];
6972 
6973                         rc = qlnx_drain_txq(ha, fp, txq);
6974                         if (rc)
6975                                 return rc;
6976                 }
6977         }
6978 
6979         /* Stop all Queues in reverse order*/
6980         for (i = ha->num_rss - 1; i >= 0; i--) {
6981 		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6982 
6983                 fp = &ha->fp_array[i];
6984 
6985                 /* Stop the Tx Queue(s)*/
6986                 for (tc = 0; tc < ha->num_tc; tc++) {
6987 			int tx_queue_id __unused;
6988 
6989 			tx_queue_id = tc * ha->num_rss + i;
6990 			rc = ecore_eth_tx_queue_stop(p_hwfn,
6991 					fp->txq[tc]->handle);
6992 
6993                         if (rc) {
6994 				QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6995 					   tx_queue_id);
6996                                 return rc;
6997                         }
6998                 }
6999 
7000                 /* Stop the Rx Queue*/
7001 		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
7002 				false);
7003                 if (rc) {
7004                         QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
7005                         return rc;
7006                 }
7007         }
7008 
7009         /* Stop the vport */
7010 	for_each_hwfn(cdev, i) {
7011 		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
7012 
7013 		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
7014 
7015 		if (rc) {
7016                         QL_DPRINT1(ha, "Failed to stop VPORT\n");
7017 			return rc;
7018 		}
7019 	}
7020 
7021         return rc;
7022 }
7023 
7024 static int
7025 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
7026 	enum ecore_filter_opcode opcode,
7027 	unsigned char mac[ETH_ALEN])
7028 {
7029 	struct ecore_filter_ucast	ucast;
7030 	struct ecore_dev		*cdev;
7031 	int				rc;
7032 
7033 	cdev = &ha->cdev;
7034 
7035 	bzero(&ucast, sizeof(struct ecore_filter_ucast));
7036 
7037         ucast.opcode = opcode;
7038         ucast.type = ECORE_FILTER_MAC;
7039         ucast.is_rx_filter = 1;
7040         ucast.vport_to_add_to = 0;
7041         memcpy(&ucast.mac[0], mac, ETH_ALEN);
7042 
7043 	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7044 
7045         return (rc);
7046 }
7047 
7048 static int
7049 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
7050 {
7051 	struct ecore_filter_ucast	ucast;
7052 	struct ecore_dev		*cdev;
7053 	int				rc;
7054 
7055 	bzero(&ucast, sizeof(struct ecore_filter_ucast));
7056 
7057 	ucast.opcode = ECORE_FILTER_REPLACE;
7058 	ucast.type = ECORE_FILTER_MAC;
7059 	ucast.is_rx_filter = 1;
7060 
7061 	cdev = &ha->cdev;
7062 
7063 	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7064 
7065 	return (rc);
7066 }
7067 
7068 static int
7069 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
7070 {
7071 	struct ecore_filter_mcast	*mcast;
7072 	struct ecore_dev		*cdev;
7073 	int				rc, i;
7074 
7075 	cdev = &ha->cdev;
7076 
7077 	mcast = &ha->ecore_mcast;
7078 	bzero(mcast, sizeof(struct ecore_filter_mcast));
7079 
7080 	mcast->opcode = ECORE_FILTER_REMOVE;
7081 
7082 	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
7083 		if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
7084 			ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
7085 			ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
7086 			memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
7087 			mcast->num_mc_addrs++;
7088 		}
7089 	}
7090 	mcast = &ha->ecore_mcast;
7091 
7092 	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7093 
7094 	bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7095 	ha->nmcast = 0;
7096 
7097 	return (rc);
7098 }
7099 
7100 static int
7101 qlnx_clean_filters(qlnx_host_t *ha)
7102 {
7103         int	rc = 0;
7104 
7105 	/* Remove all unicast macs */
7106 	rc = qlnx_remove_all_ucast_mac(ha);
7107 	if (rc)
7108 		return rc;
7109 
7110 	/* Remove all multicast macs */
7111 	rc = qlnx_remove_all_mcast_mac(ha);
7112 	if (rc)
7113 		return rc;
7114 
7115         rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7116 
7117         return (rc);
7118 }
7119 
7120 static int
7121 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7122 {
7123 	struct ecore_filter_accept_flags	accept;
7124 	int					rc = 0;
7125 	struct ecore_dev			*cdev;
7126 
7127 	cdev = &ha->cdev;
7128 
7129 	bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7130 
7131 	accept.update_rx_mode_config = 1;
7132 	accept.rx_accept_filter = filter;
7133 
7134 	accept.update_tx_mode_config = 1;
7135 	accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7136 		ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7137 
7138 	rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7139 			ECORE_SPQ_MODE_CB, NULL);
7140 
7141 	return (rc);
7142 }
7143 
7144 static int
7145 qlnx_set_rx_mode(qlnx_host_t *ha)
7146 {
7147 	int	rc = 0;
7148 	uint8_t	filter;
7149 
7150 	rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7151         if (rc)
7152                 return rc;
7153 
7154 	rc = qlnx_remove_all_mcast_mac(ha);
7155         if (rc)
7156                 return rc;
7157 
7158 	filter = ECORE_ACCEPT_UCAST_MATCHED |
7159 			ECORE_ACCEPT_MCAST_MATCHED |
7160 			ECORE_ACCEPT_BCAST;
7161 
7162 	if (qlnx_vf_device(ha) == 0) {
7163 		filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7164 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7165 	}
7166 	ha->filter = filter;
7167 
7168 	rc = qlnx_set_rx_accept_filter(ha, filter);
7169 
7170 	return (rc);
7171 }
7172 
7173 static int
7174 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7175 {
7176         int			i, rc = 0;
7177 	struct ecore_dev	*cdev;
7178 	struct ecore_hwfn	*hwfn;
7179 	struct ecore_ptt	*ptt;
7180 
7181 	if (qlnx_vf_device(ha) == 0)
7182 		return (0);
7183 
7184 	cdev = &ha->cdev;
7185 
7186         for_each_hwfn(cdev, i) {
7187                 hwfn = &cdev->hwfns[i];
7188 
7189                 ptt = ecore_ptt_acquire(hwfn);
7190        	        if (!ptt)
7191                         return -EBUSY;
7192 
7193                 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7194 
7195                 ecore_ptt_release(hwfn, ptt);
7196 
7197                 if (rc)
7198                         return rc;
7199         }
7200         return (rc);
7201 }
7202 
7203 #if __FreeBSD_version >= 1100000
7204 static uint64_t
7205 qlnx_get_counter(if_t ifp, ift_counter cnt)
7206 {
7207 	qlnx_host_t *ha;
7208 	uint64_t count;
7209 
7210         ha = (qlnx_host_t *)if_getsoftc(ifp);
7211 
7212         switch (cnt) {
7213         case IFCOUNTER_IPACKETS:
7214 		count = ha->hw_stats.common.rx_ucast_pkts +
7215 			ha->hw_stats.common.rx_mcast_pkts +
7216 			ha->hw_stats.common.rx_bcast_pkts;
7217 		break;
7218 
7219         case IFCOUNTER_IERRORS:
7220 		count = ha->hw_stats.common.rx_crc_errors +
7221 			ha->hw_stats.common.rx_align_errors +
7222 			ha->hw_stats.common.rx_oversize_packets +
7223 			ha->hw_stats.common.rx_undersize_packets;
7224 		break;
7225 
7226         case IFCOUNTER_OPACKETS:
7227 		count = ha->hw_stats.common.tx_ucast_pkts +
7228 			ha->hw_stats.common.tx_mcast_pkts +
7229 			ha->hw_stats.common.tx_bcast_pkts;
7230 		break;
7231 
7232         case IFCOUNTER_OERRORS:
7233                 count = ha->hw_stats.common.tx_err_drop_pkts;
7234 		break;
7235 
7236         case IFCOUNTER_COLLISIONS:
7237                 return (0);
7238 
7239         case IFCOUNTER_IBYTES:
7240 		count = ha->hw_stats.common.rx_ucast_bytes +
7241 			ha->hw_stats.common.rx_mcast_bytes +
7242 			ha->hw_stats.common.rx_bcast_bytes;
7243 		break;
7244 
7245         case IFCOUNTER_OBYTES:
7246 		count = ha->hw_stats.common.tx_ucast_bytes +
7247 			ha->hw_stats.common.tx_mcast_bytes +
7248 			ha->hw_stats.common.tx_bcast_bytes;
7249 		break;
7250 
7251         case IFCOUNTER_IMCASTS:
7252 		count = ha->hw_stats.common.rx_mcast_bytes;
7253 		break;
7254 
7255         case IFCOUNTER_OMCASTS:
7256 		count = ha->hw_stats.common.tx_mcast_bytes;
7257 		break;
7258 
7259         case IFCOUNTER_IQDROPS:
7260         case IFCOUNTER_OQDROPS:
7261         case IFCOUNTER_NOPROTO:
7262 
7263         default:
7264                 return (if_get_counter_default(ifp, cnt));
7265         }
7266 	return (count);
7267 }
7268 #endif
7269 
7270 static void
7271 qlnx_timer(void *arg)
7272 {
7273 	qlnx_host_t	*ha;
7274 
7275 	ha = (qlnx_host_t *)arg;
7276 
7277 	if (ha->error_recovery) {
7278 		ha->error_recovery = 0;
7279 		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7280 		return;
7281 	}
7282 
7283        	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7284 
7285 	if (ha->storm_stats_gather)
7286 		qlnx_sample_storm_stats(ha);
7287 
7288 	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7289 
7290 	return;
7291 }
7292 
7293 static int
7294 qlnx_load(qlnx_host_t *ha)
7295 {
7296 	int			i;
7297 	int			rc = 0;
7298         device_t		dev;
7299 
7300         dev = ha->pci_dev;
7301 
7302 	QL_DPRINT2(ha, "enter\n");
7303 
7304         rc = qlnx_alloc_mem_arrays(ha);
7305         if (rc)
7306                 goto qlnx_load_exit0;
7307 
7308         qlnx_init_fp(ha);
7309 
7310         rc = qlnx_alloc_mem_load(ha);
7311         if (rc)
7312                 goto qlnx_load_exit1;
7313 
7314         QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7315 		   ha->num_rss, ha->num_tc);
7316 
7317 	for (i = 0; i < ha->num_rss; i++) {
7318 		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7319                         (INTR_TYPE_NET | INTR_MPSAFE),
7320                         NULL, qlnx_fp_isr, &ha->irq_vec[i],
7321                         &ha->irq_vec[i].handle))) {
7322                         QL_DPRINT1(ha, "could not setup interrupt\n");
7323                         goto qlnx_load_exit2;
7324 		}
7325 
7326 		QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
7327 			 irq %p handle %p\n", i,
7328 			ha->irq_vec[i].irq_rid,
7329 			ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7330 
7331 		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7332 	}
7333 
7334         rc = qlnx_start_queues(ha);
7335         if (rc)
7336                 goto qlnx_load_exit2;
7337 
7338         QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7339 
7340         /* Add primary mac and set Rx filters */
7341         rc = qlnx_set_rx_mode(ha);
7342         if (rc)
7343                 goto qlnx_load_exit2;
7344 
7345         /* Ask for link-up using current configuration */
7346 	qlnx_set_link(ha, true);
7347 
7348 	if (qlnx_vf_device(ha) == 0)
7349 		qlnx_link_update(&ha->cdev.hwfns[0]);
7350 
7351         ha->state = QLNX_STATE_OPEN;
7352 
7353 	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7354 
7355 	if (ha->flags.callout_init)
7356         	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7357 
7358         goto qlnx_load_exit0;
7359 
7360 qlnx_load_exit2:
7361         qlnx_free_mem_load(ha);
7362 
7363 qlnx_load_exit1:
7364         ha->num_rss = 0;
7365 
7366 qlnx_load_exit0:
7367 	QL_DPRINT2(ha, "exit [%d]\n", rc);
7368         return rc;
7369 }
7370 
7371 static void
7372 qlnx_drain_soft_lro(qlnx_host_t *ha)
7373 {
7374 #ifdef QLNX_SOFT_LRO
7375 
7376 	struct ifnet	*ifp;
7377 	int		i;
7378 
7379 	ifp = ha->ifp;
7380 
7381 	if (ifp->if_capenable & IFCAP_LRO) {
7382 	        for (i = 0; i < ha->num_rss; i++) {
7383 			struct qlnx_fastpath *fp = &ha->fp_array[i];
7384 			struct lro_ctrl *lro;
7385 
7386 			lro = &fp->rxq->lro;
7387 
7388 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
7389 
7390 			tcp_lro_flush_all(lro);
7391 
7392 #else
7393 			struct lro_entry *queued;
7394 
7395 			while ((!SLIST_EMPTY(&lro->lro_active))){
7396 				queued = SLIST_FIRST(&lro->lro_active);
7397 				SLIST_REMOVE_HEAD(&lro->lro_active, next);
7398 				tcp_lro_flush(lro, queued);
7399 			}
7400 
7401 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
7402                 }
7403 	}
7404 
7405 #endif /* #ifdef QLNX_SOFT_LRO */
7406 
7407 	return;
7408 }
7409 
7410 static void
7411 qlnx_unload(qlnx_host_t *ha)
7412 {
7413 	struct ecore_dev	*cdev;
7414         device_t		dev;
7415 	int			i;
7416 
7417 	cdev = &ha->cdev;
7418         dev = ha->pci_dev;
7419 
7420 	QL_DPRINT2(ha, "enter\n");
7421         QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7422 
7423 	if (ha->state == QLNX_STATE_OPEN) {
7424 		qlnx_set_link(ha, false);
7425 		qlnx_clean_filters(ha);
7426 		qlnx_stop_queues(ha);
7427 		ecore_hw_stop_fastpath(cdev);
7428 
7429 		for (i = 0; i < ha->num_rss; i++) {
7430 			if (ha->irq_vec[i].handle) {
7431 				(void)bus_teardown_intr(dev,
7432 					ha->irq_vec[i].irq,
7433 					ha->irq_vec[i].handle);
7434 				ha->irq_vec[i].handle = NULL;
7435 			}
7436 		}
7437 
7438 		qlnx_drain_fp_taskqueues(ha);
7439 		qlnx_drain_soft_lro(ha);
7440         	qlnx_free_mem_load(ha);
7441 	}
7442 
7443 	if (ha->flags.callout_init)
7444 		callout_drain(&ha->qlnx_callout);
7445 
7446 	qlnx_mdelay(__func__, 1000);
7447 
7448         ha->state = QLNX_STATE_CLOSED;
7449 
7450 	QL_DPRINT2(ha, "exit\n");
7451 	return;
7452 }
7453 
7454 static int
7455 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7456 {
7457 	int			rval = -1;
7458 	struct ecore_hwfn	*p_hwfn;
7459 	struct ecore_ptt	*p_ptt;
7460 
7461 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7462 
7463 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7464 	p_ptt = ecore_ptt_acquire(p_hwfn);
7465 
7466         if (!p_ptt) {
7467 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7468                 return (rval);
7469         }
7470 
7471         rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7472 
7473 	if (rval == DBG_STATUS_OK)
7474                 rval = 0;
7475         else {
7476 		QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7477 			"[0x%x]\n", rval);
7478 	}
7479 
7480         ecore_ptt_release(p_hwfn, p_ptt);
7481 
7482         return (rval);
7483 }
7484 
7485 static int
7486 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7487 {
7488 	int			rval = -1;
7489 	struct ecore_hwfn	*p_hwfn;
7490 	struct ecore_ptt	*p_ptt;
7491 
7492 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7493 
7494 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7495 	p_ptt = ecore_ptt_acquire(p_hwfn);
7496 
7497         if (!p_ptt) {
7498 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7499                 return (rval);
7500         }
7501 
7502         rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7503 
7504 	if (rval == DBG_STATUS_OK)
7505                 rval = 0;
7506         else {
7507 		QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7508 			" [0x%x]\n", rval);
7509 	}
7510 
7511         ecore_ptt_release(p_hwfn, p_ptt);
7512 
7513         return (rval);
7514 }
7515 
7516 static void
7517 qlnx_sample_storm_stats(qlnx_host_t *ha)
7518 {
7519         int			i, index;
7520         struct ecore_dev	*cdev;
7521 	qlnx_storm_stats_t	*s_stats;
7522 	uint32_t		reg;
7523         struct ecore_ptt	*p_ptt;
7524         struct ecore_hwfn	*hwfn;
7525 
7526 	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7527 		ha->storm_stats_gather = 0;
7528 		return;
7529 	}
7530 
7531         cdev = &ha->cdev;
7532 
7533         for_each_hwfn(cdev, i) {
7534                 hwfn = &cdev->hwfns[i];
7535 
7536                 p_ptt = ecore_ptt_acquire(hwfn);
7537                 if (!p_ptt)
7538                         return;
7539 
7540 		index = ha->storm_stats_index +
7541 				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7542 
7543 		s_stats = &ha->storm_stats[index];
7544 
7545 		/* XSTORM */
7546 		reg = XSEM_REG_FAST_MEMORY +
7547 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7548 		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7549 
7550 		reg = XSEM_REG_FAST_MEMORY +
7551 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7552 		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7553 
7554 		reg = XSEM_REG_FAST_MEMORY +
7555 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7556 		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7557 
7558 		reg = XSEM_REG_FAST_MEMORY +
7559 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7560 		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7561 
7562 		/* YSTORM */
7563 		reg = YSEM_REG_FAST_MEMORY +
7564 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7565 		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7566 
7567 		reg = YSEM_REG_FAST_MEMORY +
7568 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7569 		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7570 
7571 		reg = YSEM_REG_FAST_MEMORY +
7572 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7573 		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7574 
7575 		reg = YSEM_REG_FAST_MEMORY +
7576 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7577 		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7578 
7579 		/* PSTORM */
7580 		reg = PSEM_REG_FAST_MEMORY +
7581 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7582 		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7583 
7584 		reg = PSEM_REG_FAST_MEMORY +
7585 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7586 		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7587 
7588 		reg = PSEM_REG_FAST_MEMORY +
7589 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7590 		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7591 
7592 		reg = PSEM_REG_FAST_MEMORY +
7593 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7594 		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7595 
7596 		/* TSTORM */
7597 		reg = TSEM_REG_FAST_MEMORY +
7598 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7599 		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7600 
7601 		reg = TSEM_REG_FAST_MEMORY +
7602 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7603 		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7604 
7605 		reg = TSEM_REG_FAST_MEMORY +
7606 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7607 		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7608 
7609 		reg = TSEM_REG_FAST_MEMORY +
7610 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7611 		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7612 
7613 		/* MSTORM */
7614 		reg = MSEM_REG_FAST_MEMORY +
7615 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7616 		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7617 
7618 		reg = MSEM_REG_FAST_MEMORY +
7619 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7620 		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7621 
7622 		reg = MSEM_REG_FAST_MEMORY +
7623 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7624 		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7625 
7626 		reg = MSEM_REG_FAST_MEMORY +
7627 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7628 		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7629 
7630 		/* USTORM */
7631 		reg = USEM_REG_FAST_MEMORY +
7632 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7633 		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7634 
7635 		reg = USEM_REG_FAST_MEMORY +
7636 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7637 		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7638 
7639 		reg = USEM_REG_FAST_MEMORY +
7640 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7641 		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7642 
7643 		reg = USEM_REG_FAST_MEMORY +
7644 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7645 		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7646 
7647                 ecore_ptt_release(hwfn, p_ptt);
7648         }
7649 
7650 	ha->storm_stats_index++;
7651 
7652         return;
7653 }
7654 
7655 /*
7656  * Name: qlnx_dump_buf8
7657  * Function: dumps a buffer as bytes
7658  */
7659 static void
7660 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7661 {
7662         device_t	dev;
7663         uint32_t	i = 0;
7664         uint8_t		*buf;
7665 
7666         dev = ha->pci_dev;
7667         buf = dbuf;
7668 
7669         device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7670 
7671         while (len >= 16) {
7672                 device_printf(dev,"0x%08x:"
7673                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7674                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7675                         buf[0], buf[1], buf[2], buf[3],
7676                         buf[4], buf[5], buf[6], buf[7],
7677                         buf[8], buf[9], buf[10], buf[11],
7678                         buf[12], buf[13], buf[14], buf[15]);
7679                 i += 16;
7680                 len -= 16;
7681                 buf += 16;
7682         }
7683         switch (len) {
7684         case 1:
7685                 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
7686                 break;
7687         case 2:
7688                 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
7689                 break;
7690         case 3:
7691                 device_printf(dev,"0x%08x: %02x %02x %02x\n",
7692                         i, buf[0], buf[1], buf[2]);
7693                 break;
7694         case 4:
7695                 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
7696                         buf[0], buf[1], buf[2], buf[3]);
7697                 break;
7698         case 5:
7699                 device_printf(dev,"0x%08x:"
7700                         " %02x %02x %02x %02x %02x\n", i,
7701                         buf[0], buf[1], buf[2], buf[3], buf[4]);
7702                 break;
7703         case 6:
7704                 device_printf(dev,"0x%08x:"
7705                         " %02x %02x %02x %02x %02x %02x\n", i,
7706                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
7707                 break;
7708         case 7:
7709                 device_printf(dev,"0x%08x:"
7710                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
7711                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
7712                 break;
7713         case 8:
7714                 device_printf(dev,"0x%08x:"
7715                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7716                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7717                         buf[7]);
7718                 break;
7719         case 9:
7720                 device_printf(dev,"0x%08x:"
7721                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7722                         " %02x\n", i,
7723                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7724                         buf[7], buf[8]);
7725                 break;
7726         case 10:
7727                 device_printf(dev,"0x%08x:"
7728                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7729                         " %02x %02x\n", i,
7730                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7731                         buf[7], buf[8], buf[9]);
7732                 break;
7733         case 11:
7734                 device_printf(dev,"0x%08x:"
7735                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7736                         " %02x %02x %02x\n", i,
7737                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7738                         buf[7], buf[8], buf[9], buf[10]);
7739                 break;
7740         case 12:
7741                 device_printf(dev,"0x%08x:"
7742                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7743                         " %02x %02x %02x %02x\n", i,
7744                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7745                         buf[7], buf[8], buf[9], buf[10], buf[11]);
7746                 break;
7747         case 13:
7748                 device_printf(dev,"0x%08x:"
7749                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7750                         " %02x %02x %02x %02x %02x\n", i,
7751                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7752                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7753                 break;
7754         case 14:
7755                 device_printf(dev,"0x%08x:"
7756                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7757                         " %02x %02x %02x %02x %02x %02x\n", i,
7758                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7759                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7760                         buf[13]);
7761                 break;
7762         case 15:
7763                 device_printf(dev,"0x%08x:"
7764                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7765                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
7766                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7767                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7768                         buf[13], buf[14]);
7769                 break;
7770         default:
7771                 break;
7772         }
7773 
7774         device_printf(dev, "%s: %s dump end\n", __func__, msg);
7775 
7776         return;
7777 }
7778 
7779 #ifdef CONFIG_ECORE_SRIOV
7780 
7781 static void
7782 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
7783 {
7784         struct ecore_public_vf_info *vf_info;
7785 
7786         vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
7787 
7788         if (!vf_info)
7789                 return;
7790 
7791         /* Clear the VF mac */
7792         memset(vf_info->forced_mac, 0, ETH_ALEN);
7793 
7794         vf_info->forced_vlan = 0;
7795 
7796 	return;
7797 }
7798 
7799 void
7800 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
7801 {
7802 	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
7803 	return;
7804 }
7805 
7806 static int
7807 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
7808 	struct ecore_filter_ucast *params)
7809 {
7810         struct ecore_public_vf_info *vf;
7811 
7812 	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
7813 		QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
7814 			"VF[%d] vport not initialized\n", vfid);
7815 		return ECORE_INVAL;
7816 	}
7817 
7818         vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
7819         if (!vf)
7820                 return -EINVAL;
7821 
7822         /* No real decision to make; Store the configured MAC */
7823         if (params->type == ECORE_FILTER_MAC ||
7824             params->type == ECORE_FILTER_MAC_VLAN)
7825                 memcpy(params->mac, vf->forced_mac, ETH_ALEN);
7826 
7827         return 0;
7828 }
7829 
7830 int
7831 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
7832 {
7833 	return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
7834 }
7835 
7836 static int
7837 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
7838         struct ecore_sp_vport_update_params *params, uint16_t * tlvs)
7839 {
7840 	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
7841 		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
7842 			"VF[%d] vport not initialized\n", vfid);
7843 		return ECORE_INVAL;
7844 	}
7845 
7846         /* Untrusted VFs can't even be trusted to know that fact.
7847          * Simply indicate everything is configured fine, and trace
7848          * configuration 'behind their back'.
7849          */
7850         if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
7851                 return 0;
7852 
7853         return 0;
7854 
7855 }
7856 int
7857 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
7858 {
7859 	return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
7860 }
7861 
7862 static int
7863 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
7864 {
7865 	int			i;
7866 	struct ecore_dev	*cdev;
7867 
7868 	cdev = p_hwfn->p_dev;
7869 
7870 	for (i = 0; i < cdev->num_hwfns; i++) {
7871 		if (&cdev->hwfns[i] == p_hwfn)
7872 			break;
7873 	}
7874 
7875 	if (i >= cdev->num_hwfns)
7876 		return (-1);
7877 
7878 	return (i);
7879 }
7880 
7881 static int
7882 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
7883 {
7884 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7885 	int i;
7886 
7887 	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7888 		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7889 
7890 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7891 		return (-1);
7892 
7893 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7894 		atomic_testandset_32(&ha->sriov_task[i].flags,
7895 			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
7896 
7897 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7898 			&ha->sriov_task[i].pf_task);
7899 	}
7900 
7901 	return (ECORE_SUCCESS);
7902 }
7903 
7904 int
7905 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
7906 {
7907 	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
7908 }
7909 
7910 static void
7911 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
7912 {
7913 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7914 	int i;
7915 
7916 	if (!ha->sriov_initialized)
7917 		return;
7918 
7919 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p \n",
7920 		ha, p_hwfn->p_dev, p_hwfn);
7921 
7922 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7923 		return;
7924 
7925 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7926 		atomic_testandset_32(&ha->sriov_task[i].flags,
7927 			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);
7928 
7929 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7930 			&ha->sriov_task[i].pf_task);
7931 	}
7932 
7933 	return;
7934 }
7935 
7936 void
7937 qlnx_vf_flr_update(void *p_hwfn)
7938 {
7939 	__qlnx_vf_flr_update(p_hwfn);
7940 
7941 	return;
7942 }
7943 
7944 #ifndef QLNX_VF
7945 
7946 static void
7947 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
7948 {
7949 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7950 	int i;
7951 
7952 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p \n",
7953 		ha, p_hwfn->p_dev, p_hwfn);
7954 
7955 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7956 		return;
7957 
7958 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p i = %d\n",
7959 		ha, p_hwfn->p_dev, p_hwfn, i);
7960 
7961 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7962 		atomic_testandset_32(&ha->sriov_task[i].flags,
7963 			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);
7964 
7965 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7966 			&ha->sriov_task[i].pf_task);
7967 	}
7968 }
7969 
7970 static void
7971 qlnx_initialize_sriov(qlnx_host_t *ha)
7972 {
7973 	device_t	dev;
7974 	nvlist_t	*pf_schema, *vf_schema;
7975 	int		iov_error;
7976 
7977 	dev = ha->pci_dev;
7978 
7979 	pf_schema = pci_iov_schema_alloc_node();
7980 	vf_schema = pci_iov_schema_alloc_node();
7981 
7982 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
7983 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
7984 		IOV_SCHEMA_HASDEFAULT, FALSE);
7985 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
7986 		IOV_SCHEMA_HASDEFAULT, FALSE);
7987 	pci_iov_schema_add_uint16(vf_schema, "num-queues",
7988 		IOV_SCHEMA_HASDEFAULT, 1);
7989 
7990 	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
7991 
7992 	if (iov_error != 0) {
7993 		ha->sriov_initialized = 0;
7994 	} else {
7995 		device_printf(dev, "SRIOV initialized\n");
7996 		ha->sriov_initialized = 1;
7997 	}
7998 
7999 	return;
8000 }
8001 
8002 static void
8003 qlnx_sriov_disable(qlnx_host_t *ha)
8004 {
8005 	struct ecore_dev *cdev;
8006 	int i, j;
8007 
8008 	cdev = &ha->cdev;
8009 
8010 	ecore_iov_set_vfs_to_disable(cdev, true);
8011 
8012 	for_each_hwfn(cdev, i) {
8013 		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
8014 		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
8015 
8016 		if (!ptt) {
8017 			QL_DPRINT1(ha, "Failed to acquire ptt\n");
8018 			return;
8019 		}
8020 		/* Clean WFQ db and configure equal weight for all vports */
8021 		ecore_clean_wfq_db(hwfn, ptt);
8022 
8023 		ecore_for_each_vf(hwfn, j) {
8024 			int k = 0;
8025 
8026 			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
8027 				continue;
8028 
8029 			if (ecore_iov_is_vf_started(hwfn, j)) {
8030 				/* Wait until VF is disabled before releasing */
8031 
8032 				for (k = 0; k < 100; k++) {
8033 					if (!ecore_iov_is_vf_stopped(hwfn, j)) {
8034 						qlnx_mdelay(__func__, 10);
8035 					} else
8036 						break;
8037 				}
8038 			}
8039 
8040 			if (k < 100)
8041 				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
8042                                                           ptt, j);
8043 			else {
8044 				QL_DPRINT1(ha,
8045 					"Timeout waiting for VF's FLR to end\n");
8046 			}
8047 		}
8048 		ecore_ptt_release(hwfn, ptt);
8049 	}
8050 
8051 	ecore_iov_set_vfs_to_disable(cdev, false);
8052 
8053 	return;
8054 }
8055 
8056 static void
8057 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
8058 	struct ecore_iov_vf_init_params *params)
8059 {
8060         u16 base, i;
8061 
8062         /* Since we have an equal resource distribution per-VF, and we assume
8063          * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
8064          * sequentially from there.
8065          */
8066         base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
8067 
8068         params->rel_vf_id = vfid;
8069 
8070         for (i = 0; i < params->num_queues; i++) {
8071                 params->req_rx_queue[i] = base + i;
8072                 params->req_tx_queue[i] = base + i;
8073         }
8074 
8075         /* PF uses indices 0 for itself; Set vport/RSS afterwards */
8076         params->vport_id = vfid + 1;
8077         params->rss_eng_id = vfid + 1;
8078 
8079 	return;
8080 }
8081 
8082 static int
8083 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
8084 {
8085 	qlnx_host_t		*ha;
8086 	struct ecore_dev	*cdev;
8087 	struct ecore_iov_vf_init_params params;
8088 	int ret, j, i;
8089 	uint32_t max_vfs;
8090 
8091 	if ((ha = device_get_softc(dev)) == NULL) {
8092 		device_printf(dev, "%s: cannot get softc\n", __func__);
8093 		return (-1);
8094 	}
8095 
8096 	if (qlnx_create_pf_taskqueues(ha) != 0)
8097 		goto qlnx_iov_init_err0;
8098 
8099 	cdev = &ha->cdev;
8100 
8101 	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);
8102 
8103 	QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
8104 		dev, num_vfs, max_vfs);
8105 
8106         if (num_vfs >= max_vfs) {
8107                 QL_DPRINT1(ha, "Can start at most %d VFs\n",
8108                           (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
8109 		goto qlnx_iov_init_err0;
8110         }
8111 
8112 	ha->vf_attr =  malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
8113 				M_NOWAIT);
8114 
8115 	if (ha->vf_attr == NULL)
8116 		goto qlnx_iov_init_err0;
8117 
8118         memset(&params, 0, sizeof(params));
8119 
8120         /* Initialize HW for VF access */
8121         for_each_hwfn(cdev, j) {
8122                 struct ecore_hwfn *hwfn = &cdev->hwfns[j];
8123                 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
8124 
8125                 /* Make sure not to use more than 16 queues per VF */
8126                 params.num_queues = min_t(int,
8127                                           (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
8128                                           16);
8129 
8130                 if (!ptt) {
8131                         QL_DPRINT1(ha, "Failed to acquire ptt\n");
8132                         goto qlnx_iov_init_err1;
8133                 }
8134 
8135                 for (i = 0; i < num_vfs; i++) {
8136                         if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
8137                                 continue;
8138 
8139                         qlnx_sriov_enable_qid_config(hwfn, i, &params);
8140 
8141                         ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);
8142 
8143                         if (ret) {
8144                                 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
8145                                 ecore_ptt_release(hwfn, ptt);
8146                                 goto qlnx_iov_init_err1;
8147                         }
8148                 }
8149 
8150                 ecore_ptt_release(hwfn, ptt);
8151         }
8152 
8153 	ha->num_vfs = num_vfs;
8154 	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
8155 
8156 	QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);
8157 
8158 	return (0);
8159 
8160 qlnx_iov_init_err1:
8161 	qlnx_sriov_disable(ha);
8162 
8163 qlnx_iov_init_err0:
8164 	qlnx_destroy_pf_taskqueues(ha);
8165 	ha->num_vfs = 0;
8166 
8167 	return (-1);
8168 }
8169 
8170 static void
8171 qlnx_iov_uninit(device_t dev)
8172 {
8173 	qlnx_host_t	*ha;
8174 
8175 	if ((ha = device_get_softc(dev)) == NULL) {
8176 		device_printf(dev, "%s: cannot get softc\n", __func__);
8177 		return;
8178 	}
8179 
8180 	QL_DPRINT2(ha," dev = %p enter\n", dev);
8181 
8182 	qlnx_sriov_disable(ha);
8183 	qlnx_destroy_pf_taskqueues(ha);
8184 
8185 	free(ha->vf_attr, M_QLNXBUF);
8186 	ha->vf_attr = NULL;
8187 
8188 	ha->num_vfs = 0;
8189 
8190 	QL_DPRINT2(ha," dev = %p exit\n", dev);
8191 	return;
8192 }
8193 
8194 static int
8195 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
8196 {
8197 	qlnx_host_t	*ha;
8198 	qlnx_vf_attr_t	*vf_attr;
8199 	unsigned const char *mac;
8200 	size_t size;
8201 	struct ecore_hwfn *p_hwfn;
8202 
8203 	if ((ha = device_get_softc(dev)) == NULL) {
8204 		device_printf(dev, "%s: cannot get softc\n", __func__);
8205 		return (-1);
8206 	}
8207 
8208 	QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);
8209 
8210 	if (vfnum > (ha->num_vfs - 1)) {
8211 		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
8212 			vfnum, (ha->num_vfs - 1));
8213 	}
8214 
8215 	vf_attr = &ha->vf_attr[vfnum];
8216 
8217         if (nvlist_exists_binary(params, "mac-addr")) {
8218                 mac = nvlist_get_binary(params, "mac-addr", &size);
8219                 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
8220 		device_printf(dev,
8221 			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
8222 			__func__, vf_attr->mac_addr[0],
8223 			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
8224 			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
8225 			vf_attr->mac_addr[5]);
8226 		p_hwfn = &ha->cdev.hwfns[0];
8227 		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
8228 			vfnum);
8229 	}
8230 
8231 	QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);
8232 	return (0);
8233 }
8234 
8235 static void
8236 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8237 {
8238         uint64_t events[ECORE_VF_ARRAY_LENGTH];
8239         struct ecore_ptt *ptt;
8240         int i;
8241 
8242         ptt = ecore_ptt_acquire(p_hwfn);
8243         if (!ptt) {
8244                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8245 		__qlnx_pf_vf_msg(p_hwfn, 0);
8246                 return;
8247         }
8248 
8249         ecore_iov_pf_get_pending_events(p_hwfn, events);
8250 
8251         QL_DPRINT2(ha, "Event mask of VF events:"
8252 		"0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n",
8253                    events[0], events[1], events[2]);
8254 
8255         ecore_for_each_vf(p_hwfn, i) {
8256                 /* Skip VFs with no pending messages */
8257                 if (!(events[i / 64] & (1ULL << (i % 64))))
8258                         continue;
8259 
8260 		QL_DPRINT2(ha,
8261                            "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
8262                            i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
8263 
8264                 /* Copy VF's message to PF's request buffer for that VF */
8265                 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
8266                         continue;
8267 
8268                 ecore_iov_process_mbx_req(p_hwfn, ptt, i);
8269         }
8270 
8271         ecore_ptt_release(p_hwfn, ptt);
8272 
8273 	return;
8274 }
8275 
8276 static void
8277 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8278 {
8279         struct ecore_ptt *ptt;
8280 	int ret;
8281 
8282 	ptt = ecore_ptt_acquire(p_hwfn);
8283 
8284 	if (!ptt) {
8285                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8286 		__qlnx_vf_flr_update(p_hwfn);
8287                 return;
8288 	}
8289 
8290 	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);
8291 
8292 	if (ret) {
8293                 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
8294 	}
8295 
8296 	ecore_ptt_release(p_hwfn, ptt);
8297 
8298 	return;
8299 }
8300 
8301 static void
8302 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8303 {
8304         struct ecore_ptt *ptt;
8305 	int i;
8306 
8307 	ptt = ecore_ptt_acquire(p_hwfn);
8308 
8309 	if (!ptt) {
8310                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8311 		qlnx_vf_bulleting_update(p_hwfn);
8312                 return;
8313 	}
8314 
8315 	ecore_for_each_vf(p_hwfn, i) {
8316 		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
8317 			p_hwfn, i);
8318 		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
8319 	}
8320 
8321 	ecore_ptt_release(p_hwfn, ptt);
8322 
8323 	return;
8324 }
8325 
8326 static void
8327 qlnx_pf_taskqueue(void *context, int pending)
8328 {
8329 	struct ecore_hwfn	*p_hwfn;
8330 	qlnx_host_t		*ha;
8331 	int			i;
8332 
8333 	p_hwfn = context;
8334 
8335 	if (p_hwfn == NULL)
8336 		return;
8337 
8338 	ha = (qlnx_host_t *)(p_hwfn->p_dev);
8339 
8340 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
8341 		return;
8342 
8343 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8344 		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
8345 		qlnx_handle_vf_msg(ha, p_hwfn);
8346 
8347 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8348 		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
8349 		qlnx_handle_vf_flr_update(ha, p_hwfn);
8350 
8351 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8352 		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
8353 		qlnx_handle_bulletin_update(ha, p_hwfn);
8354 
8355 	return;
8356 }
8357 
8358 static int
8359 qlnx_create_pf_taskqueues(qlnx_host_t *ha)
8360 {
8361 	int	i;
8362 	uint8_t	tq_name[32];
8363 
8364 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
8365                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
8366 
8367 		bzero(tq_name, sizeof (tq_name));
8368 		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);
8369 
8370 		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);
8371 
8372 		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
8373 			 taskqueue_thread_enqueue,
8374 			&ha->sriov_task[i].pf_taskqueue);
8375 
8376 		if (ha->sriov_task[i].pf_taskqueue == NULL)
8377 			return (-1);
8378 
8379 		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
8380 			PI_NET, "%s", tq_name);
8381 
8382 		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
8383 	}
8384 
8385 	return (0);
8386 }
8387 
8388 static void
8389 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
8390 {
8391 	int	i;
8392 
8393 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
8394 		if (ha->sriov_task[i].pf_taskqueue != NULL) {
8395 			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
8396 				&ha->sriov_task[i].pf_task);
8397 			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
8398 			ha->sriov_task[i].pf_taskqueue = NULL;
8399 		}
8400 	}
8401 	return;
8402 }
8403 
8404 static void
8405 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
8406 {
8407 	struct ecore_mcp_link_capabilities caps;
8408 	struct ecore_mcp_link_params params;
8409 	struct ecore_mcp_link_state link;
8410 	int i;
8411 
8412 	if (!p_hwfn->pf_iov_info)
8413 		return;
8414 
8415 	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
8416 	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
8417 	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));
8418 
8419 	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
8420         memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
8421         memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
8422 
8423 	QL_DPRINT2(ha, "called\n");
8424 
8425         /* Update bulletin of all future possible VFs with link configuration */
8426         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
8427                 /* Modify link according to the VF's configured link state */
8428 
8429                 link.link_up = false;
8430 
8431                 if (ha->link_up) {
8432                         link.link_up = true;
8433                         /* Set speed according to maximum supported by HW.
8434                          * that is 40G for regular devices and 100G for CMT
8435                          * mode devices.
8436                          */
8437                         link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
8438 						100000 : link.speed;
8439 		}
8440 		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
8441                 ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
8442         }
8443 
8444 	qlnx_vf_bulleting_update(p_hwfn);
8445 
8446 	return;
8447 }
8448 #endif /* #ifndef QLNX_VF */
8449 #endif /* #ifdef CONFIG_ECORE_SRIOV */
8450